46 files changed, 10564 insertions(+), 37 deletions(-)
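The Dockerfile hunk below layers skopeo into the openshift-ansible image; the likely consumer is the docker_image_availability check introduced in the 3.6.8 changelog entry, since skopeo can probe a registry for an image without pulling it. A minimal sketch of such a probe (the helper and the image name are illustrative, not part of this diff):

import subprocess

def image_available(image):
    # `skopeo inspect` queries the registry directly; returncode 0 means the image exists
    return subprocess.call(['skopeo', 'inspect', 'docker://' + image]) == 0

image_available('registry.access.redhat.com/openshift3/ose-pod')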
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible index 55e2ccb0b..98f47be52 100644 --- a/.tito/packages/openshift-ansible +++ b/.tito/packages/openshift-ansible @@ -1 +1 @@ -3.6.7-1 ./ +3.6.9-1 ./ diff --git a/Dockerfile b/Dockerfile index c6593491d..eecf3630b 100644 --- a/Dockerfile +++ b/Dockerfile @@ -14,6 +14,15 @@ LABEL name="openshift-ansible" \        io.openshift.expose-services="" \        io.openshift.tags="openshift,install,upgrade,ansible" +USER root + +RUN INSTALL_PKGS="skopeo" && \ +    yum install -y --setopt=tsflags=nodocs $INSTALL_PKGS && \ +    rpm -V $INSTALL_PKGS && \ +    yum clean all + +USER ${USER_UID} +  # The playbook to be run is specified via the PLAYBOOK_FILE env var.  # This sets a default of openshift_facts.yml as it's an informative playbook  # that can help test that everything is set properly (inventory, sshkeys) diff --git a/openshift-ansible.spec b/openshift-ansible.spec index fc3773f1f..632a7b933 100644 --- a/openshift-ansible.spec +++ b/openshift-ansible.spec @@ -9,7 +9,7 @@  %global __requires_exclude ^/usr/bin/ansible-playbook$  Name:           openshift-ansible -Version:        3.6.7 +Version:        3.6.9  Release:        1%{?dist}  Summary:        Openshift and Atomic Enterprise Ansible  License:        ASL 2.0 @@ -270,6 +270,31 @@ Atomic OpenShift Utilities includes  %changelog +* Sat Mar 25 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.9-1 +- Found this while searching the metrics role for logging, is this wrong? +  (sdodson@redhat.com) +- Fix overriding openshift_{logging,metrics}_image_prefix (sdodson@redhat.com) +- Make linter happy (sdodson@redhat.com) +- Specify enterprise defaults for logging and metrics images +  (sdodson@redhat.com) +- Update s2i-dotnetcore content (sdodson@redhat.com) +- Stop all services before upgrading openvswitch (sdodson@redhat.com) +- Bug 1434300 - Log entries are generated in ES after deployed logging stacks +  via ansible, but can not be found in kibana. (rmeggins@redhat.com) +- Adding error checking to the delete. (kwoodson@redhat.com) +- Updated comment. (kwoodson@redhat.com) +- Fixed doc.  Updated test to change existing key.  Updated module spec for +  required name param. (kwoodson@redhat.com) +- Adding oc_configmap to lib_openshift. (kwoodson@redhat.com) + +* Fri Mar 24 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.8-1 +- vendor patched upstream docker_container module. (jvallejo@redhat.com) +- add docker_image_availability check (jvallejo@redhat.com) +- Do not use auto_expand_replicas (lukas.vlcek@gmail.com) +- Adding tests to increase TC. (kwoodson@redhat.com) +- Adding a pvc create test case. 
(kwoodson@redhat.com)
+- Cherry picking from #3711 (ewolinet@redhat.com)
+
 * Thu Mar 23 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.7-1
 - openshift_logging calculate min_masters to fail early on split brain
   (jcantril@redhat.com)
diff --git a/roles/lib_openshift/library/oc_configmap.py b/roles/lib_openshift/library/oc_configmap.py
new file mode 100644
index 000000000..90d38c7a9
--- /dev/null
+++ b/roles/lib_openshift/library/oc_configmap.py
@@ -0,0 +1,1577 @@
+#!/usr/bin/env python
+# pylint: disable=missing-docstring
+# flake8: noqa: T001
+#     ___ ___ _  _ ___ ___    _ _____ ___ ___
+#    / __| __| \| | __| _ \  /_\_   _| __|   \
+#   | (_ | _|| .` | _||   / / _ \| | | _|| |) |
+#    \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
+#   |   \ / _ \  | \| |/ _ \_   _| | __|   \_ _|_   _|
+#   | |) | (_) | | .` | (_) || |   | _|| |) | |  | |
+#   |___/ \___/  |_|\_|\___/ |_|   |___|___/___| |_|
+#
+# Copyright 2016 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# -*- -*- -*- Begin included fragment: lib/import.py -*- -*- -*-
+'''
+   OpenShiftCLI class that wraps the oc commands in a subprocess
+'''
+# pylint: disable=too-many-lines
+
+from __future__ import print_function
+import atexit
+import copy
+import json
+import os
+import re
+import shutil
+import subprocess
+import tempfile
+# pylint: disable=import-error
+try:
+    import ruamel.yaml as yaml
+except ImportError:
+    import yaml
+
+from ansible.module_utils.basic import AnsibleModule
+
+# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: doc/configmap -*- -*- -*-
+
+DOCUMENTATION = '''
+---
+module: oc_configmap
+short_description: Modify and idempotently manage OpenShift configmaps
+description:
+  - Modify OpenShift configmaps programmatically.
+options:
+  state:
+    description:
+    - Supported states are present, absent, and list
+    - present - will ensure the object is created or updated to the value specified
+    - list - will return the configmap
+    - absent - will remove the configmap
+    required: False
+    default: present
+    choices: ['present', 'absent', 'list']
+    aliases: []
+  kubeconfig:
+    description:
+    - The path for the kubeconfig file to use for authentication
+    required: false
+    default: /etc/origin/master/admin.kubeconfig
+    aliases: []
+  debug:
+    description:
+    - Turn on debug output.
+    required: false
+    default: False
+    aliases: []
+  name:
+    description:
+    - Name of the object that is being queried.
+    required: True
+    default: None
+    aliases: []
+  namespace:
+    description:
+    - The namespace where the object lives.
+    required: false
+    default: default
+    aliases: []
+  from_file:
+    description:
+    - A dict of key/value pairs in which each key is a configmap key and each value is the path of the file that supplies its content.
+    required: false
+    default: None
+    aliases: []
+  from_literal:
+    description:
+    - A dict of key/value pairs in which each key is a configmap key and each value is its literal string content
+    required: false
+    default: None
+    aliases: []
+author:
+- "Kenny Woodson <kwoodson@redhat.com>"
+extends_documentation_fragment: []
+'''
+
+EXAMPLES = '''
+- name: create configmap
+  oc_configmap:
+    state: present
+    name: testmap
+    from_file:
+      secret: /path/to/secret
+    from_literal:
+      title: systemadmin
+  register: configout
+'''
+
+# -*- -*- -*- End included fragment: doc/configmap -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
+# pylint: disable=undefined-variable,missing-docstring
+# noqa: E301,E302
+
+
+class YeditException(Exception):
+    ''' Exception class for Yedit '''
+    pass
+
+
+# pylint: disable=too-many-public-methods
+class Yedit(object):
+    ''' Class to modify yaml files '''
+    re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
+    re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+    com_sep = set(['.', '#', '|', ':'])
+
+    # pylint: disable=too-many-arguments
+    def __init__(self,
+                 filename=None,
+                 content=None,
+                 content_type='yaml',
+                 separator='.',
+                 backup=False):
+        self.content = content
+        self._separator = separator
+        self.filename = filename
+        self.__yaml_dict = content
+        self.content_type = content_type
+        self.backup = backup
+        self.load(content_type=self.content_type)
+        if self.__yaml_dict is None:
+            self.__yaml_dict = {}
+
+    @property
+    def separator(self):
+        ''' getter method for separator '''
+        return self._separator
+
+    @separator.setter
+    def separator(self, value):
+        ''' setter method for separator '''
+        self._separator = value
+
+    @property
+    def yaml_dict(self):
+        ''' getter method for yaml_dict '''
+        return self.__yaml_dict
+
+    @yaml_dict.setter
+    def yaml_dict(self, value):
+        ''' setter method for yaml_dict '''
+        self.__yaml_dict = value
+
+    @staticmethod
+    def parse_key(key, sep='.'):
+        '''parse the key allowing the appropriate separator'''
+        common_separators = list(Yedit.com_sep - set([sep]))
+        return re.findall(Yedit.re_key % ''.join(common_separators), key)
+
+    @staticmethod
+    def valid_key(key, sep='.'):
+        '''validate the incoming key'''
+        common_separators = list(Yedit.com_sep - set([sep]))
+        if not re.match(Yedit.re_valid_key % ''.join(common_separators), key):
+            return False
+
+        return True
+
+    @staticmethod
+    def remove_entry(data, key, sep='.'):
+        ''' remove data at location key '''
+        if key == '' and isinstance(data, dict):
+            data.clear()
+            return True
+        elif key == '' and isinstance(data, list):
+            del data[:]
+            return True
+
+        if not (key and Yedit.valid_key(key, sep)) and \
+           isinstance(data, (list, dict)):
+            return None
+
+        key_indexes = Yedit.parse_key(key, sep)
+        for arr_ind, dict_key in key_indexes[:-1]:
+            if dict_key and isinstance(data, dict):
+                data = data.get(dict_key, None)
+            elif (arr_ind and isinstance(data, list) and
+                  int(arr_ind) <= len(data) - 1):
+                data =
data[int(arr_ind)] +            else: +                return None + +        # process last index for remove +        # expected list entry +        if key_indexes[-1][0]: +            if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:  # noqa: E501 +                del data[int(key_indexes[-1][0])] +                return True + +        # expected dict entry +        elif key_indexes[-1][1]: +            if isinstance(data, dict): +                del data[key_indexes[-1][1]] +                return True + +    @staticmethod +    def add_entry(data, key, item=None, sep='.'): +        ''' Get an item from a dictionary with key notation a.b.c +            d = {'a': {'b': 'c'}}} +            key = a#b +            return c +        ''' +        if key == '': +            pass +        elif (not (key and Yedit.valid_key(key, sep)) and +              isinstance(data, (list, dict))): +            return None + +        key_indexes = Yedit.parse_key(key, sep) +        for arr_ind, dict_key in key_indexes[:-1]: +            if dict_key: +                if isinstance(data, dict) and dict_key in data and data[dict_key]:  # noqa: E501 +                    data = data[dict_key] +                    continue + +                elif data and not isinstance(data, dict): +                    raise YeditException("Unexpected item type found while going through key " + +                                         "path: {} (at key: {})".format(key, dict_key)) + +                data[dict_key] = {} +                data = data[dict_key] + +            elif (arr_ind and isinstance(data, list) and +                  int(arr_ind) <= len(data) - 1): +                data = data[int(arr_ind)] +            else: +                raise YeditException("Unexpected item type found while going through key path: {}".format(key)) + +        if key == '': +            data = item + +        # process last index for add +        # expected list entry +        elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:  # noqa: E501 +            data[int(key_indexes[-1][0])] = item + +        # expected dict entry +        elif key_indexes[-1][1] and isinstance(data, dict): +            data[key_indexes[-1][1]] = item + +        # didn't add/update to an existing list, nor add/update key to a dict +        # so we must have been provided some syntax like a.b.c[<int>] = "data" for a +        # non-existent array +        else: +            raise YeditException("Error adding to object at path: {}".format(key)) + +        return data + +    @staticmethod +    def get_entry(data, key, sep='.'): +        ''' Get an item from a dictionary with key notation a.b.c +            d = {'a': {'b': 'c'}}} +            key = a.b +            return c +        ''' +        if key == '': +            pass +        elif (not (key and Yedit.valid_key(key, sep)) and +              isinstance(data, (list, dict))): +            return None + +        key_indexes = Yedit.parse_key(key, sep) +        for arr_ind, dict_key in key_indexes: +            if dict_key and isinstance(data, dict): +                data = data.get(dict_key, None) +            elif (arr_ind and isinstance(data, list) and +                  int(arr_ind) <= len(data) - 1): +                data = data[int(arr_ind)] +            else: +                return None + +        return data + +    @staticmethod +    def _write(filename, contents): +        ''' Actually write the file contents to disk. 
This helps with mocking. ''' + +        tmp_filename = filename + '.yedit' + +        with open(tmp_filename, 'w') as yfd: +            yfd.write(contents) + +        os.rename(tmp_filename, filename) + +    def write(self): +        ''' write to file ''' +        if not self.filename: +            raise YeditException('Please specify a filename.') + +        if self.backup and self.file_exists(): +            shutil.copy(self.filename, self.filename + '.orig') + +        # Try to set format attributes if supported +        try: +            self.yaml_dict.fa.set_block_style() +        except AttributeError: +            pass + +        # Try to use RoundTripDumper if supported. +        try: +            Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper)) +        except AttributeError: +            Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False)) + +        return (True, self.yaml_dict) + +    def read(self): +        ''' read from file ''' +        # check if it exists +        if self.filename is None or not self.file_exists(): +            return None + +        contents = None +        with open(self.filename) as yfd: +            contents = yfd.read() + +        return contents + +    def file_exists(self): +        ''' return whether file exists ''' +        if os.path.exists(self.filename): +            return True + +        return False + +    def load(self, content_type='yaml'): +        ''' return yaml file ''' +        contents = self.read() + +        if not contents and not self.content: +            return None + +        if self.content: +            if isinstance(self.content, dict): +                self.yaml_dict = self.content +                return self.yaml_dict +            elif isinstance(self.content, str): +                contents = self.content + +        # check if it is yaml +        try: +            if content_type == 'yaml' and contents: +                # Try to set format attributes if supported +                try: +                    self.yaml_dict.fa.set_block_style() +                except AttributeError: +                    pass + +                # Try to use RoundTripLoader if supported. +                try: +                    self.yaml_dict = yaml.safe_load(contents, yaml.RoundTripLoader) +                except AttributeError: +                    self.yaml_dict = yaml.safe_load(contents) + +                # Try to set format attributes if supported +                try: +                    self.yaml_dict.fa.set_block_style() +                except AttributeError: +                    pass + +            elif content_type == 'json' and contents: +                self.yaml_dict = json.loads(contents) +        except yaml.YAMLError as err: +            # Error loading yaml or json +            raise YeditException('Problem with loading yaml file. 
%s' % err) + +        return self.yaml_dict + +    def get(self, key): +        ''' get a specified key''' +        try: +            entry = Yedit.get_entry(self.yaml_dict, key, self.separator) +        except KeyError: +            entry = None + +        return entry + +    def pop(self, path, key_or_item): +        ''' remove a key, value pair from a dict or an item for a list''' +        try: +            entry = Yedit.get_entry(self.yaml_dict, path, self.separator) +        except KeyError: +            entry = None + +        if entry is None: +            return (False, self.yaml_dict) + +        if isinstance(entry, dict): +            # AUDIT:maybe-no-member makes sense due to fuzzy types +            # pylint: disable=maybe-no-member +            if key_or_item in entry: +                entry.pop(key_or_item) +                return (True, self.yaml_dict) +            return (False, self.yaml_dict) + +        elif isinstance(entry, list): +            # AUDIT:maybe-no-member makes sense due to fuzzy types +            # pylint: disable=maybe-no-member +            ind = None +            try: +                ind = entry.index(key_or_item) +            except ValueError: +                return (False, self.yaml_dict) + +            entry.pop(ind) +            return (True, self.yaml_dict) + +        return (False, self.yaml_dict) + +    def delete(self, path): +        ''' remove path from a dict''' +        try: +            entry = Yedit.get_entry(self.yaml_dict, path, self.separator) +        except KeyError: +            entry = None + +        if entry is None: +            return (False, self.yaml_dict) + +        result = Yedit.remove_entry(self.yaml_dict, path, self.separator) +        if not result: +            return (False, self.yaml_dict) + +        return (True, self.yaml_dict) + +    def exists(self, path, value): +        ''' check if value exists at path''' +        try: +            entry = Yedit.get_entry(self.yaml_dict, path, self.separator) +        except KeyError: +            entry = None + +        if isinstance(entry, list): +            if value in entry: +                return True +            return False + +        elif isinstance(entry, dict): +            if isinstance(value, dict): +                rval = False +                for key, val in value.items(): +                    if entry[key] != val: +                        rval = False +                        break +                else: +                    rval = True +                return rval + +            return value in entry + +        return entry == value + +    def append(self, path, value): +        '''append value to a list''' +        try: +            entry = Yedit.get_entry(self.yaml_dict, path, self.separator) +        except KeyError: +            entry = None + +        if entry is None: +            self.put(path, []) +            entry = Yedit.get_entry(self.yaml_dict, path, self.separator) +        if not isinstance(entry, list): +            return (False, self.yaml_dict) + +        # AUDIT:maybe-no-member makes sense due to loading data from +        # a serialized format. 
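+        # Illustrative behaviour, not part of the upstream diff: because
+        # append() falls back to put(path, []) when the path is missing,
+        # the list is created on first use, e.g.
+        #   Yedit(content={}).append('a.items', 'first')
+        # returns (True, {'a': {'items': ['first']}}).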
+        # pylint: disable=maybe-no-member +        entry.append(value) +        return (True, self.yaml_dict) + +    # pylint: disable=too-many-arguments +    def update(self, path, value, index=None, curr_value=None): +        ''' put path, value into a dict ''' +        try: +            entry = Yedit.get_entry(self.yaml_dict, path, self.separator) +        except KeyError: +            entry = None + +        if isinstance(entry, dict): +            # AUDIT:maybe-no-member makes sense due to fuzzy types +            # pylint: disable=maybe-no-member +            if not isinstance(value, dict): +                raise YeditException('Cannot replace key, value entry in ' + +                                     'dict with non-dict type. value=[%s] [%s]' % (value, type(value)))  # noqa: E501 + +            entry.update(value) +            return (True, self.yaml_dict) + +        elif isinstance(entry, list): +            # AUDIT:maybe-no-member makes sense due to fuzzy types +            # pylint: disable=maybe-no-member +            ind = None +            if curr_value: +                try: +                    ind = entry.index(curr_value) +                except ValueError: +                    return (False, self.yaml_dict) + +            elif index is not None: +                ind = index + +            if ind is not None and entry[ind] != value: +                entry[ind] = value +                return (True, self.yaml_dict) + +            # see if it exists in the list +            try: +                ind = entry.index(value) +            except ValueError: +                # doesn't exist, append it +                entry.append(value) +                return (True, self.yaml_dict) + +            # already exists, return +            if ind is not None: +                return (False, self.yaml_dict) +        return (False, self.yaml_dict) + +    def put(self, path, value): +        ''' put path, value into a dict ''' +        try: +            entry = Yedit.get_entry(self.yaml_dict, path, self.separator) +        except KeyError: +            entry = None + +        if entry == value: +            return (False, self.yaml_dict) + +        # deepcopy didn't work +        # Try to use ruamel.yaml and fallback to pyyaml +        try: +            tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, +                                                      default_flow_style=False), +                                 yaml.RoundTripLoader) +        except AttributeError: +            tmp_copy = copy.deepcopy(self.yaml_dict) + +        # set the format attributes if available +        try: +            tmp_copy.fa.set_block_style() +        except AttributeError: +            pass + +        result = Yedit.add_entry(tmp_copy, path, value, self.separator) +        if not result: +            return (False, self.yaml_dict) + +        self.yaml_dict = tmp_copy + +        return (True, self.yaml_dict) + +    def create(self, path, value): +        ''' create a yaml file ''' +        if not self.file_exists(): +            # deepcopy didn't work +            # Try to use ruamel.yaml and fallback to pyyaml +            try: +                tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, +                                                          default_flow_style=False), +                                     yaml.RoundTripLoader) +            except AttributeError: +                tmp_copy = copy.deepcopy(self.yaml_dict) + +            # set the format attributes if 
available +            try: +                tmp_copy.fa.set_block_style() +            except AttributeError: +                pass + +            result = Yedit.add_entry(tmp_copy, path, value, self.separator) +            if result: +                self.yaml_dict = tmp_copy +                return (True, self.yaml_dict) + +        return (False, self.yaml_dict) + +    @staticmethod +    def get_curr_value(invalue, val_type): +        '''return the current value''' +        if invalue is None: +            return None + +        curr_value = invalue +        if val_type == 'yaml': +            curr_value = yaml.load(invalue) +        elif val_type == 'json': +            curr_value = json.loads(invalue) + +        return curr_value + +    @staticmethod +    def parse_value(inc_value, vtype=''): +        '''determine value type passed''' +        true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE', +                      'on', 'On', 'ON', ] +        false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE', +                       'off', 'Off', 'OFF'] + +        # It came in as a string but you didn't specify value_type as string +        # we will convert to bool if it matches any of the above cases +        if isinstance(inc_value, str) and 'bool' in vtype: +            if inc_value not in true_bools and inc_value not in false_bools: +                raise YeditException('Not a boolean type. str=[%s] vtype=[%s]' +                                     % (inc_value, vtype)) +        elif isinstance(inc_value, bool) and 'str' in vtype: +            inc_value = str(inc_value) + +        # If vtype is not str then go ahead and attempt to yaml load it. +        if isinstance(inc_value, str) and 'str' not in vtype: +            try: +                inc_value = yaml.load(inc_value) +            except Exception: +                raise YeditException('Could not determine type of incoming ' + +                                     'value. value=[%s] vtype=[%s]' +                                     % (type(inc_value), vtype)) + +        return inc_value + +    # pylint: disable=too-many-return-statements,too-many-branches +    @staticmethod +    def run_ansible(module): +        '''perform the idempotent crud operations''' +        yamlfile = Yedit(filename=module.params['src'], +                         backup=module.params['backup'], +                         separator=module.params['separator']) + +        if module.params['src']: +            rval = yamlfile.load() + +            if yamlfile.yaml_dict is None and \ +               module.params['state'] != 'present': +                return {'failed': True, +                        'msg': 'Error opening file [%s].  
Verify that the ' +
+                               'file exists, that it has correct' +
+                               ' permissions, and is valid yaml.'}
+
+        if module.params['state'] == 'list':
+            if module.params['content']:
+                content = Yedit.parse_value(module.params['content'],
+                                            module.params['content_type'])
+                yamlfile.yaml_dict = content
+
+            if module.params['key']:
+                rval = yamlfile.get(module.params['key']) or {}
+
+            return {'changed': False, 'result': rval, 'state': "list"}
+
+        elif module.params['state'] == 'absent':
+            if module.params['content']:
+                content = Yedit.parse_value(module.params['content'],
+                                            module.params['content_type'])
+                yamlfile.yaml_dict = content
+
+            if module.params['update']:
+                rval = yamlfile.pop(module.params['key'],
+                                    module.params['value'])
+            else:
+                rval = yamlfile.delete(module.params['key'])
+
+            if rval[0] and module.params['src']:
+                yamlfile.write()
+
+            return {'changed': rval[0], 'result': rval[1], 'state': "absent"}
+
+        elif module.params['state'] == 'present':
+            # check if content is different than what is in the file
+            if module.params['content']:
+                content = Yedit.parse_value(module.params['content'],
+                                            module.params['content_type'])
+
+                # We had no edits to make and the contents are the same
+                if yamlfile.yaml_dict == content and \
+                   module.params['value'] is None:
+                    return {'changed': False,
+                            'result': yamlfile.yaml_dict,
+                            'state': "present"}
+
+                yamlfile.yaml_dict = content
+
+            # we were passed a value; parse it
+            if module.params['value']:
+                value = Yedit.parse_value(module.params['value'],
+                                          module.params['value_type'])
+                key = module.params['key']
+                if module.params['update']:
+                    # pylint: disable=line-too-long
+                    curr_value = Yedit.get_curr_value(Yedit.parse_value(module.params['curr_value']),  # noqa: E501
+                                                      module.params['curr_value_format'])  # noqa: E501
+
+                    rval = yamlfile.update(key, value, module.params['index'], curr_value)  # noqa: E501
+
+                elif module.params['append']:
+                    rval = yamlfile.append(key, value)
+                else:
+                    rval = yamlfile.put(key, value)
+
+                if rval[0] and module.params['src']:
+                    yamlfile.write()
+
+                return {'changed': rval[0],
+                        'result': rval[1], 'state': "present"}
+
+            # no edits to make
+            if module.params['src']:
+                # pylint: disable=redefined-variable-type
+                rval = yamlfile.write()
+                return {'changed': rval[0],
+                        'result': rval[1],
+                        'state': "present"}
+
+        return {'failed': True, 'msg': 'Unknown state passed'}
+
+# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*-
-*- -*- + +# -*- -*- -*- Begin included fragment: lib/base.py -*- -*- -*- +# pylint: disable=too-many-lines +# noqa: E301,E302,E303,T001 + + +class OpenShiftCLIError(Exception): +    '''Exception class for openshiftcli''' +    pass + + +ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')] + + +def locate_oc_binary(): +    ''' Find and return oc binary file ''' +    # https://github.com/openshift/openshift-ansible/issues/3410 +    # oc can be in /usr/local/bin in some cases, but that may not +    # be in $PATH due to ansible/sudo +    paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS + +    oc_binary = 'oc' + +    # Use shutil.which if it is available, otherwise fallback to a naive path search +    try: +        which_result = shutil.which(oc_binary, path=os.pathsep.join(paths)) +        if which_result is not None: +            oc_binary = which_result +    except AttributeError: +        for path in paths: +            if os.path.exists(os.path.join(path, oc_binary)): +                oc_binary = os.path.join(path, oc_binary) +                break + +    return oc_binary + + +# pylint: disable=too-few-public-methods +class OpenShiftCLI(object): +    ''' Class to wrap the command line tools ''' +    def __init__(self, +                 namespace, +                 kubeconfig='/etc/origin/master/admin.kubeconfig', +                 verbose=False, +                 all_namespaces=False): +        ''' Constructor for OpenshiftCLI ''' +        self.namespace = namespace +        self.verbose = verbose +        self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig) +        self.all_namespaces = all_namespaces +        self.oc_binary = locate_oc_binary() + +    # Pylint allows only 5 arguments to be passed. 
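+    # Hypothetical call, not from this diff: _replace_content below applies
+    # yedit-style dotted-key edits to the live object and only shells out to
+    # `oc replace` when one of the puts reports a change, e.g.
+    #   OpenShiftCLI('default')._replace_content('configmap', 'testmap',
+    #                                            {'data.title': 'sysadmin'})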
+    # pylint: disable=too-many-arguments +    def _replace_content(self, resource, rname, content, force=False, sep='.'): +        ''' replace the current object with the content ''' +        res = self._get(resource, rname) +        if not res['results']: +            return res + +        fname = Utils.create_tmpfile(rname + '-') + +        yed = Yedit(fname, res['results'][0], separator=sep) +        changes = [] +        for key, value in content.items(): +            changes.append(yed.put(key, value)) + +        if any([change[0] for change in changes]): +            yed.write() + +            atexit.register(Utils.cleanup, [fname]) + +            return self._replace(fname, force) + +        return {'returncode': 0, 'updated': False} + +    def _replace(self, fname, force=False): +        '''replace the current object with oc replace''' +        cmd = ['replace', '-f', fname] +        if force: +            cmd.append('--force') +        return self.openshift_cmd(cmd) + +    def _create_from_content(self, rname, content): +        '''create a temporary file and then call oc create on it''' +        fname = Utils.create_tmpfile(rname + '-') +        yed = Yedit(fname, content=content) +        yed.write() + +        atexit.register(Utils.cleanup, [fname]) + +        return self._create(fname) + +    def _create(self, fname): +        '''call oc create on a filename''' +        return self.openshift_cmd(['create', '-f', fname]) + +    def _delete(self, resource, rname, selector=None): +        '''call oc delete on a resource''' +        cmd = ['delete', resource, rname] +        if selector: +            cmd.append('--selector=%s' % selector) + +        return self.openshift_cmd(cmd) + +    def _process(self, template_name, create=False, params=None, template_data=None):  # noqa: E501 +        '''process a template + +           template_name: the name of the template to process +           create: whether to send to oc create after processing +           params: the parameters for the template +           template_data: the incoming template's data; instead of a file +        ''' +        cmd = ['process'] +        if template_data: +            cmd.extend(['-f', '-']) +        else: +            cmd.append(template_name) +        if params: +            param_str = ["%s=%s" % (key, value) for key, value in params.items()] +            cmd.append('-v') +            cmd.extend(param_str) + +        results = self.openshift_cmd(cmd, output=True, input_data=template_data) + +        if results['returncode'] != 0 or not create: +            return results + +        fname = Utils.create_tmpfile(template_name + '-') +        yed = Yedit(fname, results['results']) +        yed.write() + +        atexit.register(Utils.cleanup, [fname]) + +        return self.openshift_cmd(['create', '-f', fname]) + +    def _get(self, resource, rname=None, selector=None): +        '''return a resource by name ''' +        cmd = ['get', resource] +        if selector: +            cmd.append('--selector=%s' % selector) +        elif rname: +            cmd.append(rname) + +        cmd.extend(['-o', 'json']) + +        rval = self.openshift_cmd(cmd, output=True) + +        # Ensure results are retuned in an array +        if 'items' in rval: +            rval['results'] = rval['items'] +        elif not isinstance(rval['results'], list): +            rval['results'] = [rval['results']] + +        return rval + +    def _schedulable(self, node=None, selector=None, schedulable=True): +        ''' perform oadm 
manage-node scheduable ''' +        cmd = ['manage-node'] +        if node: +            cmd.extend(node) +        else: +            cmd.append('--selector=%s' % selector) + +        cmd.append('--schedulable=%s' % schedulable) + +        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')  # noqa: E501 + +    def _list_pods(self, node=None, selector=None, pod_selector=None): +        ''' perform oadm list pods + +            node: the node in which to list pods +            selector: the label selector filter if provided +            pod_selector: the pod selector filter if provided +        ''' +        cmd = ['manage-node'] +        if node: +            cmd.extend(node) +        else: +            cmd.append('--selector=%s' % selector) + +        if pod_selector: +            cmd.append('--pod-selector=%s' % pod_selector) + +        cmd.extend(['--list-pods', '-o', 'json']) + +        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') + +    # pylint: disable=too-many-arguments +    def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False): +        ''' perform oadm manage-node evacuate ''' +        cmd = ['manage-node'] +        if node: +            cmd.extend(node) +        else: +            cmd.append('--selector=%s' % selector) + +        if dry_run: +            cmd.append('--dry-run') + +        if pod_selector: +            cmd.append('--pod-selector=%s' % pod_selector) + +        if grace_period: +            cmd.append('--grace-period=%s' % int(grace_period)) + +        if force: +            cmd.append('--force') + +        cmd.append('--evacuate') + +        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') + +    def _version(self): +        ''' return the openshift version''' +        return self.openshift_cmd(['version'], output=True, output_type='raw') + +    def _import_image(self, url=None, name=None, tag=None): +        ''' perform image import ''' +        cmd = ['import-image'] + +        image = '{0}'.format(name) +        if tag: +            image += ':{0}'.format(tag) + +        cmd.append(image) + +        if url: +            cmd.append('--from={0}/{1}'.format(url, image)) + +        cmd.append('-n{0}'.format(self.namespace)) + +        cmd.append('--confirm') +        return self.openshift_cmd(cmd) + +    def _run(self, cmds, input_data): +        ''' Actually executes the command. This makes mocking easier. 
'''
+        curr_env = os.environ.copy()
+        curr_env.update({'KUBECONFIG': self.kubeconfig})
+        proc = subprocess.Popen(cmds,
+                                stdin=subprocess.PIPE,
+                                stdout=subprocess.PIPE,
+                                stderr=subprocess.PIPE,
+                                env=curr_env)
+
+        stdout, stderr = proc.communicate(input_data)
+
+        return proc.returncode, stdout.decode(), stderr.decode()
+
+    # pylint: disable=too-many-arguments,too-many-branches
+    def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
+        '''Base command for oc '''
+        cmds = [self.oc_binary]
+
+        if oadm:
+            cmds.append('adm')
+
+        cmds.extend(cmd)
+
+        if self.all_namespaces:
+            cmds.extend(['--all-namespaces'])
+        elif self.namespace is not None and self.namespace.lower() not in ['none', 'empty']:  # E501
+            cmds.extend(['-n', self.namespace])
+
+        rval = {}
+        results = ''
+        err = None
+
+        if self.verbose:
+            print(' '.join(cmds))
+
+        try:
+            returncode, stdout, stderr = self._run(cmds, input_data)
+        except OSError as ex:
+            returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
+
+        rval = {"returncode": returncode,
+                "results": results,
+                "cmd": ' '.join(cmds)}
+
+        if returncode == 0:
+            if output:
+                if output_type == 'json':
+                    try:
+                        rval['results'] = json.loads(stdout)
+                    except ValueError as verr:
+                        if "No JSON object could be decoded" in verr.args:
+                            err = verr.args
+                elif output_type == 'raw':
+                    rval['results'] = stdout
+
+            if self.verbose:
+                print("STDOUT: {0}".format(stdout))
+                print("STDERR: {0}".format(stderr))
+
+            if err:
+                rval.update({"err": err,
+                             "stderr": stderr,
+                             "stdout": stdout,
+                             "cmd": cmds})
+
+        else:
+            rval.update({"stderr": stderr,
+                         "stdout": stdout,
+                         "results": {}})
+
+        return rval
+
+
+class Utils(object):
+    ''' utilities for openshiftcli modules '''
+
+    @staticmethod
+    def _write(filename, contents):
+        ''' Actually write the file contents to disk. This helps with mocking.
''' + +        with open(filename, 'w') as sfd: +            sfd.write(contents) + +    @staticmethod +    def create_tmp_file_from_contents(rname, data, ftype='yaml'): +        ''' create a file in tmp with name and contents''' + +        tmp = Utils.create_tmpfile(prefix=rname) + +        if ftype == 'yaml': +            # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage +            # pylint: disable=no-member +            if hasattr(yaml, 'RoundTripDumper'): +                Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper)) +            else: +                Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False)) + +        elif ftype == 'json': +            Utils._write(tmp, json.dumps(data)) +        else: +            Utils._write(tmp, data) + +        # Register cleanup when module is done +        atexit.register(Utils.cleanup, [tmp]) +        return tmp + +    @staticmethod +    def create_tmpfile_copy(inc_file): +        '''create a temporary copy of a file''' +        tmpfile = Utils.create_tmpfile('lib_openshift-') +        Utils._write(tmpfile, open(inc_file).read()) + +        # Cleanup the tmpfile +        atexit.register(Utils.cleanup, [tmpfile]) + +        return tmpfile + +    @staticmethod +    def create_tmpfile(prefix='tmp'): +        ''' Generates and returns a temporary file name ''' + +        with tempfile.NamedTemporaryFile(prefix=prefix, delete=False) as tmp: +            return tmp.name + +    @staticmethod +    def create_tmp_files_from_contents(content, content_type=None): +        '''Turn an array of dict: filename, content into a files array''' +        if not isinstance(content, list): +            content = [content] +        files = [] +        for item in content: +            path = Utils.create_tmp_file_from_contents(item['path'] + '-', +                                                       item['data'], +                                                       ftype=content_type) +            files.append({'name': os.path.basename(item['path']), +                          'path': path}) +        return files + +    @staticmethod +    def cleanup(files): +        '''Clean up on exit ''' +        for sfile in files: +            if os.path.exists(sfile): +                if os.path.isdir(sfile): +                    shutil.rmtree(sfile) +                elif os.path.isfile(sfile): +                    os.remove(sfile) + +    @staticmethod +    def exists(results, _name): +        ''' Check to see if the results include the name ''' +        if not results: +            return False + +        if Utils.find_result(results, _name): +            return True + +        return False + +    @staticmethod +    def find_result(results, _name): +        ''' Find the specified result by name''' +        rval = None +        for result in results: +            if 'metadata' in result and result['metadata']['name'] == _name: +                rval = result +                break + +        return rval + +    @staticmethod +    def get_resource_file(sfile, sfile_type='yaml'): +        ''' return the service file ''' +        contents = None +        with open(sfile) as sfd: +            contents = sfd.read() + +        if sfile_type == 'yaml': +            # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage +            # pylint: disable=no-member +            if hasattr(yaml, 'RoundTripLoader'): +                contents = yaml.load(contents, yaml.RoundTripLoader) +            else: +                contents = 
yaml.safe_load(contents) +        elif sfile_type == 'json': +            contents = json.loads(contents) + +        return contents + +    @staticmethod +    def filter_versions(stdout): +        ''' filter the oc version output ''' + +        version_dict = {} +        version_search = ['oc', 'openshift', 'kubernetes'] + +        for line in stdout.strip().split('\n'): +            for term in version_search: +                if not line: +                    continue +                if line.startswith(term): +                    version_dict[term] = line.split()[-1] + +        # horrible hack to get openshift version in Openshift 3.2 +        #  By default "oc version in 3.2 does not return an "openshift" version +        if "openshift" not in version_dict: +            version_dict["openshift"] = version_dict["oc"] + +        return version_dict + +    @staticmethod +    def add_custom_versions(versions): +        ''' create custom versions strings ''' + +        versions_dict = {} + +        for tech, version in versions.items(): +            # clean up "-" from version +            if "-" in version: +                version = version.split("-")[0] + +            if version.startswith('v'): +                versions_dict[tech + '_numeric'] = version[1:].split('+')[0] +                # "v3.3.0.33" is what we have, we want "3.3" +                versions_dict[tech + '_short'] = version[1:4] + +        return versions_dict + +    @staticmethod +    def openshift_installed(): +        ''' check if openshift is installed ''' +        import yum + +        yum_base = yum.YumBase() +        if yum_base.rpmdb.searchNevra(name='atomic-openshift'): +            return True + +        return False + +    # Disabling too-many-branches.  This is a yaml dictionary comparison function +    # pylint: disable=too-many-branches,too-many-return-statements,too-many-statements +    @staticmethod +    def check_def_equal(user_def, result_def, skip_keys=None, debug=False): +        ''' Given a user defined definition, compare it with the results given back by our query.  
''' + +        # Currently these values are autogenerated and we do not need to check them +        skip = ['metadata', 'status'] +        if skip_keys: +            skip.extend(skip_keys) + +        for key, value in result_def.items(): +            if key in skip: +                continue + +            # Both are lists +            if isinstance(value, list): +                if key not in user_def: +                    if debug: +                        print('User data does not have key [%s]' % key) +                        print('User data: %s' % user_def) +                    return False + +                if not isinstance(user_def[key], list): +                    if debug: +                        print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key])) +                    return False + +                if len(user_def[key]) != len(value): +                    if debug: +                        print("List lengths are not equal.") +                        print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value))) +                        print("user_def: %s" % user_def[key]) +                        print("value: %s" % value) +                    return False + +                for values in zip(user_def[key], value): +                    if isinstance(values[0], dict) and isinstance(values[1], dict): +                        if debug: +                            print('sending list - list') +                            print(type(values[0])) +                            print(type(values[1])) +                        result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug) +                        if not result: +                            print('list compare returned false') +                            return False + +                    elif value != user_def[key]: +                        if debug: +                            print('value should be identical') +                            print(user_def[key]) +                            print(value) +                        return False + +            # recurse on a dictionary +            elif isinstance(value, dict): +                if key not in user_def: +                    if debug: +                        print("user_def does not have key [%s]" % key) +                    return False +                if not isinstance(user_def[key], dict): +                    if debug: +                        print("dict returned false: not instance of dict") +                    return False + +                # before passing ensure keys match +                api_values = set(value.keys()) - set(skip) +                user_values = set(user_def[key].keys()) - set(skip) +                if api_values != user_values: +                    if debug: +                        print("keys are not equal in dict") +                        print(user_values) +                        print(api_values) +                    return False + +                result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug) +                if not result: +                    if debug: +                        print("dict returned false") +                        print(result) +                    return False + +            # Verify each key, value pair is the same +            else: +                if key not in user_def or value != user_def[key]: +                    if debug: +                        print("value not equal; 
user_def does not have key") +                        print(key) +                        print(value) +                        if key in user_def: +                            print(user_def[key]) +                    return False + +        if debug: +            print('returning true') +        return True + + +class OpenShiftCLIConfig(object): +    '''Generic Config''' +    def __init__(self, rname, namespace, kubeconfig, options): +        self.kubeconfig = kubeconfig +        self.name = rname +        self.namespace = namespace +        self._options = options + +    @property +    def config_options(self): +        ''' return config options ''' +        return self._options + +    def to_option_list(self): +        '''return all options as a string''' +        return self.stringify() + +    def stringify(self): +        ''' return the options hash as cli params in a string ''' +        rval = [] +        for key in sorted(self.config_options.keys()): +            data = self.config_options[key] +            if data['include'] \ +               and (data['value'] or isinstance(data['value'], int)): +                rval.append('--{}={}'.format(key.replace('_', '-'), data['value'])) + +        return rval + + +# -*- -*- -*- End included fragment: lib/base.py -*- -*- -*- + +# -*- -*- -*- Begin included fragment: class/oc_configmap.py -*- -*- -*- + + +# pylint: disable=too-many-arguments +class OCConfigMap(OpenShiftCLI): +    ''' Openshift ConfigMap Class + +        ConfigMaps are a way to store data inside of objects +    ''' +    def __init__(self, +                 name, +                 from_file, +                 from_literal, +                 state, +                 namespace, +                 kubeconfig='/etc/origin/master/admin.kubeconfig', +                 verbose=False): +        ''' Constructor for OpenshiftOC ''' +        super(OCConfigMap, self).__init__(namespace, kubeconfig=kubeconfig, verbose=verbose) +        self.name = name +        self.state = state +        self._configmap = None +        self._inc_configmap = None +        self.from_file = from_file if from_file is not None else {} +        self.from_literal = from_literal if from_literal is not None else {} + +    @property +    def configmap(self): +        if self._configmap is None: +            self._configmap = self.get() + +        return self._configmap + +    @configmap.setter +    def configmap(self, inc_map): +        self._configmap = inc_map + +    @property +    def inc_configmap(self): +        if self._inc_configmap is None: +            results = self.create(dryrun=True, output=True) +            self._inc_configmap = results['results'] + +        return self._inc_configmap + +    @inc_configmap.setter +    def inc_configmap(self, inc_map): +        self._inc_configmap = inc_map + +    def from_file_to_params(self): +        '''return from_files in a string ready for cli''' +        return ["--from-file={}={}".format(key, value) for key, value in self.from_file.items()] + +    def from_literal_to_params(self): +        '''return from_literal in a string ready for cli''' +        return ["--from-literal={}={}".format(key, value) for key, value in self.from_literal.items()] + +    def get(self): +        '''return a configmap by name ''' +        results = self._get('configmap', self.name) +        if results['returncode'] == 0 and results['results'][0]: +            self.configmap = results['results'][0] + +        if results['returncode'] != 0 and '"{}" not found'.format(self.name) in 
results['stderr']:
+            results['returncode'] = 0
+
+        return results
+
+    def delete(self):
+        '''delete a configmap by name'''
+        return self._delete('configmap', self.name)
+
+    def create(self, dryrun=False, output=False):
+        '''Create a configmap
+
+           :dryrun: Show what would have been done without doing it. default: False
+           :output: Whether to parse output. default: False
+        '''
+
+        cmd = ['create', 'configmap', self.name]
+        if self.from_literal is not None:
+            cmd.extend(self.from_literal_to_params())
+
+        if self.from_file is not None:
+            cmd.extend(self.from_file_to_params())
+
+        if dryrun:
+            cmd.extend(['--dry-run', '-ojson'])
+
+        results = self.openshift_cmd(cmd, output=output)
+
+        return results
+
+    def update(self):
+        '''run update configmap '''
+        return self._replace_content('configmap', self.name, self.inc_configmap)
+
+    def needs_update(self):
+        '''return True if the proposed configmap differs from the current one'''
+        return not Utils.check_def_equal(self.inc_configmap, self.configmap, debug=self.verbose)
+
+    @staticmethod
+    # pylint: disable=too-many-return-statements,too-many-branches
+    # TODO: This function should be refactored into its individual parts.
+    def run_ansible(params, check_mode):
+        '''run the ansible idempotent code'''
+
+        oc_cm = OCConfigMap(params['name'],
+                            params['from_file'],
+                            params['from_literal'],
+                            params['state'],
+                            params['namespace'],
+                            kubeconfig=params['kubeconfig'],
+                            verbose=params['debug'])
+
+        state = params['state']
+
+        api_rval = oc_cm.get()
+
+        if 'failed' in api_rval:
+            return {'failed': True, 'msg': api_rval}
+
+        #####
+        # Get
+        #####
+        if state == 'list':
+            return {'changed': False, 'results': api_rval, 'state': state}
+
+        ########
+        # Delete
+        ########
+        if state == 'absent':
+            if not Utils.exists(api_rval['results'], params['name']):
+                return {'changed': False, 'state': 'absent'}
+
+            if check_mode:
+                return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a delete.'}
+
+            api_rval = oc_cm.delete()
+
+            if api_rval['returncode'] != 0:
+                return {'failed': True, 'msg': api_rval}
+
+            return {'changed': True, 'results': api_rval, 'state': state}
+
+        ########
+        # Create
+        ########
+        if state == 'present':
+            if not Utils.exists(api_rval['results'], params['name']):
+
+                if check_mode:
+                    return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a create.'}
+
+                api_rval = oc_cm.create()
+
+                if api_rval['returncode'] != 0:
+                    return {'failed': True, 'msg': api_rval}
+
+                api_rval = oc_cm.get()
+
+                if api_rval['returncode'] != 0:
+                    return {'failed': True, 'msg': api_rval}
+
+                return {'changed': True, 'results': api_rval, 'state': state}
+
+            ########
+            # Update
+            ########
+            if oc_cm.needs_update():
+
+                api_rval = oc_cm.update()
+
+                if
api_rval['returncode'] != 0: +                    return {'failed': True, 'msg': api_rval} + +                api_rval = oc_cm.get() + +                if api_rval['returncode'] != 0: +                    return {'failed': True, 'msg': api_rval} + +                return {'changed': True, 'results': api_rval, 'state': state} + +            return {'changed': False, 'results': api_rval, 'state': state} + +        return {'failed': True, 'msg': 'Unknown state passed. {}'.format(state)} + +# -*- -*- -*- End included fragment: class/oc_configmap.py -*- -*- -*- + +# -*- -*- -*- Begin included fragment: ansible/oc_configmap.py -*- -*- -*- + + +def main(): +    ''' +    ansible oc module for managing OpenShift configmap objects +    ''' + +    module = AnsibleModule( +        argument_spec=dict( +            kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'), +            state=dict(default='present', type='str', +                       choices=['present', 'absent', 'list']), +            debug=dict(default=False, type='bool'), +            namespace=dict(default='default', type='str'), +            name=dict(default=None, required=True, type='str'), +            from_file=dict(default=None, type='dict'), +            from_literal=dict(default=None, type='dict'), +        ), +        supports_check_mode=True, +    ) + + +    rval = OCConfigMap.run_ansible(module.params, module.check_mode) +    if 'failed' in rval: +        module.fail_json(**rval) + +    module.exit_json(**rval) + +if __name__ == '__main__': +    main() + +# -*- -*- -*- End included fragment: ansible/oc_configmap.py -*- -*- -*- diff --git a/roles/lib_openshift/library/oc_pvc.py b/roles/lib_openshift/library/oc_pvc.py new file mode 100644 index 000000000..df0b0d86a --- /dev/null +++ b/roles/lib_openshift/library/oc_pvc.py @@ -0,0 +1,1733 @@ +#!/usr/bin/env python +# pylint: disable=missing-docstring +# flake8: noqa: T001 +#     ___ ___ _  _ ___ ___    _ _____ ___ ___ +#    / __| __| \| | __| _ \  /_\_   _| __|   \ +#   | (_ | _|| .` | _||   / / _ \| | | _|| |) | +#    \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____ +#   |   \ / _ \  | \| |/ _ \_   _| | __|   \_ _|_   _| +#   | |) | (_) | | .` | (_) || |   | _|| |) | |  | | +#   |___/ \___/  |_|\_|\___/ |_|   |___|___/___| |_| +# +# Copyright 2016 Red Hat, Inc. and/or its affiliates +# and other contributors as indicated by the @author tags. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +#    http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
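+# A note on the import fragment below (illustrative, not part of the diff):
+# the generated modules prefer ruamel.yaml and fall back to PyYAML, which is
+# why Yedit wraps every RoundTrip* call in try/except AttributeError:
+#   try:
+#       import ruamel.yaml as yaml   # RoundTrip API, preserves formatting
+#   except ImportError:
+#       import yaml                  # plain PyYAML
+#   hasattr(yaml, 'RoundTripDumper')  # True only under ruamel.yaml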
+#
+
+# -*- -*- -*- Begin included fragment: lib/import.py -*- -*- -*-
+'''
+   OpenShiftCLI class that wraps the oc commands in a subprocess
+'''
+# pylint: disable=too-many-lines
+
+from __future__ import print_function
+import atexit
+import copy
+import json
+import os
+import re
+import shutil
+import subprocess
+import tempfile
+# pylint: disable=import-error
+try:
+    import ruamel.yaml as yaml
+except ImportError:
+    import yaml
+
+from ansible.module_utils.basic import AnsibleModule
+
+# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: doc/pvc -*- -*- -*-
+
+DOCUMENTATION = '''
+---
+module: oc_pvc
+short_description: Create, modify, and idempotently manage openshift persistent volume claims
+description:
+  - Modify openshift persistent volume claims programmatically.
+options:
+  state:
+    description:
+    - Supported states, present, absent, list
+    - present - will ensure object is created or updated to the value specified
+    - list - will return a pvc
+    - absent - will remove a pvc
+    required: False
+    default: present
+    choices: ["present", 'absent', 'list']
+    aliases: []
+  kubeconfig:
+    description:
+    - The path for the kubeconfig file to use for authentication
+    required: false
+    default: /etc/origin/master/admin.kubeconfig
+    aliases: []
+  debug:
+    description:
+    - Turn on debug output.
+    required: false
+    default: False
+    aliases: []
+  name:
+    description:
+    - Name of the object that is being queried.
+    required: true
+    default: None
+    aliases: []
+  namespace:
+    description:
+    - The namespace where the object lives.
+    required: true
+    default: None
+    aliases: []
+  volume_capacity:
+    description:
+    - The requested volume capacity
+    required: False
+    default: 1G
+    aliases: []
+  access_modes:
+    description:
+    - The access modes allowed for the pvc
+    - Expects a list
+    required: False
+    default: ReadWriteOnce
+    choices:
+    - ReadWriteOnce
+    - ReadOnlyMany
+    - ReadWriteMany
+    aliases: []
+author:
+- "Kenny Woodson <kwoodson@redhat.com>"
+extends_documentation_fragment: []
+'''
+
+EXAMPLES = '''
+- name: create a pvc
+  oc_pvc:
+    namespace: awesomeapp
+    name: dbstorage
+    access_modes:
+    - ReadWriteOnce
+    volume_capacity: 5G
+  register: pvcout
+'''
+
+# -*- -*- -*- End included fragment: doc/pvc -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
+# pylint: disable=undefined-variable,missing-docstring
+# noqa: E301,E302
+
+
+class YeditException(Exception):
+    ''' Exception class for Yedit '''
+    pass
+
+
+# pylint: disable=too-many-public-methods
+class Yedit(object):
+    ''' Class to modify yaml files '''
+    re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
+    re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+    com_sep = set(['.', '#', '|', ':'])
+
+    # pylint: disable=too-many-arguments
+    def __init__(self,
+                 filename=None,
+                 content=None,
+                 content_type='yaml',
+                 separator='.',
+                 backup=False):
+        self.content = content
+        self._separator = separator
+        self.filename = filename
+        self.__yaml_dict = content
+        self.content_type = content_type
+        self.backup = backup
+        self.load(content_type=self.content_type)
+        if self.__yaml_dict is None:
+            self.__yaml_dict = {}
+
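+    # A minimal sketch of the dotted-key interface this class exposes, using
+    # hypothetical data (ruamel.yaml is preferred when importable, with
+    # PyYAML as the fallback):
+    #
+    #   yed = Yedit(content={'a': {'b': 'c'}})
+    #   yed.get('a.b')        # -> 'c'
+    #   yed.put('a.b', 'd')   # -> (True, {'a': {'b': 'd'}})
+    #   yed.separator = '#'   # keys are then addressed as 'a#b'
+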
+    @property
+    def separator(self):
+        ''' getter method for separator '''
+        return self._separator
+
+    @separator.setter
+    def separator(self, inc_sep):
+        ''' setter method for separator '''
+        self._separator = inc_sep
+
+    @property
+    def yaml_dict(self):
+        ''' getter method for yaml_dict '''
+        return self.__yaml_dict
+
+    @yaml_dict.setter
+    def yaml_dict(self, value):
+        ''' setter method for yaml_dict '''
+        self.__yaml_dict = value
+
+    @staticmethod
+    def parse_key(key, sep='.'):
+        '''parse the key allowing the appropriate separator'''
+        common_separators = list(Yedit.com_sep - set([sep]))
+        return re.findall(Yedit.re_key % ''.join(common_separators), key)
+
+    @staticmethod
+    def valid_key(key, sep='.'):
+        '''validate the incoming key'''
+        common_separators = list(Yedit.com_sep - set([sep]))
+        if not re.match(Yedit.re_valid_key % ''.join(common_separators), key):
+            return False
+
+        return True
+
+    @staticmethod
+    def remove_entry(data, key, sep='.'):
+        ''' remove data at location key '''
+        if key == '' and isinstance(data, dict):
+            data.clear()
+            return True
+        elif key == '' and isinstance(data, list):
+            del data[:]
+            return True
+
+        if not (key and Yedit.valid_key(key, sep)) and \
+           isinstance(data, (list, dict)):
+            return None
+
+        key_indexes = Yedit.parse_key(key, sep)
+        for arr_ind, dict_key in key_indexes[:-1]:
+            if dict_key and isinstance(data, dict):
+                data = data.get(dict_key, None)
+            elif (arr_ind and isinstance(data, list) and
+                  int(arr_ind) <= len(data) - 1):
+                data = data[int(arr_ind)]
+            else:
+                return None
+
+        # process last index for remove
+        # expected list entry
+        if key_indexes[-1][0]:
+            if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:  # noqa: E501
+                del data[int(key_indexes[-1][0])]
+                return True
+
+        # expected dict entry
+        elif key_indexes[-1][1]:
+            if isinstance(data, dict):
+                del data[key_indexes[-1][1]]
+                return True
+
+    @staticmethod
+    def add_entry(data, key, item=None, sep='.'):
+        ''' Add an item to a dictionary with key notation a.b.c
+            d = {'a': {'b': 'c'}}
+            key = a#b
+            return c
+        '''
+        if key == '':
+            pass
+        elif (not (key and Yedit.valid_key(key, sep)) and
+              isinstance(data, (list, dict))):
+            return None
+
+        key_indexes = Yedit.parse_key(key, sep)
+        for arr_ind, dict_key in key_indexes[:-1]:
+            if dict_key:
+                if isinstance(data, dict) and dict_key in data and data[dict_key]:  # noqa: E501
+                    data = data[dict_key]
+                    continue
+
+                elif data and not isinstance(data, dict):
+                    raise YeditException("Unexpected item type found while going through key " +
+                                         "path: {} (at key: {})".format(key, dict_key))
+
+                data[dict_key] = {}
+                data = data[dict_key]
+
+            elif (arr_ind and isinstance(data, list) and
+                  int(arr_ind) <= len(data) - 1):
+                data = data[int(arr_ind)]
+            else:
+                raise YeditException("Unexpected item type found while going through key path: {}".format(key))
+
+        if key == '':
+            data = item
+
+        # process last index for add
+        # expected list entry
+        elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:  # noqa: E501
+            data[int(key_indexes[-1][0])] = item
+
+        # expected dict entry
+        elif key_indexes[-1][1] and isinstance(data, dict):
+            data[key_indexes[-1][1]] = item
+
+        # didn't add/update to an existing list, nor add/update key to a dict
+        # so we must have been provided some syntax like a.b.c[<int>] = "data" for a
+        # non-existent array
+        else:
+            raise YeditException("Error adding to object at path: {}".format(key))
+
+        return data
+
+    @staticmethod
+    def get_entry(data, key, sep='.'):
+        ''' Get an item from a dictionary with key notation a.b.c
+            d = {'a': {'b': 'c'}}
+            key = a.b
+            return c
+        '''
+        if key == '':
+            pass
+        elif (not (key and Yedit.valid_key(key, sep)) and
+              isinstance(data, (list, dict))):
+            return None
+
+        key_indexes = Yedit.parse_key(key, sep)
+        for arr_ind, dict_key in key_indexes:
+            if dict_key and isinstance(data, dict):
+                data = data.get(dict_key, None)
+            elif (arr_ind and isinstance(data, list) and
+                  int(arr_ind) <= len(data) - 1):
+                data = data[int(arr_ind)]
+            else:
+                return None
+
+        return data
+
+    @staticmethod
+    def _write(filename, contents):
+        ''' Actually write the file contents to disk. This helps with mocking. '''
+
+        tmp_filename = filename + '.yedit'
+
+        with open(tmp_filename, 'w') as yfd:
+            yfd.write(contents)
+
+        os.rename(tmp_filename, filename)
+
+    def write(self):
+        ''' write to file '''
+        if not self.filename:
+            raise YeditException('Please specify a filename.')
+
+        if self.backup and self.file_exists():
+            shutil.copy(self.filename, self.filename + '.orig')
+
+        # Try to set format attributes if supported
+        try:
+            self.yaml_dict.fa.set_block_style()
+        except AttributeError:
+            pass
+
+        # Try to use RoundTripDumper if supported.
+
+        try:
+            Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
+        except AttributeError:
+            Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False))
+
+        return (True, self.yaml_dict)
+
+    def read(self):
+        ''' read from file '''
+        # check if it exists
+        if self.filename is None or not self.file_exists():
+            return None
+
+        contents = None
+        with open(self.filename) as yfd:
+            contents = yfd.read()
+
+        return contents
+
+    def file_exists(self):
+        ''' return whether file exists '''
+        if os.path.exists(self.filename):
+            return True
+
+        return False
+
+    def load(self, content_type='yaml'):
+        ''' return yaml file '''
+        contents = self.read()
+
+        if not contents and not self.content:
+            return None
+
+        if self.content:
+            if isinstance(self.content, dict):
+                self.yaml_dict = self.content
+                return self.yaml_dict
+            elif isinstance(self.content, str):
+                contents = self.content
+
+        # check if it is yaml
+        try:
+            if content_type == 'yaml' and contents:
+                # Try to set format attributes if supported
+                try:
+                    self.yaml_dict.fa.set_block_style()
+                except AttributeError:
+                    pass
+
+                # Try to use RoundTripLoader if supported.
+                try:
+                    self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
+                except AttributeError:
+                    self.yaml_dict = yaml.safe_load(contents)
+
+                # Try to set format attributes if supported
+                try:
+                    self.yaml_dict.fa.set_block_style()
+                except AttributeError:
+                    pass
+
+            elif content_type == 'json' and contents:
+                self.yaml_dict = json.loads(contents)
+        except yaml.YAMLError as err:
+            # Error loading yaml or json
+            raise YeditException('Problem with loading yaml file. 
%s' % err) + +        return self.yaml_dict + +    def get(self, key): +        ''' get a specified key''' +        try: +            entry = Yedit.get_entry(self.yaml_dict, key, self.separator) +        except KeyError: +            entry = None + +        return entry + +    def pop(self, path, key_or_item): +        ''' remove a key, value pair from a dict or an item for a list''' +        try: +            entry = Yedit.get_entry(self.yaml_dict, path, self.separator) +        except KeyError: +            entry = None + +        if entry is None: +            return (False, self.yaml_dict) + +        if isinstance(entry, dict): +            # AUDIT:maybe-no-member makes sense due to fuzzy types +            # pylint: disable=maybe-no-member +            if key_or_item in entry: +                entry.pop(key_or_item) +                return (True, self.yaml_dict) +            return (False, self.yaml_dict) + +        elif isinstance(entry, list): +            # AUDIT:maybe-no-member makes sense due to fuzzy types +            # pylint: disable=maybe-no-member +            ind = None +            try: +                ind = entry.index(key_or_item) +            except ValueError: +                return (False, self.yaml_dict) + +            entry.pop(ind) +            return (True, self.yaml_dict) + +        return (False, self.yaml_dict) + +    def delete(self, path): +        ''' remove path from a dict''' +        try: +            entry = Yedit.get_entry(self.yaml_dict, path, self.separator) +        except KeyError: +            entry = None + +        if entry is None: +            return (False, self.yaml_dict) + +        result = Yedit.remove_entry(self.yaml_dict, path, self.separator) +        if not result: +            return (False, self.yaml_dict) + +        return (True, self.yaml_dict) + +    def exists(self, path, value): +        ''' check if value exists at path''' +        try: +            entry = Yedit.get_entry(self.yaml_dict, path, self.separator) +        except KeyError: +            entry = None + +        if isinstance(entry, list): +            if value in entry: +                return True +            return False + +        elif isinstance(entry, dict): +            if isinstance(value, dict): +                rval = False +                for key, val in value.items(): +                    if entry[key] != val: +                        rval = False +                        break +                else: +                    rval = True +                return rval + +            return value in entry + +        return entry == value + +    def append(self, path, value): +        '''append value to a list''' +        try: +            entry = Yedit.get_entry(self.yaml_dict, path, self.separator) +        except KeyError: +            entry = None + +        if entry is None: +            self.put(path, []) +            entry = Yedit.get_entry(self.yaml_dict, path, self.separator) +        if not isinstance(entry, list): +            return (False, self.yaml_dict) + +        # AUDIT:maybe-no-member makes sense due to loading data from +        # a serialized format. 
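+        # (A hypothetical illustration: when the path is absent, append()
+        # first creates the list, e.g. Yedit(content={}).append('a.b', 1)
+        # yields {'a': {'b': [1]}}.)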
+        # pylint: disable=maybe-no-member +        entry.append(value) +        return (True, self.yaml_dict) + +    # pylint: disable=too-many-arguments +    def update(self, path, value, index=None, curr_value=None): +        ''' put path, value into a dict ''' +        try: +            entry = Yedit.get_entry(self.yaml_dict, path, self.separator) +        except KeyError: +            entry = None + +        if isinstance(entry, dict): +            # AUDIT:maybe-no-member makes sense due to fuzzy types +            # pylint: disable=maybe-no-member +            if not isinstance(value, dict): +                raise YeditException('Cannot replace key, value entry in ' + +                                     'dict with non-dict type. value=[%s] [%s]' % (value, type(value)))  # noqa: E501 + +            entry.update(value) +            return (True, self.yaml_dict) + +        elif isinstance(entry, list): +            # AUDIT:maybe-no-member makes sense due to fuzzy types +            # pylint: disable=maybe-no-member +            ind = None +            if curr_value: +                try: +                    ind = entry.index(curr_value) +                except ValueError: +                    return (False, self.yaml_dict) + +            elif index is not None: +                ind = index + +            if ind is not None and entry[ind] != value: +                entry[ind] = value +                return (True, self.yaml_dict) + +            # see if it exists in the list +            try: +                ind = entry.index(value) +            except ValueError: +                # doesn't exist, append it +                entry.append(value) +                return (True, self.yaml_dict) + +            # already exists, return +            if ind is not None: +                return (False, self.yaml_dict) +        return (False, self.yaml_dict) + +    def put(self, path, value): +        ''' put path, value into a dict ''' +        try: +            entry = Yedit.get_entry(self.yaml_dict, path, self.separator) +        except KeyError: +            entry = None + +        if entry == value: +            return (False, self.yaml_dict) + +        # deepcopy didn't work +        # Try to use ruamel.yaml and fallback to pyyaml +        try: +            tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, +                                                      default_flow_style=False), +                                 yaml.RoundTripLoader) +        except AttributeError: +            tmp_copy = copy.deepcopy(self.yaml_dict) + +        # set the format attributes if available +        try: +            tmp_copy.fa.set_block_style() +        except AttributeError: +            pass + +        result = Yedit.add_entry(tmp_copy, path, value, self.separator) +        if not result: +            return (False, self.yaml_dict) + +        self.yaml_dict = tmp_copy + +        return (True, self.yaml_dict) + +    def create(self, path, value): +        ''' create a yaml file ''' +        if not self.file_exists(): +            # deepcopy didn't work +            # Try to use ruamel.yaml and fallback to pyyaml +            try: +                tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, +                                                          default_flow_style=False), +                                     yaml.RoundTripLoader) +            except AttributeError: +                tmp_copy = copy.deepcopy(self.yaml_dict) + +            # set the format attributes if 
available +            try: +                tmp_copy.fa.set_block_style() +            except AttributeError: +                pass + +            result = Yedit.add_entry(tmp_copy, path, value, self.separator) +            if result: +                self.yaml_dict = tmp_copy +                return (True, self.yaml_dict) + +        return (False, self.yaml_dict) + +    @staticmethod +    def get_curr_value(invalue, val_type): +        '''return the current value''' +        if invalue is None: +            return None + +        curr_value = invalue +        if val_type == 'yaml': +            curr_value = yaml.load(invalue) +        elif val_type == 'json': +            curr_value = json.loads(invalue) + +        return curr_value + +    @staticmethod +    def parse_value(inc_value, vtype=''): +        '''determine value type passed''' +        true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE', +                      'on', 'On', 'ON', ] +        false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE', +                       'off', 'Off', 'OFF'] + +        # It came in as a string but you didn't specify value_type as string +        # we will convert to bool if it matches any of the above cases +        if isinstance(inc_value, str) and 'bool' in vtype: +            if inc_value not in true_bools and inc_value not in false_bools: +                raise YeditException('Not a boolean type. str=[%s] vtype=[%s]' +                                     % (inc_value, vtype)) +        elif isinstance(inc_value, bool) and 'str' in vtype: +            inc_value = str(inc_value) + +        # If vtype is not str then go ahead and attempt to yaml load it. +        if isinstance(inc_value, str) and 'str' not in vtype: +            try: +                inc_value = yaml.load(inc_value) +            except Exception: +                raise YeditException('Could not determine type of incoming ' + +                                     'value. value=[%s] vtype=[%s]' +                                     % (type(inc_value), vtype)) + +        return inc_value + +    # pylint: disable=too-many-return-statements,too-many-branches +    @staticmethod +    def run_ansible(module): +        '''perform the idempotent crud operations''' +        yamlfile = Yedit(filename=module.params['src'], +                         backup=module.params['backup'], +                         separator=module.params['separator']) + +        if module.params['src']: +            rval = yamlfile.load() + +            if yamlfile.yaml_dict is None and \ +               module.params['state'] != 'present': +                return {'failed': True, +                        'msg': 'Error opening file [%s].  
Verify that the ' +
+                               'file exists, that it has correct' +
+                               ' permissions, and is valid yaml.'}
+
+        if module.params['state'] == 'list':
+            if module.params['content']:
+                content = Yedit.parse_value(module.params['content'],
+                                            module.params['content_type'])
+                yamlfile.yaml_dict = content
+
+            if module.params['key']:
+                rval = yamlfile.get(module.params['key']) or {}
+
+            return {'changed': False, 'result': rval, 'state': "list"}
+
+        elif module.params['state'] == 'absent':
+            if module.params['content']:
+                content = Yedit.parse_value(module.params['content'],
+                                            module.params['content_type'])
+                yamlfile.yaml_dict = content
+
+            if module.params['update']:
+                rval = yamlfile.pop(module.params['key'],
+                                    module.params['value'])
+            else:
+                rval = yamlfile.delete(module.params['key'])
+
+            if rval[0] and module.params['src']:
+                yamlfile.write()
+
+            return {'changed': rval[0], 'result': rval[1], 'state': "absent"}
+
+        elif module.params['state'] == 'present':
+            # check if content is different than what is in the file
+            if module.params['content']:
+                content = Yedit.parse_value(module.params['content'],
+                                            module.params['content_type'])
+
+                # We had no edits to make and the contents are the same
+                if yamlfile.yaml_dict == content and \
+                   module.params['value'] is None:
+                    return {'changed': False,
+                            'result': yamlfile.yaml_dict,
+                            'state': "present"}
+
+                yamlfile.yaml_dict = content
+
+            # we were passed a value; parse it
+            if module.params['value']:
+                value = Yedit.parse_value(module.params['value'],
+                                          module.params['value_type'])
+                key = module.params['key']
+                if module.params['update']:
+                    # pylint: disable=line-too-long
+                    curr_value = Yedit.get_curr_value(Yedit.parse_value(module.params['curr_value']),  # noqa: E501
+                                                      module.params['curr_value_format'])  # noqa: E501
+
+                    rval = yamlfile.update(key, value, module.params['index'], curr_value)  # noqa: E501
+
+                elif module.params['append']:
+                    rval = yamlfile.append(key, value)
+                else:
+                    rval = yamlfile.put(key, value)
+
+                if rval[0] and module.params['src']:
+                    yamlfile.write()
+
+                return {'changed': rval[0],
+                        'result': rval[1], 'state': "present"}
+
+            # no edits to make
+            if module.params['src']:
+                # pylint: disable=redefined-variable-type
+                rval = yamlfile.write()
+                return {'changed': rval[0],
+                        'result': rval[1],
+                        'state': "present"}
+
+        return {'failed': True, 'msg': 'Unknown state passed'}
+
+# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- 
-*- -*- + +# -*- -*- -*- Begin included fragment: lib/base.py -*- -*- -*- +# pylint: disable=too-many-lines +# noqa: E301,E302,E303,T001 + + +class OpenShiftCLIError(Exception): +    '''Exception class for openshiftcli''' +    pass + + +ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')] + + +def locate_oc_binary(): +    ''' Find and return oc binary file ''' +    # https://github.com/openshift/openshift-ansible/issues/3410 +    # oc can be in /usr/local/bin in some cases, but that may not +    # be in $PATH due to ansible/sudo +    paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS + +    oc_binary = 'oc' + +    # Use shutil.which if it is available, otherwise fallback to a naive path search +    try: +        which_result = shutil.which(oc_binary, path=os.pathsep.join(paths)) +        if which_result is not None: +            oc_binary = which_result +    except AttributeError: +        for path in paths: +            if os.path.exists(os.path.join(path, oc_binary)): +                oc_binary = os.path.join(path, oc_binary) +                break + +    return oc_binary + + +# pylint: disable=too-few-public-methods +class OpenShiftCLI(object): +    ''' Class to wrap the command line tools ''' +    def __init__(self, +                 namespace, +                 kubeconfig='/etc/origin/master/admin.kubeconfig', +                 verbose=False, +                 all_namespaces=False): +        ''' Constructor for OpenshiftCLI ''' +        self.namespace = namespace +        self.verbose = verbose +        self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig) +        self.all_namespaces = all_namespaces +        self.oc_binary = locate_oc_binary() + +    # Pylint allows only 5 arguments to be passed. 
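+    # Every helper below funnels through openshift_cmd(), which prepends the
+    # located oc binary and appends the namespace flags before shelling out.
+    # A sketch with hypothetical arguments, assuming the default kubeconfig
+    # exists:
+    #
+    #   cli = OpenShiftCLI('default')
+    #   cli.openshift_cmd(['get', 'pods', '-o', 'json'], output=True)
+    #   # runs: oc get pods -o json -n default, parsing stdout as JSON
+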
+    # pylint: disable=too-many-arguments
+    def _replace_content(self, resource, rname, content, force=False, sep='.'):
+        ''' replace the current object with the content '''
+        res = self._get(resource, rname)
+        if not res['results']:
+            return res
+
+        fname = Utils.create_tmpfile(rname + '-')
+
+        yed = Yedit(fname, res['results'][0], separator=sep)
+        changes = []
+        for key, value in content.items():
+            changes.append(yed.put(key, value))
+
+        if any([change[0] for change in changes]):
+            yed.write()
+
+            atexit.register(Utils.cleanup, [fname])
+
+            return self._replace(fname, force)
+
+        return {'returncode': 0, 'updated': False}
+
+    def _replace(self, fname, force=False):
+        '''replace the current object with oc replace'''
+        cmd = ['replace', '-f', fname]
+        if force:
+            cmd.append('--force')
+        return self.openshift_cmd(cmd)
+
+    def _create_from_content(self, rname, content):
+        '''create a temporary file and then call oc create on it'''
+        fname = Utils.create_tmpfile(rname + '-')
+        yed = Yedit(fname, content=content)
+        yed.write()
+
+        atexit.register(Utils.cleanup, [fname])
+
+        return self._create(fname)
+
+    def _create(self, fname):
+        '''call oc create on a filename'''
+        return self.openshift_cmd(['create', '-f', fname])
+
+    def _delete(self, resource, rname, selector=None):
+        '''call oc delete on a resource'''
+        cmd = ['delete', resource, rname]
+        if selector:
+            cmd.append('--selector=%s' % selector)
+
+        return self.openshift_cmd(cmd)
+
+    def _process(self, template_name, create=False, params=None, template_data=None):  # noqa: E501
+        '''process a template
+
+           template_name: the name of the template to process
+           create: whether to send to oc create after processing
+           params: the parameters for the template
+           template_data: the incoming template's data; instead of a file
+        '''
+        cmd = ['process']
+        if template_data:
+            cmd.extend(['-f', '-'])
+        else:
+            cmd.append(template_name)
+        if params:
+            param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+            cmd.append('-v')
+            cmd.extend(param_str)
+
+        results = self.openshift_cmd(cmd, output=True, input_data=template_data)
+
+        if results['returncode'] != 0 or not create:
+            return results
+
+        fname = Utils.create_tmpfile(template_name + '-')
+        yed = Yedit(fname, results['results'])
+        yed.write()
+
+        atexit.register(Utils.cleanup, [fname])
+
+        return self.openshift_cmd(['create', '-f', fname])
+
+    def _get(self, resource, rname=None, selector=None):
+        '''return a resource by name '''
+        cmd = ['get', resource]
+        if selector:
+            cmd.append('--selector=%s' % selector)
+        elif rname:
+            cmd.append(rname)
+
+        cmd.extend(['-o', 'json'])
+
+        rval = self.openshift_cmd(cmd, output=True)
+
+        # Ensure results are returned in an array
+        if 'items' in rval:
+            rval['results'] = rval['items']
+        elif not isinstance(rval['results'], list):
+            rval['results'] = [rval['results']]
+
+        return rval
+
+    def _schedulable(self, node=None, selector=None, schedulable=True):
+        ''' perform oadm manage-node schedulable '''
+        cmd = ['manage-node']
+        if node:
+            cmd.extend(node)
+        else:
+            cmd.append('--selector=%s' % selector)
+
+        cmd.append('--schedulable=%s' % schedulable)
+
+        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')  # noqa: E501
+
+    def _list_pods(self, node=None, selector=None, pod_selector=None):
+        ''' perform oadm list pods
+
+            node: the node in which to list pods
+            selector: the label selector filter if provided
+            pod_selector: the pod selector filter if provided
+        '''
+        cmd = ['manage-node']
+        if node:
+            cmd.extend(node)
+        else:
+            cmd.append('--selector=%s' % selector)
+
+        if pod_selector:
+            cmd.append('--pod-selector=%s' % pod_selector)
+
+        cmd.extend(['--list-pods', '-o', 'json'])
+
+        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
+
+    # pylint: disable=too-many-arguments
+    def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
+        ''' perform oadm manage-node evacuate '''
+        cmd = ['manage-node']
+        if node:
+            cmd.extend(node)
+        else:
+            cmd.append('--selector=%s' % selector)
+
+        if dry_run:
+            cmd.append('--dry-run')
+
+        if pod_selector:
+            cmd.append('--pod-selector=%s' % pod_selector)
+
+        if grace_period:
+            cmd.append('--grace-period=%s' % int(grace_period))
+
+        if force:
+            cmd.append('--force')
+
+        cmd.append('--evacuate')
+
+        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
+
+    def _version(self):
+        ''' return the openshift version'''
+        return self.openshift_cmd(['version'], output=True, output_type='raw')
+
+    def _import_image(self, url=None, name=None, tag=None):
+        ''' perform image import '''
+        cmd = ['import-image']
+
+        image = '{0}'.format(name)
+        if tag:
+            image += ':{0}'.format(tag)
+
+        cmd.append(image)
+
+        if url:
+            cmd.append('--from={0}/{1}'.format(url, image))
+
+        cmd.append('-n{0}'.format(self.namespace))
+
+        cmd.append('--confirm')
+        return self.openshift_cmd(cmd)
+
+    def _run(self, cmds, input_data):
+        ''' Actually executes the command. This makes mocking easier. '''
+        curr_env = os.environ.copy()
+        curr_env.update({'KUBECONFIG': self.kubeconfig})
+        proc = subprocess.Popen(cmds,
+                                stdin=subprocess.PIPE,
+                                stdout=subprocess.PIPE,
+                                stderr=subprocess.PIPE,
+                                env=curr_env)
+
+        stdout, stderr = proc.communicate(input_data)
+
+        return proc.returncode, stdout.decode(), stderr.decode()
+
+    # pylint: disable=too-many-arguments,too-many-branches
+    def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
+        '''Base command for oc '''
+        cmds = [self.oc_binary]
+
+        if oadm:
+            cmds.append('adm')
+
+        cmds.extend(cmd)
+
+        if self.all_namespaces:
+            cmds.extend(['--all-namespaces'])
+        elif self.namespace is not None and self.namespace.lower() not in ['none', 'empty']:  # E501
+            cmds.extend(['-n', self.namespace])
+
+        rval = {}
+        results = ''
+        err = None
+
+        if self.verbose:
+            print(' '.join(cmds))
+
+        try:
+            returncode, stdout, stderr = self._run(cmds, input_data)
+        except OSError as ex:
+            returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
+
+        rval = {"returncode": returncode,
+                "results": results,
+                "cmd": ' '.join(cmds)}
+
+        if returncode == 0:
+            if output:
+                if output_type == 'json':
+                    try:
+                        rval['results'] = json.loads(stdout)
+                    except ValueError as verr:
+                        if "No JSON object could be decoded" in verr.args:
+                            err = verr.args
+                elif output_type == 'raw':
+                    rval['results'] = stdout
+
+            if self.verbose:
+                print("STDOUT: {0}".format(stdout))
+                print("STDERR: {0}".format(stderr))
+
+            if err:
+                rval.update({"err": err,
+                             "stderr": stderr,
+                             "stdout": stdout,
+                             "cmd": cmds})
+
+        else:
+            rval.update({"stderr": stderr,
+                         "stdout": stdout,
+                         "results": {}})
+
+        return rval
+
+
+class Utils(object):
+    ''' utilities for openshiftcli modules '''
+
+    @staticmethod
+    def _write(filename, contents):
+        ''' Actually write the file contents to disk. This helps with mocking. 
''' + +        with open(filename, 'w') as sfd: +            sfd.write(contents) + +    @staticmethod +    def create_tmp_file_from_contents(rname, data, ftype='yaml'): +        ''' create a file in tmp with name and contents''' + +        tmp = Utils.create_tmpfile(prefix=rname) + +        if ftype == 'yaml': +            # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage +            # pylint: disable=no-member +            if hasattr(yaml, 'RoundTripDumper'): +                Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper)) +            else: +                Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False)) + +        elif ftype == 'json': +            Utils._write(tmp, json.dumps(data)) +        else: +            Utils._write(tmp, data) + +        # Register cleanup when module is done +        atexit.register(Utils.cleanup, [tmp]) +        return tmp + +    @staticmethod +    def create_tmpfile_copy(inc_file): +        '''create a temporary copy of a file''' +        tmpfile = Utils.create_tmpfile('lib_openshift-') +        Utils._write(tmpfile, open(inc_file).read()) + +        # Cleanup the tmpfile +        atexit.register(Utils.cleanup, [tmpfile]) + +        return tmpfile + +    @staticmethod +    def create_tmpfile(prefix='tmp'): +        ''' Generates and returns a temporary file name ''' + +        with tempfile.NamedTemporaryFile(prefix=prefix, delete=False) as tmp: +            return tmp.name + +    @staticmethod +    def create_tmp_files_from_contents(content, content_type=None): +        '''Turn an array of dict: filename, content into a files array''' +        if not isinstance(content, list): +            content = [content] +        files = [] +        for item in content: +            path = Utils.create_tmp_file_from_contents(item['path'] + '-', +                                                       item['data'], +                                                       ftype=content_type) +            files.append({'name': os.path.basename(item['path']), +                          'path': path}) +        return files + +    @staticmethod +    def cleanup(files): +        '''Clean up on exit ''' +        for sfile in files: +            if os.path.exists(sfile): +                if os.path.isdir(sfile): +                    shutil.rmtree(sfile) +                elif os.path.isfile(sfile): +                    os.remove(sfile) + +    @staticmethod +    def exists(results, _name): +        ''' Check to see if the results include the name ''' +        if not results: +            return False + +        if Utils.find_result(results, _name): +            return True + +        return False + +    @staticmethod +    def find_result(results, _name): +        ''' Find the specified result by name''' +        rval = None +        for result in results: +            if 'metadata' in result and result['metadata']['name'] == _name: +                rval = result +                break + +        return rval + +    @staticmethod +    def get_resource_file(sfile, sfile_type='yaml'): +        ''' return the service file ''' +        contents = None +        with open(sfile) as sfd: +            contents = sfd.read() + +        if sfile_type == 'yaml': +            # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage +            # pylint: disable=no-member +            if hasattr(yaml, 'RoundTripLoader'): +                contents = yaml.load(contents, yaml.RoundTripLoader) +            else: +                contents = 
yaml.safe_load(contents) +        elif sfile_type == 'json': +            contents = json.loads(contents) + +        return contents + +    @staticmethod +    def filter_versions(stdout): +        ''' filter the oc version output ''' + +        version_dict = {} +        version_search = ['oc', 'openshift', 'kubernetes'] + +        for line in stdout.strip().split('\n'): +            for term in version_search: +                if not line: +                    continue +                if line.startswith(term): +                    version_dict[term] = line.split()[-1] + +        # horrible hack to get openshift version in Openshift 3.2 +        #  By default "oc version in 3.2 does not return an "openshift" version +        if "openshift" not in version_dict: +            version_dict["openshift"] = version_dict["oc"] + +        return version_dict + +    @staticmethod +    def add_custom_versions(versions): +        ''' create custom versions strings ''' + +        versions_dict = {} + +        for tech, version in versions.items(): +            # clean up "-" from version +            if "-" in version: +                version = version.split("-")[0] + +            if version.startswith('v'): +                versions_dict[tech + '_numeric'] = version[1:].split('+')[0] +                # "v3.3.0.33" is what we have, we want "3.3" +                versions_dict[tech + '_short'] = version[1:4] + +        return versions_dict + +    @staticmethod +    def openshift_installed(): +        ''' check if openshift is installed ''' +        import yum + +        yum_base = yum.YumBase() +        if yum_base.rpmdb.searchNevra(name='atomic-openshift'): +            return True + +        return False + +    # Disabling too-many-branches.  This is a yaml dictionary comparison function +    # pylint: disable=too-many-branches,too-many-return-statements,too-many-statements +    @staticmethod +    def check_def_equal(user_def, result_def, skip_keys=None, debug=False): +        ''' Given a user defined definition, compare it with the results given back by our query.  
''' + +        # Currently these values are autogenerated and we do not need to check them +        skip = ['metadata', 'status'] +        if skip_keys: +            skip.extend(skip_keys) + +        for key, value in result_def.items(): +            if key in skip: +                continue + +            # Both are lists +            if isinstance(value, list): +                if key not in user_def: +                    if debug: +                        print('User data does not have key [%s]' % key) +                        print('User data: %s' % user_def) +                    return False + +                if not isinstance(user_def[key], list): +                    if debug: +                        print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key])) +                    return False + +                if len(user_def[key]) != len(value): +                    if debug: +                        print("List lengths are not equal.") +                        print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value))) +                        print("user_def: %s" % user_def[key]) +                        print("value: %s" % value) +                    return False + +                for values in zip(user_def[key], value): +                    if isinstance(values[0], dict) and isinstance(values[1], dict): +                        if debug: +                            print('sending list - list') +                            print(type(values[0])) +                            print(type(values[1])) +                        result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug) +                        if not result: +                            print('list compare returned false') +                            return False + +                    elif value != user_def[key]: +                        if debug: +                            print('value should be identical') +                            print(user_def[key]) +                            print(value) +                        return False + +            # recurse on a dictionary +            elif isinstance(value, dict): +                if key not in user_def: +                    if debug: +                        print("user_def does not have key [%s]" % key) +                    return False +                if not isinstance(user_def[key], dict): +                    if debug: +                        print("dict returned false: not instance of dict") +                    return False + +                # before passing ensure keys match +                api_values = set(value.keys()) - set(skip) +                user_values = set(user_def[key].keys()) - set(skip) +                if api_values != user_values: +                    if debug: +                        print("keys are not equal in dict") +                        print(user_values) +                        print(api_values) +                    return False + +                result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug) +                if not result: +                    if debug: +                        print("dict returned false") +                        print(result) +                    return False + +            # Verify each key, value pair is the same +            else: +                if key not in user_def or value != user_def[key]: +                    if debug: +                        print("value not equal; 
user_def does not have key")
+                        print(key)
+                        print(value)
+                        if key in user_def:
+                            print(user_def[key])
+                    return False
+
+        if debug:
+            print('returning true')
+        return True
+
+
+class OpenShiftCLIConfig(object):
+    '''Generic Config'''
+    def __init__(self, rname, namespace, kubeconfig, options):
+        self.kubeconfig = kubeconfig
+        self.name = rname
+        self.namespace = namespace
+        self._options = options
+
+    @property
+    def config_options(self):
+        ''' return config options '''
+        return self._options
+
+    def to_option_list(self):
+        '''return all options as a string'''
+        return self.stringify()
+
+    def stringify(self):
+        ''' return the options hash as cli params in a string '''
+        rval = []
+        for key in sorted(self.config_options.keys()):
+            data = self.config_options[key]
+            if data['include'] \
+               and (data['value'] or isinstance(data['value'], int)):
+                rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+
+        return rval
+
+
+# -*- -*- -*- End included fragment: lib/base.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: lib/pvc.py -*- -*- -*-
+
+
+# pylint: disable=too-many-instance-attributes
+class PersistentVolumeClaimConfig(object):
+    ''' Handle pvc options '''
+    # pylint: disable=too-many-arguments
+    def __init__(self,
+                 sname,
+                 namespace,
+                 kubeconfig,
+                 access_modes=None,
+                 vol_capacity='1G'):
+        ''' constructor for handling pvc options '''
+        self.kubeconfig = kubeconfig
+        self.name = sname
+        self.namespace = namespace
+        self.access_modes = access_modes
+        self.vol_capacity = vol_capacity
+        self.data = {}
+
+        self.create_dict()
+
+    def create_dict(self):
+        ''' return a pvc as a dict '''
+        # version
+        self.data['apiVersion'] = 'v1'
+        # kind
+        self.data['kind'] = 'PersistentVolumeClaim'
+        # metadata
+        self.data['metadata'] = {}
+        self.data['metadata']['name'] = self.name
+        # spec
+        self.data['spec'] = {}
+        self.data['spec']['accessModes'] = ['ReadWriteOnce']
+        if self.access_modes:
+            self.data['spec']['accessModes'] = self.access_modes
+
+        # storage capacity
+        self.data['spec']['resources'] = {}
+        self.data['spec']['resources']['requests'] = {}
+        self.data['spec']['resources']['requests']['storage'] = self.vol_capacity
+
+
+# pylint: disable=too-many-instance-attributes,too-many-public-methods
+class PersistentVolumeClaim(Yedit):
+    ''' Class to model a persistent volume claim definition '''
+    access_modes_path = "spec.accessModes"
+    volume_capacity_path = "spec.resources.requests.storage"
+    volume_name_path = "spec.volumeName"
+    bound_path = "status.phase"
+    kind = 'PersistentVolumeClaim'
+
+    def __init__(self, content):
+        '''PersistentVolumeClaim constructor'''
+        super(PersistentVolumeClaim, self).__init__(content=content)
+        self._access_modes = None
+        self._volume_capacity = None
+        self._volume_name = None
+
+    @property
+    def volume_name(self):
+        ''' volume_name property '''
+        if self._volume_name is None:
+            self._volume_name = self.get_volume_name()
+        return self._volume_name
+
+    @volume_name.setter
+    def volume_name(self, data):
+        ''' volume_name property setter'''
+        self._volume_name = data
+
+    @property
+    def access_modes(self):
+        ''' access_modes property '''
+        if self._access_modes is None:
+            self._access_modes = self.get_access_modes()
+            if not isinstance(self._access_modes, list):
+                self._access_modes = list(self._access_modes)
+
+        return self._access_modes
+
+    @access_modes.setter
+    def access_modes(self, data):
+        ''' access_modes property setter'''
+        if not isinstance(data, list):
+            data = list(data)
+
+        self._access_modes = data
+
+    @property
+    def volume_capacity(self):
+        ''' volume_capacity property '''
+        if self._volume_capacity is None:
+            self._volume_capacity = self.get_volume_capacity()
+        return self._volume_capacity
+
+    @volume_capacity.setter
+    def volume_capacity(self, data):
+        ''' volume_capacity property setter'''
+        self._volume_capacity = data
+
+    def get_access_modes(self):
+        '''get access_modes'''
+        return self.get(PersistentVolumeClaim.access_modes_path) or []
+
+    def get_volume_capacity(self):
+        '''get volume_capacity'''
+        return self.get(PersistentVolumeClaim.volume_capacity_path) or []
+
+    def get_volume_name(self):
+        '''get volume_name'''
+        return self.get(PersistentVolumeClaim.volume_name_path) or []
+
+    def is_bound(self):
+        '''return whether volume is bound'''
+        return self.get(PersistentVolumeClaim.bound_path) or []
+
+    #### ADD #####
+    def add_access_mode(self, inc_mode):
+        ''' add an access_mode'''
+        if self.access_modes:
+            self.access_modes.append(inc_mode)
+        else:
+            self.put(PersistentVolumeClaim.access_modes_path, [inc_mode])
+
+        return True
+
+    #### /ADD #####
+
+    #### Remove #####
+    def remove_access_mode(self, inc_mode):
+        ''' remove an access_mode'''
+        try:
+            self.access_modes.remove(inc_mode)
+        except ValueError as _:
+            return False
+
+        return True
+
+    #### /REMOVE #####
+
+    #### UPDATE #####
+    def update_access_mode(self, inc_mode):
+        ''' update an access_mode'''
+        try:
+            index = self.access_modes.index(inc_mode)
+        except ValueError as _:
+            return self.add_access_mode(inc_mode)
+
+        self.access_modes[index] = inc_mode
+
+        return True
+
+    #### /UPDATE #####
+
+    #### FIND ####
+    def find_access_mode(self, inc_mode):
+        ''' find an access_mode '''
+        index = None
+        try:
+            index = self.access_modes.index(inc_mode)
+        except ValueError as _:
+            return index
+
+        return index
+
+# -*- -*- -*- End included fragment: lib/pvc.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: class/oc_pvc.py -*- -*- -*-
+
+
+# pylint: disable=too-many-instance-attributes
+class OCPVC(OpenShiftCLI):
+    ''' Class to wrap the oc command line tools '''
+    kind = 'pvc'
+
+    # pylint allows 5
+    # pylint: disable=too-many-arguments
+    def __init__(self,
+                 config,
+                 verbose=False):
+        ''' Constructor for OCPVC '''
+        super(OCPVC, self).__init__(config.namespace, config.kubeconfig)
+        self.config = config
+        self.namespace = config.namespace
+        self._pvc = None
+
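+    # The pvc property below lazily fetches the live object on first access,
+    # so the exists() and bound() helpers that follow reflect cluster state
+    # without an explicit get(). A sketch with hypothetical names:
+    #
+    #   conf = PersistentVolumeClaimConfig('dbstorage', 'default',
+    #                                      '/etc/origin/master/admin.kubeconfig',
+    #                                      ['ReadWriteOnce'], '5G')
+    #   oc_pvc = OCPVC(conf)
+    #   oc_pvc.exists()  # triggers one 'oc get pvc dbstorage -o json -n default'
+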
+    @property
+    def pvc(self):
+        ''' property function pvc'''
+        if not self._pvc:
+            self.get()
+        return self._pvc
+
+    @pvc.setter
+    def pvc(self, data):
+        ''' setter function for yedit var '''
+        self._pvc = data
+
+    def bound(self):
+        '''return whether the pvc is bound'''
+        if self.pvc.get_volume_name():
+            return True
+
+        return False
+
+    def exists(self):
+        ''' return whether a pvc exists '''
+        if self.pvc:
+            return True
+
+        return False
+
+    def get(self):
+        '''return pvc information '''
+        result = self._get(self.kind, self.config.name)
+        if result['returncode'] == 0:
+            self.pvc = PersistentVolumeClaim(content=result['results'][0])
+        elif '\"%s\" not found' % self.config.name in result['stderr']:
+            result['returncode'] = 0
+            result['results'] = [{}]
+
+        return result
+
+    def delete(self):
+        '''delete the object'''
+        return self._delete(self.kind, self.config.name)
+
+    def create(self):
+        '''create the object'''
+        return self._create_from_content(self.config.name, self.config.data)
+
+    def update(self):
+        '''update the object'''
+        # replace the existing pvc with the generated definition
+        return self._replace_content(self.kind, self.config.name, self.config.data)
+
+    def needs_update(self):
+        ''' verify an update is needed '''
+        if self.pvc.get_volume_name() or self.pvc.is_bound():
+            return False
+
+        skip = []
+        return not Utils.check_def_equal(self.config.data, self.pvc.yaml_dict, skip_keys=skip, debug=True)
+
+    # pylint: disable=too-many-branches,too-many-return-statements
+    @staticmethod
+    def run_ansible(params, check_mode):
+        '''run the idempotent ansible code'''
+        pconfig = PersistentVolumeClaimConfig(params['name'],
+                                              params['namespace'],
+                                              params['kubeconfig'],
+                                              params['access_modes'],
+                                              params['volume_capacity'],
+                                             )
+        oc_pvc = OCPVC(pconfig, verbose=params['debug'])
+
+        state = params['state']
+
+        api_rval = oc_pvc.get()
+        if api_rval['returncode'] != 0:
+            return {'failed': True, 'msg': api_rval}
+
+        #####
+        # Get
+        #####
+        if state == 'list':
+            return {'changed': False, 'results': api_rval['results'], 'state': state}
+
+        ########
+        # Delete
+        ########
+        if state == 'absent':
+            if oc_pvc.exists():
+
+                if check_mode:
+                    return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a delete.'}
+
+                api_rval = oc_pvc.delete()
+
+                if api_rval['returncode'] != 0:
+                    return {'failed': True, 'msg': api_rval}
+
+                return {'changed': True, 'results': api_rval, 'state': state}
+
+            return {'changed': False, 'state': state}
+
+        if state == 'present':
+            ########
+            # Create
+            ########
+            if not oc_pvc.exists():
+
+                if check_mode:
+                    return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a create.'}
+
+                # Create it here
+                api_rval = oc_pvc.create()
+
+                if api_rval['returncode'] != 0:
+                    return {'failed': True, 'msg': api_rval}
+
+                # return the created object
+                api_rval = oc_pvc.get()
+
+                if api_rval['returncode'] != 0:
+                    return {'failed': True, 'msg': api_rval}
+
+                return {'changed': True, 'results': api_rval, 'state': state}
+
+            ########
+            # Update
+            ########
+            if oc_pvc.pvc.is_bound() or oc_pvc.pvc.get_volume_name():
+                api_rval['msg'] = '##### - This volume is currently bound.  Will not update - #####'
+                return {'changed': False, 'results': api_rval, 'state': state}
+
+            if oc_pvc.needs_update():
+                api_rval = oc_pvc.update()
+
+                if api_rval['returncode'] != 0:
+                    return {'failed': True, 'msg': api_rval}
+
+                # return the created object
+                api_rval = oc_pvc.get()
+
+                if api_rval['returncode'] != 0:
+                    return {'failed': True, 'msg': api_rval}
+
+                return {'changed': True, 'results': api_rval, 'state': state}
+
+            return {'changed': False, 'results': api_rval, 'state': state}
+
+        return {'failed': True, 'msg': 'Unknown state passed. {}'.format(state)}
+
+# -*- -*- -*- End included fragment: class/oc_pvc.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: ansible/oc_pvc.py -*- -*- -*-
+
+# pylint: disable=too-many-branches
+def main():
+    '''
+    ansible oc module for pvc
+    '''
+
+    module = AnsibleModule(
+        argument_spec=dict(
+            kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
+            state=dict(default='present', type='str',
+                       choices=['present', 'absent', 'list']),
+            debug=dict(default=False, type='bool'),
+            name=dict(default=None, required=True, type='str'),
+            namespace=dict(default=None, required=True, type='str'),
+            volume_capacity=dict(default='1G', type='str'),
+            access_modes=dict(default='ReadWriteOnce',
+                              choices=['ReadWriteOnce', 'ReadOnlyMany', 'ReadWriteMany'],
+                              type='list'),
+        ),
+        supports_check_mode=True,
+    )
+
+    rval = OCPVC.run_ansible(module.params, module.check_mode)
+
+    if 'failed' in rval:
+        module.fail_json(**rval)
+
+    module.exit_json(**rval)
+
+
+if __name__ == '__main__':
+    main()
+
+# -*- -*- -*- End included fragment: ansible/oc_pvc.py -*- -*- -*-
diff --git a/roles/lib_openshift/library/oc_user.py b/roles/lib_openshift/library/oc_user.py
new file mode 100644
index 000000000..2ccc00301
--- /dev/null
+++ b/roles/lib_openshift/library/oc_user.py
@@ -0,0 +1,1714 @@
+#!/usr/bin/env python
+# pylint: disable=missing-docstring
+# flake8: noqa: T001
+#     ___ ___ _  _ ___ ___    _ _____ ___ ___
+#    / __| __| \| | __| _ \  /_\_   _| __|   \
+#   | (_ | _|| .` | _||   / / _ \| | | _|| |) |
+#    \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
+#   |   \ / _ \  | \| |/ _ \_   _| | __|   \_ _|_   _|
+#   | |) | (_) | | .` | (_) || |   | _|| |) | |  | |
+#   |___/ \___/  |_|\_|\___/ |_|   |___|___/___| |_|
+#
+# Copyright 2016 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags. 
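+#
+# (A minimal sketch for the oc_pvc module above, mirroring the oc_configmap
+# example; the values are hypothetical and access_modes is a list per the
+# module documentation.)
+#
+#   params = dict(kubeconfig='/etc/origin/master/admin.kubeconfig',
+#                 state='present', debug=False, name='dbstorage',
+#                 namespace='awesomeapp', volume_capacity='5G',
+#                 access_modes=['ReadWriteOnce'])
+#   rval = OCPVC.run_ansible(params, check_mode=False)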
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +#    http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# -*- -*- -*- Begin included fragment: lib/import.py -*- -*- -*- +''' +   OpenShiftCLI class that wraps the oc commands in a subprocess +''' +# pylint: disable=too-many-lines + +from __future__ import print_function +import atexit +import copy +import json +import os +import re +import shutil +import subprocess +import tempfile +# pylint: disable=import-error +try: +    import ruamel.yaml as yaml +except ImportError: +    import yaml + +from ansible.module_utils.basic import AnsibleModule + +# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*- + +# -*- -*- -*- Begin included fragment: doc/user -*- -*- -*- + +DOCUMENTATION = ''' +--- +module: oc_user +short_description: Create, modify, and idempotently manage openshift users. +description: +  - Modify openshift users programmatically. +options: +  state: +    description: +    - State controls the action that will be taken with resource +    - 'present' will create or update a user to the desired state +    - 'absent' will ensure user is removed +    - 'list' will read and return a list of users +    default: present +    choices: ["present", "absent", "list"] +    aliases: [] +  kubeconfig: +    description: +    - The path for the kubeconfig file to use for authentication +    required: false +    default: /etc/origin/master/admin.kubeconfig +    aliases: [] +  debug: +    description: +    - Turn on debug output. +    required: false +    default: False +    aliases: [] +  username: +    description: +    - Short username to query/modify. +    required: false +    default: None +    aliases: [] +  full_name: +    description: +    - String with the full name/description of the user. +    required: false +    default: None +    aliases: [] +  groups: +    description: +    - List of groups the user should be a member of. This does not add/update the legacy 'groups' field in the OpenShift user object, but makes user entries into the appropriate OpenShift group object for the given user. 
+    required: false
+    default: []
+    aliases: []
+author:
+- "Joel Diaz <jdiaz@redhat.com>"
+extends_documentation_fragment: []
+'''
+
+EXAMPLES = '''
+- name: Ensure user exists
+  oc_user:
+    state: present
+    username: johndoe
+    full_name: "John Doe"
+    groups:
+    - dedicated-admins
+  register: user_johndoe
+
+user_johndoe variable will have contents like:
+ok: [ded-int-aws-master-61034] => {
+    "user_johndoe": {
+        "changed": true,
+        "results": {
+            "cmd": "oc -n default get users johndoe -o json",
+            "results": [
+                {
+                    "apiVersion": "v1",
+                    "fullName": "John Doe",
+                    "groups": null,
+                    "identities": null,
+                    "kind": "User",
+                    "metadata": {
+                        "creationTimestamp": "2017-02-28T15:09:21Z",
+                        "name": "johndoe",
+                        "resourceVersion": "848781",
+                        "selfLink": "/oapi/v1/users/johndoe",
+                        "uid": "e23d3300-fdc7-11e6-9e3e-12822d6b7656"
+                    }
+                }
+            ],
+            "returncode": 0
+        },
+        "state": "present"
+    }
+}
+'groups' is null in the output because the module leaves the user object's
+own 'groups' field unset; membership is managed through OpenShift group
+objects instead.
+
+- name: Ensure user does not exist
+  oc_user:
+    state: absent
+    username: johndoe
+
+- name: List user's info
+  oc_user:
+    state: list
+    username: johndoe
+  register: user_johndoe
+
+user_johndoe will have contents similar to:
+ok: [ded-int-aws-master-61034] => {
+    "user_johndoe": {
+        "changed": false,
+        "results": [
+            {
+                "apiVersion": "v1",
+                "fullName": "John Doe",
+                "groups": null,
+                "identities": null,
+                "kind": "User",
+                "metadata": {
+                    "creationTimestamp": "2017-02-28T15:04:44Z",
+                    "name": "johndoe",
+                    "resourceVersion": "848280",
+                    "selfLink": "/oapi/v1/users/johndoe",
+                    "uid": "3d479ad2-fdc7-11e6-9e3e-12822d6b7656"
+                }
+            }
+        ],
+        "state": "list"
+    }
+}
+'''
+
+# -*- -*- -*- End included fragment: doc/user -*- -*- -*-
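+
+# A minimal usage sketch of the Yedit helper included below (the filename
+# and keys are hypothetical, for illustration only):
+#
+#   yed = Yedit(filename='/tmp/example.yml', separator='.')
+#   yed.put('a.b.c', 'value')     # create nested keys with dot notation
+#   yed.get('a.b.c')              # -> 'value'
+#   yed.append('a.items', 'x')    # append to (or create) a list at a path
+#   yed.write()                   # persist the result back to the file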
+
+# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
+# pylint: disable=undefined-variable,missing-docstring
+# noqa: E301,E302
+
+
+class YeditException(Exception):
+    ''' Exception class for Yedit '''
+    pass
+
+
+# pylint: disable=too-many-public-methods
+class Yedit(object):
+    ''' Class to modify yaml files '''
+    re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
+    re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+    com_sep = set(['.', '#', '|', ':'])
+
+    # pylint: disable=too-many-arguments
+    def __init__(self,
+                 filename=None,
+                 content=None,
+                 content_type='yaml',
+                 separator='.',
+                 backup=False):
+        self.content = content
+        self._separator = separator
+        self.filename = filename
+        self.__yaml_dict = content
+        self.content_type = content_type
+        self.backup = backup
+        self.load(content_type=self.content_type)
+        if self.__yaml_dict is None:
+            self.__yaml_dict = {}
+
+    @property
+    def separator(self):
+        ''' getter method for separator '''
+        return self._separator
+
+    @separator.setter
+    def separator(self, inc_sep):
+        ''' setter method for separator '''
+        self._separator = inc_sep
+
+    @property
+    def yaml_dict(self):
+        ''' getter method for yaml_dict '''
+        return self.__yaml_dict
+
+    @yaml_dict.setter
+    def yaml_dict(self, value):
+        ''' setter method for yaml_dict '''
+        self.__yaml_dict = value
+
+    @staticmethod
+    def parse_key(key, sep='.'):
+        '''parse the key allowing the appropriate separator'''
+        common_separators = list(Yedit.com_sep - set([sep]))
+        return re.findall(Yedit.re_key % ''.join(common_separators), key)
+
+    @staticmethod
+    def valid_key(key, sep='.'):
+        '''validate the incoming key'''
+        common_separators = list(Yedit.com_sep - set([sep]))
+        if not re.match(Yedit.re_valid_key % ''.join(common_separators), key):
+            return False
+
+        return True
+
+    @staticmethod
+    def remove_entry(data, key, sep='.'):
+        ''' remove data at location key '''
+        if key == '' and isinstance(data, dict):
+            data.clear()
+            return True
+        elif key == '' and isinstance(data, list):
+            del data[:]
+            return True
+
+        if not (key and Yedit.valid_key(key, sep)) and \
+           isinstance(data, (list, dict)):
+            return None
+
+        key_indexes = Yedit.parse_key(key, sep)
+        for arr_ind, dict_key in key_indexes[:-1]:
+            if dict_key and isinstance(data, dict):
+                data = data.get(dict_key, None)
+            elif (arr_ind and isinstance(data, list) and
+                  int(arr_ind) <= len(data) - 1):
+                data = data[int(arr_ind)]
+            else:
+                return None
+
+        # process last index for remove
+        # expected list entry
+        if key_indexes[-1][0]:
+            if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:  # noqa: E501
+                del data[int(key_indexes[-1][0])]
+                return True
+
+        # expected dict entry
+        elif key_indexes[-1][1]:
+            if isinstance(data, dict):
+                del data[key_indexes[-1][1]]
+                return True
+    @staticmethod
+    def add_entry(data, key, item=None, sep='.'):
+        ''' Add or update an item in a dictionary with key notation a.b.c
+            d = {'a': {'b': 'c'}}
+            key = a#b, item = 'd'
+            result: d = {'a': {'b': 'd'}}
+        '''
+        if key == '':
+            pass
+        elif (not (key and Yedit.valid_key(key, sep)) and
+              isinstance(data, (list, dict))):
+            return None
+
+        key_indexes = Yedit.parse_key(key, sep)
+        for arr_ind, dict_key in key_indexes[:-1]:
+            if dict_key:
+                if isinstance(data, dict) and dict_key in data and data[dict_key]:  # noqa: E501
+                    data = data[dict_key]
+                    continue
+
+                elif data and not isinstance(data, dict):
+                    raise YeditException("Unexpected item type found while going through key " +
+                                         "path: {} (at key: {})".format(key, dict_key))
+
+                data[dict_key] = {}
+                data = data[dict_key]
+
+            elif (arr_ind and isinstance(data, list) and
+                  int(arr_ind) <= len(data) - 1):
+                data = data[int(arr_ind)]
+            else:
+                raise YeditException("Unexpected item type found while going through key path: {}".format(key))
+
+        if key == '':
+            data = item
+
+        # process last index for add
+        # expected list entry
+        elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:  # noqa: E501
+            data[int(key_indexes[-1][0])] = item
+
+        # expected dict entry
+        elif key_indexes[-1][1] and isinstance(data, dict):
+            data[key_indexes[-1][1]] = item
+
+        # didn't add/update to an existing list, nor add/update key to a dict
+        # so we must have been provided some syntax like a.b.c[<int>] = "data" for a
+        # non-existent array
+        else:
+            raise YeditException("Error adding to object at path: {}".format(key))
+
+        return data
+
+    @staticmethod
+    def get_entry(data, key, sep='.'):
+        ''' Get an item from a dictionary with key notation a.b.c
+            d = {'a': {'b': 'c'}}
+            key = a.b
+            return c
+        '''
+        if key == '':
+            pass
+        elif (not (key and Yedit.valid_key(key, sep)) and
+              isinstance(data, (list, dict))):
+            return None
+
+        key_indexes = Yedit.parse_key(key, sep)
+        for arr_ind, dict_key in key_indexes:
+            if dict_key and isinstance(data, dict):
+                data = data.get(dict_key, None)
+            elif (arr_ind and isinstance(data, list) and
+                  int(arr_ind) <= len(data) - 1):
+                data = data[int(arr_ind)]
+            else:
+                return None
+
+        return data
+
+    @staticmethod
+    def _write(filename, contents):
+        ''' Actually write the file contents to disk. This helps with mocking. '''
+
+        tmp_filename = filename + '.yedit'
+
+        with open(tmp_filename, 'w') as yfd:
+            yfd.write(contents)
+
+        os.rename(tmp_filename, filename)
+
+    def write(self):
+        ''' write to file '''
+        if not self.filename:
+            raise YeditException('Please specify a filename.')
+
+        if self.backup and self.file_exists():
+            shutil.copy(self.filename, self.filename + '.orig')
+
+        # Try to set format attributes if supported
+        try:
+            self.yaml_dict.fa.set_block_style()
+        except AttributeError:
+            pass
+
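+        # Both fallbacks in this method follow the same pattern: probe for a
+        # ruamel.yaml-only attribute and fall back to plain PyYAML on
+        # AttributeError. A standalone sketch of the dump half (the 'data'
+        # variable is hypothetical):
+        #
+        #   try:
+        #       text = yaml.dump(data, Dumper=yaml.RoundTripDumper)
+        #   except AttributeError:
+        #       text = yaml.safe_dump(data, default_flow_style=False)
+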
+        # Try to use RoundTripDumper if supported.
+        try:
+            Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
+        except AttributeError:
+            Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False))
+
+        return (True, self.yaml_dict)
+
+    def read(self):
+        ''' read from file '''
+        # check if it exists
+        if self.filename is None or not self.file_exists():
+            return None
+
+        contents = None
+        with open(self.filename) as yfd:
+            contents = yfd.read()
+
+        return contents
+
+    def file_exists(self):
+        ''' return whether file exists '''
+        if os.path.exists(self.filename):
+            return True
+
+        return False
+
+    def load(self, content_type='yaml'):
+        ''' return yaml file '''
+        contents = self.read()
+
+        if not contents and not self.content:
+            return None
+
+        if self.content:
+            if isinstance(self.content, dict):
+                self.yaml_dict = self.content
+                return self.yaml_dict
+            elif isinstance(self.content, str):
+                contents = self.content
+
+        # check if it is yaml
+        try:
+            if content_type == 'yaml' and contents:
+                # Try to set format attributes if supported
+                try:
+                    self.yaml_dict.fa.set_block_style()
+                except AttributeError:
+                    pass
+
+                # Try to use RoundTripLoader if supported.
+                try:
+                    self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
+                except AttributeError:
+                    self.yaml_dict = yaml.safe_load(contents)
+
+                # Try to set format attributes if supported
+                try:
+                    self.yaml_dict.fa.set_block_style()
+                except AttributeError:
+                    pass
+
+            elif content_type == 'json' and contents:
+                self.yaml_dict = json.loads(contents)
+        except yaml.YAMLError as err:
+            # Error loading yaml or json
+            raise YeditException('Problem with loading yaml file. 
%s' % err) + +        return self.yaml_dict + +    def get(self, key): +        ''' get a specified key''' +        try: +            entry = Yedit.get_entry(self.yaml_dict, key, self.separator) +        except KeyError: +            entry = None + +        return entry + +    def pop(self, path, key_or_item): +        ''' remove a key, value pair from a dict or an item for a list''' +        try: +            entry = Yedit.get_entry(self.yaml_dict, path, self.separator) +        except KeyError: +            entry = None + +        if entry is None: +            return (False, self.yaml_dict) + +        if isinstance(entry, dict): +            # AUDIT:maybe-no-member makes sense due to fuzzy types +            # pylint: disable=maybe-no-member +            if key_or_item in entry: +                entry.pop(key_or_item) +                return (True, self.yaml_dict) +            return (False, self.yaml_dict) + +        elif isinstance(entry, list): +            # AUDIT:maybe-no-member makes sense due to fuzzy types +            # pylint: disable=maybe-no-member +            ind = None +            try: +                ind = entry.index(key_or_item) +            except ValueError: +                return (False, self.yaml_dict) + +            entry.pop(ind) +            return (True, self.yaml_dict) + +        return (False, self.yaml_dict) + +    def delete(self, path): +        ''' remove path from a dict''' +        try: +            entry = Yedit.get_entry(self.yaml_dict, path, self.separator) +        except KeyError: +            entry = None + +        if entry is None: +            return (False, self.yaml_dict) + +        result = Yedit.remove_entry(self.yaml_dict, path, self.separator) +        if not result: +            return (False, self.yaml_dict) + +        return (True, self.yaml_dict) + +    def exists(self, path, value): +        ''' check if value exists at path''' +        try: +            entry = Yedit.get_entry(self.yaml_dict, path, self.separator) +        except KeyError: +            entry = None + +        if isinstance(entry, list): +            if value in entry: +                return True +            return False + +        elif isinstance(entry, dict): +            if isinstance(value, dict): +                rval = False +                for key, val in value.items(): +                    if entry[key] != val: +                        rval = False +                        break +                else: +                    rval = True +                return rval + +            return value in entry + +        return entry == value + +    def append(self, path, value): +        '''append value to a list''' +        try: +            entry = Yedit.get_entry(self.yaml_dict, path, self.separator) +        except KeyError: +            entry = None + +        if entry is None: +            self.put(path, []) +            entry = Yedit.get_entry(self.yaml_dict, path, self.separator) +        if not isinstance(entry, list): +            return (False, self.yaml_dict) + +        # AUDIT:maybe-no-member makes sense due to loading data from +        # a serialized format. 
+        # pylint: disable=maybe-no-member +        entry.append(value) +        return (True, self.yaml_dict) + +    # pylint: disable=too-many-arguments +    def update(self, path, value, index=None, curr_value=None): +        ''' put path, value into a dict ''' +        try: +            entry = Yedit.get_entry(self.yaml_dict, path, self.separator) +        except KeyError: +            entry = None + +        if isinstance(entry, dict): +            # AUDIT:maybe-no-member makes sense due to fuzzy types +            # pylint: disable=maybe-no-member +            if not isinstance(value, dict): +                raise YeditException('Cannot replace key, value entry in ' + +                                     'dict with non-dict type. value=[%s] [%s]' % (value, type(value)))  # noqa: E501 + +            entry.update(value) +            return (True, self.yaml_dict) + +        elif isinstance(entry, list): +            # AUDIT:maybe-no-member makes sense due to fuzzy types +            # pylint: disable=maybe-no-member +            ind = None +            if curr_value: +                try: +                    ind = entry.index(curr_value) +                except ValueError: +                    return (False, self.yaml_dict) + +            elif index is not None: +                ind = index + +            if ind is not None and entry[ind] != value: +                entry[ind] = value +                return (True, self.yaml_dict) + +            # see if it exists in the list +            try: +                ind = entry.index(value) +            except ValueError: +                # doesn't exist, append it +                entry.append(value) +                return (True, self.yaml_dict) + +            # already exists, return +            if ind is not None: +                return (False, self.yaml_dict) +        return (False, self.yaml_dict) + +    def put(self, path, value): +        ''' put path, value into a dict ''' +        try: +            entry = Yedit.get_entry(self.yaml_dict, path, self.separator) +        except KeyError: +            entry = None + +        if entry == value: +            return (False, self.yaml_dict) + +        # deepcopy didn't work +        # Try to use ruamel.yaml and fallback to pyyaml +        try: +            tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, +                                                      default_flow_style=False), +                                 yaml.RoundTripLoader) +        except AttributeError: +            tmp_copy = copy.deepcopy(self.yaml_dict) + +        # set the format attributes if available +        try: +            tmp_copy.fa.set_block_style() +        except AttributeError: +            pass + +        result = Yedit.add_entry(tmp_copy, path, value, self.separator) +        if not result: +            return (False, self.yaml_dict) + +        self.yaml_dict = tmp_copy + +        return (True, self.yaml_dict) + +    def create(self, path, value): +        ''' create a yaml file ''' +        if not self.file_exists(): +            # deepcopy didn't work +            # Try to use ruamel.yaml and fallback to pyyaml +            try: +                tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, +                                                          default_flow_style=False), +                                     yaml.RoundTripLoader) +            except AttributeError: +                tmp_copy = copy.deepcopy(self.yaml_dict) + +            # set the format attributes if 
available +            try: +                tmp_copy.fa.set_block_style() +            except AttributeError: +                pass + +            result = Yedit.add_entry(tmp_copy, path, value, self.separator) +            if result: +                self.yaml_dict = tmp_copy +                return (True, self.yaml_dict) + +        return (False, self.yaml_dict) + +    @staticmethod +    def get_curr_value(invalue, val_type): +        '''return the current value''' +        if invalue is None: +            return None + +        curr_value = invalue +        if val_type == 'yaml': +            curr_value = yaml.load(invalue) +        elif val_type == 'json': +            curr_value = json.loads(invalue) + +        return curr_value + +    @staticmethod +    def parse_value(inc_value, vtype=''): +        '''determine value type passed''' +        true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE', +                      'on', 'On', 'ON', ] +        false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE', +                       'off', 'Off', 'OFF'] + +        # It came in as a string but you didn't specify value_type as string +        # we will convert to bool if it matches any of the above cases +        if isinstance(inc_value, str) and 'bool' in vtype: +            if inc_value not in true_bools and inc_value not in false_bools: +                raise YeditException('Not a boolean type. str=[%s] vtype=[%s]' +                                     % (inc_value, vtype)) +        elif isinstance(inc_value, bool) and 'str' in vtype: +            inc_value = str(inc_value) + +        # If vtype is not str then go ahead and attempt to yaml load it. +        if isinstance(inc_value, str) and 'str' not in vtype: +            try: +                inc_value = yaml.load(inc_value) +            except Exception: +                raise YeditException('Could not determine type of incoming ' + +                                     'value. value=[%s] vtype=[%s]' +                                     % (type(inc_value), vtype)) + +        return inc_value + +    # pylint: disable=too-many-return-statements,too-many-branches +    @staticmethod +    def run_ansible(module): +        '''perform the idempotent crud operations''' +        yamlfile = Yedit(filename=module.params['src'], +                         backup=module.params['backup'], +                         separator=module.params['separator']) + +        if module.params['src']: +            rval = yamlfile.load() + +            if yamlfile.yaml_dict is None and \ +               module.params['state'] != 'present': +                return {'failed': True, +                        'msg': 'Error opening file [%s].  
Verify that the ' +
+                               'file exists, that it has correct' +
+                               ' permissions, and is valid yaml.'}
+
+        if module.params['state'] == 'list':
+            if module.params['content']:
+                content = Yedit.parse_value(module.params['content'],
+                                            module.params['content_type'])
+                yamlfile.yaml_dict = content
+
+            if module.params['key']:
+                rval = yamlfile.get(module.params['key']) or {}
+
+            return {'changed': False, 'result': rval, 'state': "list"}
+
+        elif module.params['state'] == 'absent':
+            if module.params['content']:
+                content = Yedit.parse_value(module.params['content'],
+                                            module.params['content_type'])
+                yamlfile.yaml_dict = content
+
+            if module.params['update']:
+                rval = yamlfile.pop(module.params['key'],
+                                    module.params['value'])
+            else:
+                rval = yamlfile.delete(module.params['key'])
+
+            if rval[0] and module.params['src']:
+                yamlfile.write()
+
+            return {'changed': rval[0], 'result': rval[1], 'state': "absent"}
+
+        elif module.params['state'] == 'present':
+            # check if content is different than what is in the file
+            if module.params['content']:
+                content = Yedit.parse_value(module.params['content'],
+                                            module.params['content_type'])
+
+                # We had no edits to make and the contents are the same
+                if yamlfile.yaml_dict == content and \
+                   module.params['value'] is None:
+                    return {'changed': False,
+                            'result': yamlfile.yaml_dict,
+                            'state': "present"}
+
+                yamlfile.yaml_dict = content
+
+            # we were passed a value; parse it
+            if module.params['value']:
+                value = Yedit.parse_value(module.params['value'],
+                                          module.params['value_type'])
+                key = module.params['key']
+                if module.params['update']:
+                    # pylint: disable=line-too-long
+                    curr_value = Yedit.get_curr_value(Yedit.parse_value(module.params['curr_value']),  # noqa: E501
+                                                      module.params['curr_value_format'])  # noqa: E501
+
+                    rval = yamlfile.update(key, value, module.params['index'], curr_value)  # noqa: E501
+
+                elif module.params['append']:
+                    rval = yamlfile.append(key, value)
+                else:
+                    rval = yamlfile.put(key, value)
+
+                if rval[0] and module.params['src']:
+                    yamlfile.write()
+
+                return {'changed': rval[0],
+                        'result': rval[1], 'state': "present"}
+
+            # no edits to make
+            if module.params['src']:
+                # pylint: disable=redefined-variable-type
+                rval = yamlfile.write()
+                return {'changed': rval[0],
+                        'result': rval[1],
+                        'state': "present"}
+
+        return {'failed': True, 'msg': 'Unknown state passed'}
+
+# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
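+
+# Yedit.run_ansible (above) implements the dict-in/dict-out contract shared
+# by these generated modules. The shapes below are illustrative, not real
+# output:
+#
+#   module.params = {'src': '/tmp/example.yml', 'state': 'present',
+#                    'key': 'a.b', 'value': 'c', 'backup': False, ...}
+#   Yedit.run_ansible(module)
+#   # -> {'changed': True, 'result': {'a': {'b': 'c'}}, 'state': 'present'}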
+
+# -*- -*- -*- Begin included fragment: lib/base.py -*- -*- -*-
+# pylint: disable=too-many-lines
+# noqa: E301,E302,E303,T001
+
+
+class OpenShiftCLIError(Exception):
+    '''Exception class for openshiftcli'''
+    pass
+
+
+ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')]
+
+
+def locate_oc_binary():
+    ''' Find and return oc binary file '''
+    # https://github.com/openshift/openshift-ansible/issues/3410
+    # oc can be in /usr/local/bin in some cases, but that may not
+    # be in $PATH due to ansible/sudo
+    paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS
+
+    oc_binary = 'oc'
+
+    # Use shutil.which if it is available, otherwise fall back to a naive path search
+    try:
+        which_result = shutil.which(oc_binary, path=os.pathsep.join(paths))
+        if which_result is not None:
+            oc_binary = which_result
+    except AttributeError:
+        for path in paths:
+            if os.path.exists(os.path.join(path, oc_binary)):
+                oc_binary = os.path.join(path, oc_binary)
+                break
+
+    return oc_binary
+
+
+# pylint: disable=too-few-public-methods
+class OpenShiftCLI(object):
+    ''' Class to wrap the command line tools '''
+    def __init__(self,
+                 namespace,
+                 kubeconfig='/etc/origin/master/admin.kubeconfig',
+                 verbose=False,
+                 all_namespaces=False):
+        ''' Constructor for OpenshiftCLI '''
+        self.namespace = namespace
+        self.verbose = verbose
+        self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig)
+        self.all_namespaces = all_namespaces
+        self.oc_binary = locate_oc_binary()
+
+    # Pylint allows only 5 arguments to be passed.
+    # pylint: disable=too-many-arguments
+    def _replace_content(self, resource, rname, content, force=False, sep='.'):
+        ''' replace the current object with the content '''
+        res = self._get(resource, rname)
+        if not res['results']:
+            return res
+
+        fname = Utils.create_tmpfile(rname + '-')
+
+        yed = Yedit(fname, res['results'][0], separator=sep)
+        changes = []
+        for key, value in content.items():
+            changes.append(yed.put(key, value))
+
+        if any([change[0] for change in changes]):
+            yed.write()
+
+            atexit.register(Utils.cleanup, [fname])
+
+            return self._replace(fname, force)
+
+        return {'returncode': 0, 'updated': False}
+
+    def _replace(self, fname, force=False):
+        '''replace the current object with oc replace'''
+        cmd = ['replace', '-f', fname]
+        if force:
+            cmd.append('--force')
+        return self.openshift_cmd(cmd)
+
+    def _create_from_content(self, rname, content):
+        '''create a temporary file and then call oc create on it'''
+        fname = Utils.create_tmpfile(rname + '-')
+        yed = Yedit(fname, content=content)
+        yed.write()
+
+        atexit.register(Utils.cleanup, [fname])
+
+        return self._create(fname)
+
+    def _create(self, fname):
+        '''call oc create on a filename'''
+        return self.openshift_cmd(['create', '-f', fname])
+
+    def _delete(self, resource, rname, selector=None):
+        '''call oc delete on a resource'''
+        cmd = ['delete', resource, rname]
+        if selector:
+            cmd.append('--selector=%s' % selector)
+
+        return self.openshift_cmd(cmd)
+
+    def _process(self, template_name, create=False, params=None, template_data=None):  # noqa: E501
+        '''process a template
+
+           template_name: the name of the template to process
+           create: whether to send to oc create after processing
+           params: the parameters for the template
+           template_data: the incoming template's data; instead of a file
+        '''
+        cmd = ['process']
+        if template_data:
+            cmd.extend(['-f', '-'])
+        else:
+            cmd.append(template_name)
+        if params:
+            param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+            cmd.append('-v')
+            cmd.extend(param_str)
+
+        results = self.openshift_cmd(cmd, output=True, input_data=template_data)
+
+        if results['returncode'] != 0 or not create:
+            return results
+
+        fname = Utils.create_tmpfile(template_name + '-')
+        yed = Yedit(fname, results['results'])
+        yed.write()
+
+        atexit.register(Utils.cleanup, [fname])
+
+        return self.openshift_cmd(['create', '-f', fname])
+
+    def _get(self, resource, rname=None, selector=None):
+        '''return a resource by name '''
+        cmd = ['get', resource]
+        if selector:
+            cmd.append('--selector=%s' % selector)
+        elif rname:
+            cmd.append(rname)
+
+        cmd.extend(['-o', 'json'])
+
+        rval = self.openshift_cmd(cmd, output=True)
+
+        # Ensure results are returned in an array
+        if 'items' in rval:
+            rval['results'] = rval['items']
+        elif not isinstance(rval['results'], list):
+            rval['results'] = [rval['results']]
+
+        return rval
+
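+    # _get (above) normalizes 'oc get ... -o json' output so callers always
+    # receive a list under 'results'. An illustrative (abridged) return value:
+    #
+    #   {'returncode': 0,
+    #    'cmd': '/usr/bin/oc get users johndoe -o json',
+    #    'results': [{'kind': 'User', 'metadata': {'name': 'johndoe'}}]}
+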
+    def _schedulable(self, node=None, selector=None, schedulable=True):
+        ''' perform oadm manage-node schedulable '''
+        cmd = ['manage-node']
+        if node:
+            cmd.extend(node)
+        else:
+            cmd.append('--selector=%s' % selector)
+
+        cmd.append('--schedulable=%s' % schedulable)
+
+        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')  # noqa: E501
+
+    def _list_pods(self, node=None, selector=None, pod_selector=None):
+        ''' perform oadm list pods
+
+            node: the node in which to list pods
+            selector: the label selector filter if provided
+            pod_selector: the pod selector filter if provided
+        '''
+        cmd = ['manage-node']
+        if node:
+            cmd.extend(node)
+        else:
+            cmd.append('--selector=%s' % selector)
+
+        if pod_selector:
+            cmd.append('--pod-selector=%s' % pod_selector)
+
+        cmd.extend(['--list-pods', '-o', 'json'])
+
+        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
+
+    # pylint: disable=too-many-arguments
+    def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
+        ''' perform oadm manage-node evacuate '''
+        cmd = ['manage-node']
+        if node:
+            cmd.extend(node)
+        else:
+            cmd.append('--selector=%s' % selector)
+
+        if dry_run:
+            cmd.append('--dry-run')
+
+        if pod_selector:
+            cmd.append('--pod-selector=%s' % pod_selector)
+
+        if grace_period:
+            cmd.append('--grace-period=%s' % int(grace_period))
+
+        if force:
+            cmd.append('--force')
+
+        cmd.append('--evacuate')
+
+        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
+
+    def _version(self):
+        ''' return the openshift version'''
+        return self.openshift_cmd(['version'], output=True, output_type='raw')
+
+    def _import_image(self, url=None, name=None, tag=None):
+        ''' perform image import '''
+        cmd = ['import-image']
+
+        image = '{0}'.format(name)
+        if tag:
+            image += ':{0}'.format(tag)
+
+        cmd.append(image)
+
+        if url:
+            cmd.append('--from={0}/{1}'.format(url, image))
+
+        cmd.append('-n{0}'.format(self.namespace))
+
+        cmd.append('--confirm')
+        return self.openshift_cmd(cmd)
+
+    def _run(self, cmds, input_data):
+        ''' Actually executes the command. This makes mocking easier. '''
+        curr_env = os.environ.copy()
+        curr_env.update({'KUBECONFIG': self.kubeconfig})
+        proc = subprocess.Popen(cmds,
+                                stdin=subprocess.PIPE,
+                                stdout=subprocess.PIPE,
+                                stderr=subprocess.PIPE,
+                                env=curr_env)
+
+        stdout, stderr = proc.communicate(input_data)
+
+        return proc.returncode, stdout.decode(), stderr.decode()
+
+    # pylint: disable=too-many-arguments,too-many-branches
+    def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
+        '''Base command for oc '''
+        cmds = [self.oc_binary]
+
+        if oadm:
+            cmds.append('adm')
+
+        cmds.extend(cmd)
+
+        if self.all_namespaces:
+            cmds.extend(['--all-namespaces'])
+        elif self.namespace is not None and self.namespace.lower() not in ['none', 'empty']:  # E501
+            cmds.extend(['-n', self.namespace])
+
+        rval = {}
+        results = ''
+        err = None
+
+        if self.verbose:
+            print(' '.join(cmds))
+
+        try:
+            returncode, stdout, stderr = self._run(cmds, input_data)
+        except OSError as ex:
+            returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
+
+        rval = {"returncode": returncode,
+                "results": results,
+                "cmd": ' '.join(cmds)}
+
+        if returncode == 0:
+            if output:
+                if output_type == 'json':
+                    try:
+                        rval['results'] = json.loads(stdout)
+                    except ValueError as verr:
+                        if "No JSON object could be decoded" in verr.args:
+                            err = verr.args
+                elif output_type == 'raw':
+                    rval['results'] = stdout
+
+            if self.verbose:
+                print("STDOUT: {0}".format(stdout))
+                print("STDERR: {0}".format(stderr))
+
+            if err:
+                rval.update({"err": err,
+                             "stderr": stderr,
+                             "stdout": stdout,
+                             "cmd": cmds})
+
+        else:
+            rval.update({"stderr": stderr,
+                         "stdout": stdout,
+                         "results": {}})
+
+        return rval
+
+
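+# Illustrative openshift_cmd call shapes (the namespace and node name are
+# hypothetical; the keyword arguments are the real ones defined above):
+#
+#   cli = OpenShiftCLI('default')
+#   cli.openshift_cmd(['get', 'configmaps', '-o', 'json'], output=True)
+#   cli.openshift_cmd(['manage-node', 'node1.example.com', '--schedulable=false'],
+#                     oadm=True, output=True, output_type='raw')
+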
+class Utils(object):
+    ''' utilities for openshiftcli modules '''
+
+    @staticmethod
+    def _write(filename, contents):
+        ''' Actually write the file contents to disk. This helps with mocking. '''
+
+        with open(filename, 'w') as sfd:
+            sfd.write(contents)
+
+    @staticmethod
+    def create_tmp_file_from_contents(rname, data, ftype='yaml'):
+        ''' create a file in tmp with name and contents'''
+
+        tmp = Utils.create_tmpfile(prefix=rname)
+
+        if ftype == 'yaml':
+            # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
+            # pylint: disable=no-member
+            if hasattr(yaml, 'RoundTripDumper'):
+                Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
+            else:
+                Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False))
+
+        elif ftype == 'json':
+            Utils._write(tmp, json.dumps(data))
+        else:
+            Utils._write(tmp, data)
+
+        # Register cleanup when module is done
+        atexit.register(Utils.cleanup, [tmp])
+        return tmp
+
+    @staticmethod
+    def create_tmpfile_copy(inc_file):
+        '''create a temporary copy of a file'''
+        tmpfile = Utils.create_tmpfile('lib_openshift-')
+        Utils._write(tmpfile, open(inc_file).read())
+
+        # Cleanup the tmpfile
+        atexit.register(Utils.cleanup, [tmpfile])
+
+        return tmpfile
+
+    @staticmethod
+    def create_tmpfile(prefix='tmp'):
+        ''' Generates and returns a temporary file name '''
+
+        with tempfile.NamedTemporaryFile(prefix=prefix, delete=False) as tmp:
+            return tmp.name
+
+    @staticmethod
+    def create_tmp_files_from_contents(content, content_type=None):
+        '''Turn an array of dicts with 'path' and 'data' keys into a files array'''
+        if not isinstance(content, list):
+            content = [content]
+        files = []
+        for item in content:
+            path = Utils.create_tmp_file_from_contents(item['path'] + '-',
+                                                       item['data'],
+                                                       ftype=content_type)
+            files.append({'name': os.path.basename(item['path']),
+                          'path': path})
+        return files
+
+    @staticmethod
+    def cleanup(files):
+        '''Clean up on exit '''
+        for sfile in files:
+            if os.path.exists(sfile):
+                if os.path.isdir(sfile):
+                    shutil.rmtree(sfile)
+                elif os.path.isfile(sfile):
+                    os.remove(sfile)
+
+    @staticmethod
+    def exists(results, _name):
+        ''' Check to see if the results include the name '''
+        if not results:
+            return False
+
+        if Utils.find_result(results, _name):
+            return True
+
+        return False
+
+    @staticmethod
+    def find_result(results, _name):
+        ''' Find the specified result by name'''
+        rval = None
+        for result in results:
+            if 'metadata' in result and result['metadata']['name'] == _name:
+                rval = result
+                break
+
+        return rval
+
+    @staticmethod
+    def get_resource_file(sfile, sfile_type='yaml'):
+        ''' return the service file '''
+        contents = None
+        with open(sfile) as sfd:
+            contents = sfd.read()
+
+        if sfile_type == 'yaml':
+            # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
+            # pylint: disable=no-member
+            if hasattr(yaml, 'RoundTripLoader'):
+                contents = yaml.load(contents, yaml.RoundTripLoader)
+            else:
+                contents = yaml.safe_load(contents)
+        elif sfile_type == 'json':
+            contents = json.loads(contents)
+
+        return contents
+
+    @staticmethod
+    def filter_versions(stdout):
+        ''' filter the oc version output '''
+
+        version_dict = {}
+        version_search = ['oc', 'openshift', 'kubernetes']
+
+        for line in stdout.strip().split('\n'):
+            for term in version_search:
+                if not line:
+                    continue
+                if line.startswith(term):
+                    version_dict[term] = line.split()[-1]
+
+        # horrible hack to get openshift version in Openshift 3.2
+        #  By default "oc version" in 3.2 does not return an "openshift" version
+        if "openshift" not in version_dict:
+            version_dict["openshift"] = version_dict["oc"]
+
+        return version_dict
+
+    @staticmethod
+    def add_custom_versions(versions):
+        ''' create custom versions strings '''
+
+        versions_dict = {}
+
+        for tech, version in versions.items():
+            # clean up "-" from version
+            if "-" in version:
+                version = version.split("-")[0]
+
+            if version.startswith('v'):
+                versions_dict[tech + '_numeric'] = version[1:].split('+')[0]
+                # "v3.3.0.33" is what we have, we want "3.3"
+                versions_dict[tech + '_short'] = version[1:4]
+
+        return versions_dict
+
+    @staticmethod
+    def openshift_installed():
+        ''' check if openshift is installed '''
+        import yum
+
+        yum_base = yum.YumBase()
+        if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
+            return True
+
+        return False
+
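+    # check_def_equal (below) is a one-way comparison: every key in the
+    # queried definition, minus skipped keys, must match the user-supplied
+    # definition. A sketch with made-up data:
+    #
+    #   user = {'spec': {'replicas': 1}}
+    #   live = {'metadata': {}, 'status': {}, 'spec': {'replicas': 1}}
+    #   Utils.check_def_equal(user, live)  # True; metadata/status are skipped
+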
+    # Disabling too-many-branches.  This is a yaml dictionary comparison function
+    # pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
+    @staticmethod
+    def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
+        ''' Given a user defined definition, compare it with the results given back by our query.  '''
+
+        # Currently these values are autogenerated and we do not need to check them
+        skip = ['metadata', 'status']
+        if skip_keys:
+            skip.extend(skip_keys)
+
+        for key, value in result_def.items():
+            if key in skip:
+                continue
+
+            # Both are lists
+            if isinstance(value, list):
+                if key not in user_def:
+                    if debug:
+                        print('User data does not have key [%s]' % key)
+                        print('User data: %s' % user_def)
+                    return False
+
+                if not isinstance(user_def[key], list):
+                    if debug:
+                        print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key]))
+                    return False
+
+                if len(user_def[key]) != len(value):
+                    if debug:
+                        print("List lengths are not equal.")
+                        print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value)))
+                        print("user_def: %s" % user_def[key])
+                        print("value: %s" % value)
+                    return False
+
+                for values in zip(user_def[key], value):
+                    if isinstance(values[0], dict) and isinstance(values[1], dict):
+                        if debug:
+                            print('sending list - list')
+                            print(type(values[0]))
+                            print(type(values[1]))
+                        result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
+                        if not result:
+                            if debug:
+                                print('list compare returned false')
+                            return False
+
+                    elif value != user_def[key]:
+                        if debug:
+                            print('value should be identical')
+                            print(user_def[key])
+                            print(value)
+                        return False
+
+            # recurse on a dictionary
+            elif isinstance(value, dict):
+                if key not in user_def:
+                    if debug:
+                        print("user_def does not have key [%s]" % key)
+                    return False
+                if not isinstance(user_def[key], dict):
+                    if debug:
+                        print("dict returned false: not instance of dict")
+                    return False
+
+                # before passing ensure keys match
+                api_values = set(value.keys()) - set(skip)
+                user_values = set(user_def[key].keys()) - set(skip)
+                if api_values != user_values:
+                    if debug:
+                        print("keys are not equal in dict")
+                        print(user_values)
+                        print(api_values)
+                    return False
+
+                result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
+                if not result:
+                    if debug:
+                        print("dict returned false")
+                        print(result)
+                    return False
+
+            # Verify each key, value pair is the same
+            else:
+                if key not in user_def or value != user_def[key]:
+                    if debug:
+                        print("value not equal; user_def does not have key")
+                        print(key)
+                        print(value)
+                        if key in user_def:
+                            print(user_def[key])
+                    return False
+
+        if debug:
+            print('returning true')
+        return True
+
+
+class OpenShiftCLIConfig(object):
+    '''Generic Config'''
+    def __init__(self, rname, namespace, kubeconfig, options):
+        self.kubeconfig = kubeconfig
+        self.name = rname
+        self.namespace = namespace
+        self._options = options
+
+    @property
+    def config_options(self):
+        ''' return config options '''
+        return self._options
+
+    def to_option_list(self):
+        '''return all options as a list of cli params'''
+        return self.stringify()
+
+    def stringify(self):
+        ''' return the options hash as a list of cli params '''
+        rval = []
+        for key in sorted(self.config_options.keys()):
+            data = self.config_options[key]
+            if data['include'] \
+               and (data['value'] or isinstance(data['value'], int)):
+                rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+
+        return rval
+
+
+# -*- -*- -*- End included fragment: lib/base.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: lib/user.py -*- -*- -*-
+
+
+class UserConfig(object):
+    ''' Handle user options '''
+    def __init__(self,
+                 kubeconfig,
+                 username,
+                 full_name):
+        ''' constructor for handling user options '''
+        self.kubeconfig = kubeconfig
+        self.username = username
+        self.full_name = full_name
+
+        self.data = {}
+        self.create_dict()
+
+    def create_dict(self):
+        ''' return a user as a dict '''
+        self.data['apiVersion'] = 'v1'
+        self.data['fullName'] = self.full_name
+        self.data['groups'] = None
+        self.data['identities'] = None
+        self.data['kind'] = 'User'
+        self.data['metadata'] = {}
+        self.data['metadata']['name'] = self.username
+
+
+# pylint: disable=too-many-instance-attributes
+class User(Yedit):
+    ''' Class to wrap the oc command line tools '''
+    kind = 'user'
+
+    def __init__(self, content):
+        '''User constructor'''
+        super(User, self).__init__(content=content)
+
+# -*- -*- -*- End included fragment: lib/user.py -*- -*- -*-
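+
+# How the user pieces compose at runtime (the username and group are
+# hypothetical; everything else is defined in this file):
+#
+#   uconfig = UserConfig(kubeconfig='/etc/origin/master/admin.kubeconfig',
+#                        username='jdoe', full_name='Jane Doe')
+#   oc_user = OCUser(uconfig, groups=['dedicated-admins'])
+#   oc_user.exists()         # GETs the user once and caches the result
+#   oc_user.needs_update()   # checks group membership, then field equality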
+
+# -*- -*- -*- Begin included fragment: class/oc_user.py -*- -*- -*-
+
+# pylint: disable=too-many-instance-attributes
+class OCUser(OpenShiftCLI):
+    ''' Class to wrap the oc command line tools '''
+    kind = 'users'
+
+    def __init__(self,
+                 config,
+                 groups=None,
+                 verbose=False):
+        ''' Constructor for OCUser '''
+        # namespace has no meaning for user operations, hardcode to 'default'
+        super(OCUser, self).__init__('default', config.kubeconfig)
+        self.config = config
+        self.groups = groups
+        self._user = None
+
+    @property
+    def user(self):
+        ''' property function user'''
+        if not self._user:
+            self.get()
+        return self._user
+
+    @user.setter
+    def user(self, data):
+        ''' setter function for user '''
+        self._user = data
+
+    def exists(self):
+        ''' return whether a user exists '''
+        if self.user:
+            return True
+
+        return False
+
+    def get(self):
+        ''' return user information '''
+        result = self._get(self.kind, self.config.username)
+        if result['returncode'] == 0:
+            self.user = User(content=result['results'][0])
+        elif 'users \"%s\" not found' % self.config.username in result['stderr']:
+            result['returncode'] = 0
+            result['results'] = [{}]
+
+        return result
+
+    def delete(self):
+        ''' delete the object '''
+        return self._delete(self.kind, self.config.username)
+
+    def create_group_entries(self):
+        ''' make entries for user to the provided group list '''
+        if self.groups is not None:
+            for group in self.groups:
+                cmd = ['groups', 'add-users', group, self.config.username]
+                rval = self.openshift_cmd(cmd, oadm=True)
+                if rval['returncode'] != 0:
+                    return rval
+
+        return {'returncode': 0}
+
+    def create(self):
+        ''' create the object '''
+        rval = self.create_group_entries()
+        if rval['returncode'] != 0:
+            return rval
+
+        return self._create_from_content(self.config.username, self.config.data)
+
+    def group_update(self):
+        ''' update group membership '''
+        rval = {'returncode': 0}
+        cmd = ['get', 'groups', '-o', 'json']
+        all_groups = self.openshift_cmd(cmd, output=True)
+
+        # pylint misidentifying all_groups['results']['items'] type
+        # pylint: disable=invalid-sequence-index
+        for group in all_groups['results']['items']:
+            # If we're supposed to be in this group
+            if group['metadata']['name'] in self.groups \
+               and (group['users'] is None or self.config.username not in group['users']):
+                cmd = ['groups', 'add-users', group['metadata']['name'],
+                       self.config.username]
+                rval = self.openshift_cmd(cmd, oadm=True)
+                if rval['returncode'] != 0:
+                    return rval
+            # else if we're in the group, but aren't supposed to be
+            elif group['users'] is not None and self.config.username in group['users'] \
+                 and group['metadata']['name'] not in self.groups:
+                cmd = ['groups', 'remove-users', group['metadata']['name'],
+                       self.config.username]
+                rval = self.openshift_cmd(cmd, oadm=True)
+                if rval['returncode'] != 0:
+                    return rval
+
+        return rval
+
+    def update(self):
+        ''' update the object '''
+        rval = self.group_update()
+        if rval['returncode'] != 0:
+            return rval
+
+        # need to update the user's info
+        return self._replace_content(self.kind, self.config.username, self.config.data, force=True)
+
+    def needs_group_update(self):
+        ''' check if there are group membership changes '''
+        cmd = ['get', 'groups', '-o', 'json']
+        all_groups = self.openshift_cmd(cmd, output=True)
+
+        # pylint misidentifying all_groups['results']['items'] type
+        # pylint: disable=invalid-sequence-index
+        for group in all_groups['results']['items']:
+            # If we're supposed to be in this group
+            if group['metadata']['name'] in self.groups \
+               and (group['users'] is None or self.config.username not in group['users']):
+                return True
+            # else if we're in the group, but aren't supposed to be
+            elif group['users'] is not None and 
self.config.username in group['users'] \ +                 and group['metadata']['name'] not in self.groups: +                return True + +        return False + +    def needs_update(self): +        ''' verify an update is needed ''' +        skip = [] +        if self.needs_group_update(): +            return True + +        return not Utils.check_def_equal(self.config.data, self.user.yaml_dict, skip_keys=skip, debug=True) + +    # pylint: disable=too-many-return-statements +    @staticmethod +    def run_ansible(params, check_mode=False): +        ''' run the idempotent ansible code + +            params comes from the ansible portion of this module +            check_mode: does the module support check mode. (module.check_mode) +        ''' + +        uconfig = UserConfig(params['kubeconfig'], +                             params['username'], +                             params['full_name'], +                            ) + +        oc_user = OCUser(uconfig, params['groups'], +                         verbose=params['debug']) +        state = params['state'] + +        api_rval = oc_user.get() + +        ##### +        # Get +        ##### +        if state == 'list': +            return {'changed': False, 'results': api_rval['results'], 'state': "list"} + +        ######## +        # Delete +        ######## +        if state == 'absent': +            if oc_user.exists(): + +                if check_mode: +                    return {'changed': False, 'msg': 'Would have performed a delete.'} + +                api_rval = oc_user.delete() + +                return {'changed': True, 'results': api_rval, 'state': "absent"} +            return {'changed': False, 'state': "absent"} + +        if state == 'present': +            ######## +            # Create +            ######## +            if not oc_user.exists(): + +                if check_mode: +                    return {'changed': False, 'msg': 'Would have performed a create.'} + +                # Create it here +                api_rval = oc_user.create() + +                if api_rval['returncode'] != 0: +                    return {'failed': True, 'msg': api_rval} + +                # return the created object +                api_rval = oc_user.get() + +                if api_rval['returncode'] != 0: +                    return {'failed': True, 'msg': api_rval} + +                return {'changed': True, 'results': api_rval, 'state': "present"} + +            ######## +            # Update +            ######## +            if oc_user.needs_update(): +                api_rval = oc_user.update() + +                if api_rval['returncode'] != 0: +                    return {'failed': True, 'msg': api_rval} + +                orig_cmd = api_rval['cmd'] +                # return the created object +                api_rval = oc_user.get() +                # overwrite the get/list cmd +                api_rval['cmd'] = orig_cmd + +                if api_rval['returncode'] != 0: +                    return {'failed': True, 'msg': api_rval} + +                return {'changed': True, 'results': api_rval, 'state': "present"} + +            return {'changed': False, 'results': api_rval, 'state': "present"} + +        return {'failed': True, +                'changed': False, +                'results': 'Unknown state passed. 
%s' % state, +                'state': "unknown"} + +# -*- -*- -*- End included fragment: class/oc_user.py -*- -*- -*- + +# -*- -*- -*- Begin included fragment: ansible/oc_user.py -*- -*- -*- + +def main(): +    ''' +    ansible oc module for user +    ''' + +    module = AnsibleModule( +        argument_spec=dict( +            kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'), +            state=dict(default='present', type='str', +                       choices=['present', 'absent', 'list']), +            debug=dict(default=False, type='bool'), +            username=dict(default=None, type='str'), +            full_name=dict(default=None, type='str'), +            # setting groups for user data will not populate the +            # 'groups' field in the user data. +            # it will call out to the group data and make the user +            # entry there +            groups=dict(default=[], type='list'), +        ), +        supports_check_mode=True, +    ) + +    results = OCUser.run_ansible(module.params, module.check_mode) + +    if 'failed' in results: +        module.fail_json(**results) + +    module.exit_json(**results) + +if __name__ == '__main__': +    main() + +# -*- -*- -*- End included fragment: ansible/oc_user.py -*- -*- -*- diff --git a/roles/lib_openshift/src/ansible/oc_configmap.py b/roles/lib_openshift/src/ansible/oc_configmap.py new file mode 100644 index 000000000..974f72499 --- /dev/null +++ b/roles/lib_openshift/src/ansible/oc_configmap.py @@ -0,0 +1,32 @@ +# pylint: skip-file +# flake8: noqa + + +def main(): +    ''' +    ansible oc module for managing OpenShift configmap objects +    ''' + +    module = AnsibleModule( +        argument_spec=dict( +            kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'), +            state=dict(default='present', type='str', +                       choices=['present', 'absent', 'list']), +            debug=dict(default=False, type='bool'), +            namespace=dict(default='default', type='str'), +            name=dict(default=None, required=True, type='str'), +            from_file=dict(default=None, type='dict'), +            from_literal=dict(default=None, type='dict'), +        ), +        supports_check_mode=True, +    ) + + +    rval = OCConfigMap.run_ansible(module.params, module.check_mode) +    if 'failed' in rval: +        module.fail_json(**rval) + +    module.exit_json(**rval) + +if __name__ == '__main__': +    main() diff --git a/roles/lib_openshift/src/ansible/oc_pvc.py b/roles/lib_openshift/src/ansible/oc_pvc.py new file mode 100644 index 000000000..a5181e281 --- /dev/null +++ b/roles/lib_openshift/src/ansible/oc_pvc.py @@ -0,0 +1,35 @@ +# pylint: skip-file +# flake8: noqa + +#pylint: disable=too-many-branches +def main(): +    ''' +    ansible oc module for pvc +    ''' + +    module = AnsibleModule( +        argument_spec=dict( +            kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'), +            state=dict(default='present', type='str', +                       choices=['present', 'absent', 'list']), +            debug=dict(default=False, type='bool'), +            name=dict(default=None, required=True, type='str'), +            namespace=dict(default=None, required=True, type='str'), +            volume_capacity=dict(default='1G', type='str'), +            access_modes=dict(default='ReadWriteOnce', +                              choices=['ReadWriteOnce', 'ReadOnlyMany', 'ReadWriteMany'], +                              
type='str'), +        ), +        supports_check_mode=True, +    ) + +    rval = OCPVC.run_ansible(module.params, module.check_mode) + +    if 'failed' in rval: +        module.fail_json(**rval) + +    return module.exit_json(**rval) + + +if __name__ == '__main__': +    main() diff --git a/roles/lib_openshift/src/ansible/oc_user.py b/roles/lib_openshift/src/ansible/oc_user.py new file mode 100644 index 000000000..6b1440796 --- /dev/null +++ b/roles/lib_openshift/src/ansible/oc_user.py @@ -0,0 +1,34 @@ +# pylint: skip-file +# flake8: noqa + +def main(): +    ''' +    ansible oc module for user +    ''' + +    module = AnsibleModule( +        argument_spec=dict( +            kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'), +            state=dict(default='present', type='str', +                       choices=['present', 'absent', 'list']), +            debug=dict(default=False, type='bool'), +            username=dict(default=None, type='str'), +            full_name=dict(default=None, type='str'), +            # setting groups for user data will not populate the +            # 'groups' field in the user data. +            # it will call out to the group data and make the user +            # entry there +            groups=dict(default=[], type='list'), +        ), +        supports_check_mode=True, +    ) + +    results = OCUser.run_ansible(module.params, module.check_mode) + +    if 'failed' in results: +        module.fail_json(**results) + +    module.exit_json(**results) + +if __name__ == '__main__': +    main() diff --git a/roles/lib_openshift/src/class/oc_configmap.py b/roles/lib_openshift/src/class/oc_configmap.py new file mode 100644 index 000000000..87de3e1df --- /dev/null +++ b/roles/lib_openshift/src/class/oc_configmap.py @@ -0,0 +1,187 @@ +# pylint: skip-file +# flake8: noqa + + +# pylint: disable=too-many-arguments +class OCConfigMap(OpenShiftCLI): +    ''' Openshift ConfigMap Class + +        ConfigMaps are a way to store data inside of objects +    ''' +    def __init__(self, +                 name, +                 from_file, +                 from_literal, +                 state, +                 namespace, +                 kubeconfig='/etc/origin/master/admin.kubeconfig', +                 verbose=False): +        ''' Constructor for OpenshiftOC ''' +        super(OCConfigMap, self).__init__(namespace, kubeconfig=kubeconfig, verbose=verbose) +        self.name = name +        self.state = state +        self._configmap = None +        self._inc_configmap = None +        self.from_file = from_file if from_file is not None else {} +        self.from_literal = from_literal if from_literal is not None else {} + +    @property +    def configmap(self): +        if self._configmap is None: +            self._configmap = self.get() + +        return self._configmap + +    @configmap.setter +    def configmap(self, inc_map): +        self._configmap = inc_map + +    @property +    def inc_configmap(self): +        if self._inc_configmap is None: +            results = self.create(dryrun=True, output=True) +            self._inc_configmap = results['results'] + +        return self._inc_configmap + +    @inc_configmap.setter +    def inc_configmap(self, inc_map): +        self._inc_configmap = inc_map + +    def from_file_to_params(self): +        '''return from_files in a string ready for cli''' +        return ["--from-file={}={}".format(key, value) for key, value in self.from_file.items()] + +    def from_literal_to_params(self): +        '''return 
from_literal in a string ready for cli''' +        return ["--from-literal={}={}".format(key, value) for key, value in self.from_literal.items()] + +    def get(self): +        '''return a configmap by name ''' +        results = self._get('configmap', self.name) +        if results['returncode'] == 0 and results['results'][0]: +            self.configmap = results['results'][0] + +        if results['returncode'] != 0 and '"{}" not found'.format(self.name) in results['stderr']: +            results['returncode'] = 0 + +        return results + +    def delete(self): +        '''delete a configmap by name''' +        return self._delete('configmap', self.name) + +    def create(self, dryrun=False, output=False): +        '''Create a configmap + +           :dryrun: Render what would be created without creating it. default: False +           :output: Whether to parse output. default: False +        ''' + +        cmd = ['create', 'configmap', self.name] +        if self.from_literal is not None: +            cmd.extend(self.from_literal_to_params()) + +        if self.from_file is not None: +            cmd.extend(self.from_file_to_params()) + +        if dryrun: +            cmd.extend(['--dry-run', '-ojson']) + +        results = self.openshift_cmd(cmd, output=output) + +        return results + +    def update(self): +        '''update the configmap''' +        return self._replace_content('configmap', self.name, self.inc_configmap) + +    def needs_update(self): +        '''compare the current configmap with the proposed one; return True if an update is needed''' +        return not Utils.check_def_equal(self.inc_configmap, self.configmap, debug=self.verbose) + +    @staticmethod +    # pylint: disable=too-many-return-statements,too-many-branches +    # TODO: This function should be refactored into its individual parts.
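+    # Overview of the flow below: fetch the live configmap, then branch on the +    # requested state: 'list' returns the lookup as-is, 'absent' deletes only when +    # the object exists, and 'present' creates the configmap when missing or +    # replaces it when the dry-run rendering differs from the live object; +    # 'changed' is reported only on branches that actually ran an oc command.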
+    def run_ansible(params, check_mode): +        '''run the ansible idempotent code''' + +        oc_cm = OCConfigMap(params['name'], +                            params['from_file'], +                            params['from_literal'], +                            params['state'], +                            params['namespace'], +                            kubeconfig=params['kubeconfig'], +                            verbose=params['debug']) + +        state = params['state'] + +        api_rval = oc_cm.get() + +        if 'failed' in api_rval: +            return {'failed': True, 'msg': api_rval} + +        ##### +        # Get +        ##### +        if state == 'list': +            return {'changed': False, 'results': api_rval, 'state': state} + +        ######## +        # Delete +        ######## +        if state == 'absent': +            if not Utils.exists(api_rval['results'], params['name']): +                return {'changed': False, 'state': 'absent'} + +            if check_mode: +                return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a delete.'} + +            api_rval = oc_cm.delete() + +            if api_rval['returncode'] != 0: +                return {'failed': True, 'msg': api_rval} + +            return {'changed': True, 'results': api_rval, 'state': state} + +        ######## +        # Create +        ######## +        if state == 'present': +            if not Utils.exists(api_rval['results'], params['name']): + +                if check_mode: +                    return {'changed': True, 'msg': 'Would have performed a create.'} + +                api_rval = oc_cm.create() + +                if api_rval['returncode'] != 0: +                    return {'failed': True, 'msg': api_rval} + +                api_rval = oc_cm.get() + +                if api_rval['returncode'] != 0: +                    return {'failed': True, 'msg': api_rval} + +                return {'changed': True, 'results': api_rval, 'state': state} + +            ######## +            # Update +            ######## +            if oc_cm.needs_update(): + +                api_rval = oc_cm.update() + +                if api_rval['returncode'] != 0: +                    return {'failed': True, 'msg': api_rval} + +                api_rval = oc_cm.get() + +                if api_rval['returncode'] != 0: +                    return {'failed': True, 'msg': api_rval} + +                return {'changed': True, 'results': api_rval, 'state': state} + +            return {'changed': False, 'results': api_rval, 'state': state} + +        return {'failed': True, 'msg': 'Unknown state passed. 
{}'.format(state)} diff --git a/roles/lib_openshift/src/class/oc_pvc.py b/roles/lib_openshift/src/class/oc_pvc.py new file mode 100644 index 000000000..c73abc47c --- /dev/null +++ b/roles/lib_openshift/src/class/oc_pvc.py @@ -0,0 +1,167 @@ +# pylint: skip-file +# flake8: noqa + + +# pylint: disable=too-many-instance-attributes +class OCPVC(OpenShiftCLI): +    ''' Class to wrap the oc command line tools ''' +    kind = 'pvc' + +    # pylint allows 5 +    # pylint: disable=too-many-arguments +    def __init__(self, +                 config, +                 verbose=False): +        ''' Constructor for OCPVC ''' +        super(OCPVC, self).__init__(config.namespace, config.kubeconfig) +        self.config = config +        self.namespace = config.namespace +        self._pvc = None + +    @property +    def pvc(self): +        ''' property function pvc''' +        if not self._pvc: +            self.get() +        return self._pvc + +    @pvc.setter +    def pvc(self, data): +        ''' setter function for pvc var ''' +        self._pvc = data + +    def bound(self): +        '''return whether the pvc is bound''' +        if self.pvc.get_volume_name(): +            return True + +        return False + +    def exists(self): +        ''' return whether a pvc exists ''' +        if self.pvc: +            return True + +        return False + +    def get(self): +        '''return pvc information ''' +        result = self._get(self.kind, self.config.name) +        if result['returncode'] == 0: +            self.pvc = PersistentVolumeClaim(content=result['results'][0]) +        elif '\"%s\" not found' % self.config.name in result['stderr']: +            result['returncode'] = 0 +            result['results'] = [{}] + +        return result + +    def delete(self): +        '''delete the object''' +        return self._delete(self.kind, self.config.name) + +    def create(self): +        '''create the object''' +        return self._create_from_content(self.config.name, self.config.data) + +    def update(self): +        '''update the object''' +        # replace the existing pvc definition with the configured data +        return self._replace_content(self.kind, self.config.name, self.config.data) + +    def needs_update(self): +        ''' verify an update is needed ''' +        if self.pvc.get_volume_name() or self.pvc.is_bound(): +            return False + +        skip = [] +        return not Utils.check_def_equal(self.config.data, self.pvc.yaml_dict, skip_keys=skip, debug=True) + +    # pylint: disable=too-many-branches,too-many-return-statements +    @staticmethod +    def run_ansible(params, check_mode): +        '''run the idempotent ansible code''' +        pconfig = PersistentVolumeClaimConfig(params['name'], +                                              params['namespace'], +                                              params['kubeconfig'], +                                              params['access_modes'], +                                              params['volume_capacity'], +                                             ) +        oc_pvc = OCPVC(pconfig, verbose=params['debug']) + +        state = params['state'] + +        api_rval = oc_pvc.get() +        if api_rval['returncode'] != 0: +            return {'failed': True, 'msg': api_rval} + +        ##### +        # Get +        ##### +        if state == 'list': +            return {'changed': False, 'results': api_rval['results'], 'state': state} + +        ######## +        # Delete +        ######## +        if
state == 'absent': +            if oc_pvc.exists(): + +                if check_mode: +                    return {'changed': False, 'msg': 'CHECK_MODE: Would have performed a delete.'} + +                api_rval = oc_pvc.delete() + +                if api_rval['returncode'] != 0: +                    return {'failed': True, 'msg': api_rval} + +                return {'changed': True, 'results': api_rval, 'state': state} + +            return {'changed': False, 'state': state} + +        if state == 'present': +            ######## +            # Create +            ######## +            if not oc_pvc.exists(): + +                if check_mode: +                    return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a create.'} + +                # Create it here +                api_rval = oc_pvc.create() + +                if api_rval['returncode'] != 0: +                    return {'failed': True, 'msg': api_rval} + +                # return the created object +                api_rval = oc_pvc.get() + +                if api_rval['returncode'] != 0: +                    return {'failed': True, 'msg': api_rval} + +                return {'changed': True, 'results': api_rval, 'state': state} + +            ######## +            # Update +            ######## +            if oc_pvc.pvc.is_bound() or oc_pvc.pvc.get_volume_name(): +                api_rval['msg'] = '##### - This volume is currently bound.  Will not update - ####' +                return {'changed': False, 'results': api_rval, 'state': state} + +            if oc_pvc.needs_update(): +                api_rval = oc_pvc.update() + +                if api_rval['returncode'] != 0: +                    return {'failed': True, 'msg': api_rval} + +                # return the created object +                api_rval = oc_pvc.get() + +                if api_rval['returncode'] != 0: +                    return {'failed': True, 'msg': api_rval} + +                return {'changed': True, 'results': api_rval, 'state': state} + +            return {'changed': False, 'results': api_rval, 'state': state} + +        return {'failed': True, 'msg': 'Unknown state passed. 
{}'.format(state)} diff --git a/roles/lib_openshift/src/class/oc_user.py b/roles/lib_openshift/src/class/oc_user.py new file mode 100644 index 000000000..d9e4eac13 --- /dev/null +++ b/roles/lib_openshift/src/class/oc_user.py @@ -0,0 +1,227 @@ +# pylint: skip-file +# flake8: noqa + +# pylint: disable=too-many-instance-attributes +class OCUser(OpenShiftCLI): +    ''' Class to wrap the oc command line tools ''' +    kind = 'users' + +    def __init__(self, +                 config, +                 groups=None, +                 verbose=False): +        ''' Constructor for OCUser ''' +        # namespace has no meaning for user operations, hardcode to 'default' +        super(OCUser, self).__init__('default', config.kubeconfig) +        self.config = config +        self.groups = groups +        self._user = None + +    @property +    def user(self): +        ''' property function user''' +        if not self._user: +            self.get() +        return self._user + +    @user.setter +    def user(self, data): +        ''' setter function for user ''' +        self._user = data + +    def exists(self): +        ''' return whether a user exists ''' +        if self.user: +            return True + +        return False + +    def get(self): +        ''' return user information ''' +        result = self._get(self.kind, self.config.username) +        if result['returncode'] == 0: +            self.user = User(content=result['results'][0]) +        elif 'users \"%s\" not found' % self.config.username in result['stderr']: +            result['returncode'] = 0 +            result['results'] = [{}] + +        return result + +    def delete(self): +        ''' delete the object ''' +        return self._delete(self.kind, self.config.username) + +    def create_group_entries(self): +        ''' make entries for user to the provided group list ''' +        if self.groups is not None: +            for group in self.groups: +                cmd = ['groups', 'add-users', group, self.config.username] +                rval = self.openshift_cmd(cmd, oadm=True) +                if rval['returncode'] != 0: +                    return rval + +        return {'returncode': 0} + +    def create(self): +        ''' create the object ''' +        rval = self.create_group_entries() +        if rval['returncode'] != 0: +            return rval + +        return self._create_from_content(self.config.username, self.config.data) + +    def group_update(self): +        ''' update group membership ''' +        rval = {'returncode': 0} +        cmd = ['get', 'groups', '-o', 'json'] +        all_groups = self.openshift_cmd(cmd, output=True) + +        # pylint misidentifying all_groups['results']['items'] type +        # pylint: disable=invalid-sequence-index +        for group in all_groups['results']['items']: +            # If we're supposed to be in this group +            if group['metadata']['name'] in self.groups \ +               and (group['users'] is None or self.config.username not in group['users']): +                cmd = ['groups', 'add-users', group['metadata']['name'], +                       self.config.username] +                rval = self.openshift_cmd(cmd, oadm=True) +                if rval['returncode'] != 0: +                    return rval +            # else if we're in the group, but aren't supposed to be +            elif group['users'] is not None and self.config.username in group['users'] \ +                 and group['metadata']['name'] not in self.groups: +                cmd = ['groups', 'remove-users', group['metadata']['name'], +                       self.config.username] +                rval = self.openshift_cmd(cmd, oadm=True) +                if rval['returncode'] != 0: +                    return rval + +        return rval + +    def update(self): +        ''' update the object ''' +        rval = self.group_update() +        if rval['returncode'] != 0: +            return rval + +        # need to update the user's info +        return self._replace_content(self.kind, self.config.username, self.config.data, force=True) + +    def needs_group_update(self): +        ''' check if there are group membership changes ''' +        cmd = ['get', 'groups', '-o', 'json'] +        all_groups = self.openshift_cmd(cmd, output=True) + +        # pylint misidentifying all_groups['results']['items'] type +        # pylint: disable=invalid-sequence-index +        for group in all_groups['results']['items']: +            # If we're supposed to be in this group +            if group['metadata']['name'] in self.groups \ +               and (group['users'] is None or self.config.username not in group['users']): +                return True +            # else if we're in the group, but aren't supposed to be +            elif group['users'] is not None and self.config.username in group['users'] \ +                 and group['metadata']['name'] not in self.groups: +                return True + +        return False + +    def needs_update(self): +        ''' verify an update is needed ''' +        skip = [] +        if self.needs_group_update(): +            return True + +        return not Utils.check_def_equal(self.config.data, self.user.yaml_dict, skip_keys=skip, debug=True) + +    # pylint: disable=too-many-return-statements +    @staticmethod +    def run_ansible(params, check_mode=False): +        ''' run the idempotent ansible code + +            params comes from the ansible portion of this module +            check_mode: does the module support check mode.
(module.check_mode) +        ''' + +        uconfig = UserConfig(params['kubeconfig'], +                             params['username'], +                             params['full_name'], +                            ) + +        oc_user = OCUser(uconfig, params['groups'], +                         verbose=params['debug']) +        state = params['state'] + +        api_rval = oc_user.get() + +        ##### +        # Get +        ##### +        if state == 'list': +            return {'changed': False, 'results': api_rval['results'], 'state': "list"} + +        ######## +        # Delete +        ######## +        if state == 'absent': +            if oc_user.exists(): + +                if check_mode: +                    return {'changed': False, 'msg': 'Would have performed a delete.'} + +                api_rval = oc_user.delete() + +                return {'changed': True, 'results': api_rval, 'state': "absent"} +            return {'changed': False, 'state': "absent"} + +        if state == 'present': +            ######## +            # Create +            ######## +            if not oc_user.exists(): + +                if check_mode: +                    return {'changed': False, 'msg': 'Would have performed a create.'} + +                # Create it here +                api_rval = oc_user.create() + +                if api_rval['returncode'] != 0: +                    return {'failed': True, 'msg': api_rval} + +                # return the created object +                api_rval = oc_user.get() + +                if api_rval['returncode'] != 0: +                    return {'failed': True, 'msg': api_rval} + +                return {'changed': True, 'results': api_rval, 'state': "present"} + +            ######## +            # Update +            ######## +            if oc_user.needs_update(): +                api_rval = oc_user.update() + +                if api_rval['returncode'] != 0: +                    return {'failed': True, 'msg': api_rval} + +                orig_cmd = api_rval['cmd'] +                # return the created object +                api_rval = oc_user.get() +                # overwrite the get/list cmd +                api_rval['cmd'] = orig_cmd + +                if api_rval['returncode'] != 0: +                    return {'failed': True, 'msg': api_rval} + +                return {'changed': True, 'results': api_rval, 'state': "present"} + +            return {'changed': False, 'results': api_rval, 'state': "present"} + +        return {'failed': True, +                'changed': False, +                'results': 'Unknown state passed. %s' % state, +                'state': "unknown"} diff --git a/roles/lib_openshift/src/doc/configmap b/roles/lib_openshift/src/doc/configmap new file mode 100644 index 000000000..5ca8292c4 --- /dev/null +++ b/roles/lib_openshift/src/doc/configmap @@ -0,0 +1,72 @@ +# flake8: noqa +# pylint: skip-file + +DOCUMENTATION = ''' +--- +module: oc_configmap +short_description: Modify, and idempotently manage openshift configmaps +description: +  - Modify openshift configmaps programmatically. 
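+  - Idempotency is implemented by rendering the requested configmap with a dry-run create and comparing the result to the live object; a replace is only issued when the two differ.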
+options: +  state: +    description: +    - Supported states, present, absent, list +    - present - will ensure object is created or updated to the value specified +    - list - will return a configmap +    - absent - will remove the configmap +    required: False +    default: present +    choices: ["present", 'absent', 'list'] +    aliases: [] +  kubeconfig: +    description: +    - The path for the kubeconfig file to use for authentication +    required: false +    default: /etc/origin/master/admin.kubeconfig +    aliases: [] +  debug: +    description: +    - Turn on debug output. +    required: false +    default: False +    aliases: [] +  name: +    description: +    - Name of the object that is being queried. +    required: True +    default: None +    aliases: [] +  namespace: +    description: +    - The namespace where the object lives. +    required: false +    default: default +    aliases: [] +  from_file: +    description: +    - A dict of key, value pairs representing the configmap key and the value represents the file path. +    required: false +    default: None +    aliases: [] +  from_literal: +    description: +    - A dict of key, value pairs representing the configmap key and the value represents the string content +    required: false +    default: None +    aliases: [] +author: +- "kenny woodson <kwoodson@redhat.com>" +extends_documentation_fragment: [] +''' + +EXAMPLES = ''' +- name: create a configmap +  oc_configmap: +    state: present +    name: testmap +    from_file: +      secret: /path/to/secret +    from_literal: +      title: systemadmin +  register: configout +''' diff --git a/roles/lib_openshift/src/doc/pvc b/roles/lib_openshift/src/doc/pvc new file mode 100644 index 000000000..9240f2a0f --- /dev/null +++ b/roles/lib_openshift/src/doc/pvc @@ -0,0 +1,76 @@ +# flake8: noqa +# pylint: skip-file + +DOCUMENTATION = ''' +--- +module: oc_pvc +short_description: Modify, and idempotently manage openshift persistent volume claims +description: +  - Modify openshift persistent volume claims programmatically. +options: +  state: +    description: +    - Supported states, present, absent, list +    - present - will ensure object is created or updated to the value specified +    - list - will return a pvc +    - absent - will remove a pvc +    required: False +    default: present +    choices: ["present", 'absent', 'list'] +    aliases: [] +  kubeconfig: +    description: +    - The path for the kubeconfig file to use for authentication +    required: false +    default: /etc/origin/master/admin.kubeconfig +    aliases: [] +  debug: +    description: +    - Turn on debug output. +    required: false +    default: False +    aliases: [] +  name: +    description: +    - Name of the object that is being queried. +    required: true +    default: None +    aliases: [] +  namespace: +    description: +    - The namespace where the object lives.
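+    - Required by oc_pvc, since persistent volume claims are namespace-scoped objects.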
+    required: true +    default: None +    aliases: [] +  volume_capacity: +    description: +    - The requested volume capacity +    required: False +    default: 1G +    aliases: [] +  access_modes: +    description: +    - The access modes allowed for the pvc +    - Expects a list +    required: False +    default: ReadWriteOnce +    choices: +    - ReadWriteOnce +    - ReadOnlyMany +    - ReadWriteMany +    aliases: [] +author: +- "Kenny Woodson <kwoodson@redhat.com>" +extends_documentation_fragment: [] +''' + +EXAMPLES = ''' +- name: create a pvc +  oc_pvc: +    namespace: awesomeapp +    name: dbstorage +    access_modes: +    - ReadWriteOnce +    volume_capacity: 5G +  register: pvcout +''' diff --git a/roles/lib_openshift/src/doc/user b/roles/lib_openshift/src/doc/user new file mode 100644 index 000000000..65ee01eb7 --- /dev/null +++ b/roles/lib_openshift/src/doc/user @@ -0,0 +1,128 @@ +# flake8: noqa +# pylint: skip-file + +DOCUMENTATION = ''' +--- +module: oc_user +short_description: Create, modify, and idempotently manage openshift users. +description: +  - Modify openshift users programmatically. +options: +  state: +    description: +    - State controls the action that will be taken with the resource +    - 'present' will create or update a user to the desired state +    - 'absent' will ensure user is removed +    - 'list' will read and return a list of users +    default: present +    choices: ["present", "absent", "list"] +    aliases: [] +  kubeconfig: +    description: +    - The path for the kubeconfig file to use for authentication +    required: false +    default: /etc/origin/master/admin.kubeconfig +    aliases: [] +  debug: +    description: +    - Turn on debug output. +    required: false +    default: False +    aliases: [] +  username: +    description: +    - Short username to query/modify. +    required: false +    default: None +    aliases: [] +  full_name: +    description: +    - String with the full name/description of the user. +    required: false +    default: None +    aliases: [] +  groups: +    description: +    - List of groups the user should be a member of. This does not add/update the legacy 'groups' field in the OpenShift user object, but makes user entries into the appropriate OpenShift group object for the given user.
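+    - When state is present, groups that currently contain the user but are not in this list will have the user removed from them.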
+    required: false +    default: [] +    aliases: [] +author: +- "Joel Diaz <jdiaz@redhat.com>" +extends_documentation_fragment: [] +''' + +EXAMPLES = ''' +- name: Ensure user exists +  oc_user: +    state: present +    username: johndoe +    full_name: "John Doe" +    groups: +    - dedicated-admins +  register: user_johndoe + +user_johndoe variable will have contents like: +ok: [ded-int-aws-master-61034] => { +    "user_johndoe": { +        "changed": true, +        "results": { +            "cmd": "oc -n default get users johndoe -o json", +            "results": [ +                { +                    "apiVersion": "v1", +                    "fullName": "John Doe", +                    "groups": null, +                    "identities": null, +                    "kind": "User", +                    "metadata": { +                        "creationTimestamp": "2017-02-28T15:09:21Z", +                        "name": "johndoe", +                        "resourceVersion": "848781", +                        "selfLink": "/oapi/v1/users/johndoe", +                        "uid": "e23d3300-fdc7-11e6-9e3e-12822d6b7656" +                    } +                } +            ], +            "returncode": 0 +        }, +        "state": "present" +    } +} +'groups' is null here because oc_user manages membership through OpenShift group objects rather than the user object's legacy 'groups' field. + +- name: Ensure user does not exist +  oc_user: +    state: absent +    username: johndoe + +- name: List user's info +  oc_user: +    state: list +    username: johndoe +  register: user_johndoe + +user_johndoe will have contents similar to: +ok: [ded-int-aws-master-61034] => { +    "user_johndoe": { +        "changed": false, +        "results": [ +            { +                "apiVersion": "v1", +                "fullName": "John Doe", +                "groups": null, +                "identities": null, +                "kind": "User", +                "metadata": { +                    "creationTimestamp": "2017-02-28T15:04:44Z", +                    "name": "johndoe", +                    "resourceVersion": "848280", +                    "selfLink": "/oapi/v1/users/johndoe", +                    "uid": "3d479ad2-fdc7-11e6-9e3e-12822d6b7656" +                } +            } +        ], +        "state": "list" +    } +} +''' diff --git a/roles/lib_openshift/src/lib/pvc.py b/roles/lib_openshift/src/lib/pvc.py new file mode 100644 index 000000000..929b50990 --- /dev/null +++ b/roles/lib_openshift/src/lib/pvc.py @@ -0,0 +1,167 @@ +# pylint: skip-file +# flake8: noqa + + +# pylint: disable=too-many-instance-attributes +class PersistentVolumeClaimConfig(object): +    ''' Handle pvc options ''' +    # pylint: disable=too-many-arguments +    def __init__(self, +                 sname, +                 namespace, +                 kubeconfig, +                 access_modes=None, +                 vol_capacity='1G'): +        ''' constructor for handling pvc options ''' +        self.kubeconfig = kubeconfig +        self.name = sname +        self.namespace = namespace +        self.access_modes = access_modes +        self.vol_capacity = vol_capacity +        self.data = {} + +        self.create_dict() + +    def create_dict(self): +        ''' return a pvc as a dict ''' +        # version +        self.data['apiVersion'] = 'v1' +        # kind +        self.data['kind'] = 'PersistentVolumeClaim' +        # metadata +        self.data['metadata'] = {} +        self.data['metadata']['name'] = self.name +        # spec +        self.data['spec'] = {} +
        self.data['spec']['accessModes'] = ['ReadWriteOnce'] +        if self.access_modes: +            self.data['spec']['accessModes'] = self.access_modes + +        # storage capacity +        self.data['spec']['resources'] = {} +        self.data['spec']['resources']['requests'] = {} +        self.data['spec']['resources']['requests']['storage'] = self.vol_capacity + + +# pylint: disable=too-many-instance-attributes,too-many-public-methods +class PersistentVolumeClaim(Yedit): +    ''' Class to model an OpenShift PersistentVolumeClaim definition ''' +    access_modes_path = "spec.accessModes" +    volume_capacity_path = "spec.resources.requests.storage" +    volume_name_path = "spec.volumeName" +    bound_path = "status.phase" +    kind = 'PersistentVolumeClaim' + +    def __init__(self, content): +        '''PersistentVolumeClaim constructor''' +        super(PersistentVolumeClaim, self).__init__(content=content) +        self._access_modes = None +        self._volume_capacity = None +        self._volume_name = None + +    @property +    def volume_name(self): +        ''' volume_name property ''' +        if self._volume_name is None: +            self._volume_name = self.get_volume_name() +        return self._volume_name + +    @volume_name.setter +    def volume_name(self, data): +        ''' volume_name property setter''' +        self._volume_name = data + +    @property +    def access_modes(self): +        ''' access_modes property ''' +        if self._access_modes is None: +            self._access_modes = self.get_access_modes() +            if not isinstance(self._access_modes, list): +                self._access_modes = list(self._access_modes) + +        return self._access_modes + +    @access_modes.setter +    def access_modes(self, data): +        ''' access_modes property setter''' +        if not isinstance(data, list): +            data = list(data) + +        self._access_modes = data + +    @property +    def volume_capacity(self): +        ''' volume_capacity property ''' +        if self._volume_capacity is None: +            self._volume_capacity = self.get_volume_capacity() +        return self._volume_capacity + +    @volume_capacity.setter +    def volume_capacity(self, data): +        ''' volume_capacity property setter''' +        self._volume_capacity = data + +    def get_access_modes(self): +        '''get access_modes''' +        return self.get(PersistentVolumeClaim.access_modes_path) or [] + +    def get_volume_capacity(self): +        '''get volume_capacity''' +        return self.get(PersistentVolumeClaim.volume_capacity_path) or [] + +    def get_volume_name(self): +        '''get volume_name''' +        return self.get(PersistentVolumeClaim.volume_name_path) or [] + +    def is_bound(self): +        '''return whether volume is bound''' +        return self.get(PersistentVolumeClaim.bound_path) or [] + +    #### ADD ##### +    def add_access_mode(self, inc_mode): +        ''' add an access_mode''' +        if self.access_modes: +            self.access_modes.append(inc_mode) +        else: +            self.put(PersistentVolumeClaim.access_modes_path, [inc_mode]) + +        return True + +    #### /ADD ##### + +    #### Remove ##### +    def remove_access_mode(self, inc_mode): +        ''' remove an access_mode''' +        try: +            self.access_modes.remove(inc_mode) +        except ValueError as _: +            return False + +        return True + +    #### /REMOVE ##### + +    #### UPDATE ##### +    def update_access_mode(self, inc_mode): +        ''' update an access_mode''' +
        try: +            index = self.access_modes.index(inc_mode) +        except ValueError as _: +            return self.add_access_mode(inc_mode) + +        self.access_modes[index] = inc_mode + +        return True + +    #### /UPDATE ##### + +    #### FIND #### +    def find_access_mode(self, inc_mode): +        ''' find an access_mode ''' +        index = None +        try: +            index = self.access_modes.index(inc_mode) +        except ValueError as _: +            return index + +        return index diff --git a/roles/lib_openshift/src/lib/user.py b/roles/lib_openshift/src/lib/user.py new file mode 100644 index 000000000..a14d5fc91 --- /dev/null +++ b/roles/lib_openshift/src/lib/user.py @@ -0,0 +1,37 @@ +# pylint: skip-file +# flake8: noqa + + +class UserConfig(object): +    ''' Handle user options ''' +    def __init__(self, +                 kubeconfig, +                 username, +                 full_name): +        ''' constructor for handling user options ''' +        self.kubeconfig = kubeconfig +        self.username = username +        self.full_name = full_name + +        self.data = {} +        self.create_dict() + +    def create_dict(self): +        ''' return a user as a dict ''' +        self.data['apiVersion'] = 'v1' +        self.data['fullName'] = self.full_name +        self.data['groups'] = None +        self.data['identities'] = None +        self.data['kind'] = 'User' +        self.data['metadata'] = {} +        self.data['metadata']['name'] = self.username + + +# pylint: disable=too-many-instance-attributes +class User(Yedit): +    ''' Class to model an OpenShift User object ''' +    kind = 'user' + +    def __init__(self, content): +        '''User constructor''' +        super(User, self).__init__(content=content) diff --git a/roles/lib_openshift/src/sources.yml b/roles/lib_openshift/src/sources.yml index 47951b48a..135e2b752 100644 --- a/roles/lib_openshift/src/sources.yml +++ b/roles/lib_openshift/src/sources.yml @@ -79,6 +79,16 @@ oc_atomic_container.py:  - doc/atomic_container  - ansible/oc_atomic_container.py +oc_configmap.py: +- doc/generated +- doc/license +- lib/import.py +- doc/configmap +- ../../lib_utils/src/class/yedit.py +- lib/base.py +- class/oc_configmap.py +- ansible/oc_configmap.py +  oc_edit.py:  - doc/generated  - doc/license @@ -163,6 +173,17 @@ oc_project.py:  - class/oc_project.py  - ansible/oc_project.py +oc_pvc.py: +- doc/generated +- doc/license +- lib/import.py +- doc/pvc +- ../../lib_utils/src/class/yedit.py +- lib/base.py +- lib/pvc.py +- class/oc_pvc.py +- ansible/oc_pvc.py +  oc_route.py:  - doc/generated  - doc/license @@ -230,6 +251,17 @@ oc_service.py:  - class/oc_service.py  - ansible/oc_service.py +oc_user.py: +- doc/generated +- doc/license +- lib/import.py +- doc/user +- ../../lib_utils/src/class/yedit.py +- lib/base.py +- lib/user.py +- class/oc_user.py +- ansible/oc_user.py +  oc_version.py:  - doc/generated  - doc/license diff --git a/roles/lib_openshift/src/test/integration/oc_configmap.yml b/roles/lib_openshift/src/test/integration/oc_configmap.yml new file mode 100755 index 000000000..c0d200e73 --- /dev/null +++ b/roles/lib_openshift/src/test/integration/oc_configmap.yml @@ -0,0 +1,95 @@ +#!/usr/bin/ansible-playbook --module-path=../../../library/ +## ./oc_configmap.yml -M ../../../library -e "cli_master_test=$OPENSHIFT_MASTER" +--- +- hosts: "{{ cli_master_test }}" +  gather_facts: no +  user: root +  vars: +    filename: /tmp/test_configmap_from_file + +  post_tasks: +  - name: Setup a file with known contents
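+    # the file seeded here exercises the from_file path below; oc_configmap surfaces its contents under the configmap's 'config' key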
+    copy: +      content: This is a file +      dest: "{{ filename }}" + +  - name: create a test project +    oc_project: +      name: test +      description: for tests only + +  ###### create test ########### +  - name: create a configmap +    oc_configmap: +      state: present +      name: configmaptest +      namespace: test +      from_file: +        config: "{{ filename }}" +      from_literal: +        foo: bar + +  - name: fetch the created configmap +    oc_configmap: +      name: configmaptest +      state: list +      namespace: test +    register: cmout + +  - debug: var=cmout + +  - name: assert configmaptest exists +    assert: +      that: +      - cmout.results.results[0].metadata.name == 'configmaptest' +      - cmout.results.results[0].data.foo == 'bar' +  ###### end create test ########### + +  ###### update test ########### +  - name: update the configmap +    oc_configmap: +      state: present +      name: configmaptest +      namespace: test +      from_file: +        config: "{{ filename }}" +      from_literal: +        foo: notbar +        deployment_type: online + +  - name: fetch the updated configmap +    oc_configmap: +      name: configmaptest +      state: list +      namespace: test +    register: cmout + +  - debug: var=cmout + +  - name: assert configmaptest was updated +    assert: +      that: +      - cmout.results.results[0].metadata.name == 'configmaptest' +      - cmout.results.results[0].data.deployment_type == 'online' +      - cmout.results.results[0].data.foo == 'notbar' +  ###### end update test ########### + +  ###### delete test ########### +  - name: delete a configmap +    oc_configmap: +      state: absent +      name: configmaptest +      namespace: test + +  - name: fetch the deleted configmap +    oc_configmap: +      name: configmaptest +      state: list +      namespace: test +    register: cmout + +  - debug: var=cmout + +  - name: assert configmaptest no longer exists +    assert: +      that: "'\"configmaptest\" not found' in cmout.results.stderr" diff --git a/roles/lib_openshift/src/test/integration/oc_user.yml b/roles/lib_openshift/src/test/integration/oc_user.yml new file mode 100755 index 000000000..ad1f9d188 --- /dev/null +++ b/roles/lib_openshift/src/test/integration/oc_user.yml @@ -0,0 +1,240 @@ +#!/usr/bin/ansible-playbook --module-path=../../../library/ +# +# ./oc_user.yml -e "cli_master_test=$OPENSHIFT_MASTER" +# +--- +- hosts: "{{ cli_master_test }}" +  gather_facts: no +  user: root + +  vars: +    test_user: testuser@email.com +    test_user_fullname: "Test User" +  pre_tasks: +  - name: ensure needed vars are defined +    fail: +      msg: "{{ item }} not defined" +    when: "{{ item }} is not defined" +    with_items: +    - cli_master_test  # ansible inventory instance to run playbook against + +  tasks: +  - name: delete test user (so future tests work) +    oc_user: +      state: absent +      username: "{{ test_user }}" + +  - name: get user list +    oc_user: +      state: list +      username: "{{ test_user }}" +    register: user_out +  - name: "assert test user does not exist" +    assert: +      that: user_out['results'][0] == {} +      msg: "{{ user_out }}" + +  - name: get list of all users +    oc_user: +      state: list +    register: user_out +  #- debug: var=user_out + +  - name: add test user +    oc_user: +      state: present +      username: "{{ test_user }}" +      full_name: "{{ test_user_fullname }}" +    register: user_out +  - name: assert result set to changed +    assert: +      that: user_out['changed'] == True +
    msg: "{{ user_out }}" + +  - name: check test user actually added +    oc_user: +      state: list +      username: "{{ test_user }}" +    register: user_out +  - name: assert user actually added +    assert: +      that: user_out['results'][0]['metadata']['name'] == "{{ test_user }}" and +            user_out['results'][0]['fullName'] == "{{ test_user_fullname }}" +      msg: "{{ user_out }}" + +  - name: re-add test user +    oc_user: +      state: present +      username: "{{ test_user }}" +      full_name: "{{ test_user_fullname }}" +    register: user_out +  - name: assert re-add result set to not changed +    assert: +      that: user_out['changed'] == False +      msg: "{{ user_out }}" + +  - name: modify existing user +    oc_user: +      state: present +      username: "{{ test_user }}" +      full_name: 'Something Different' +    register: user_out +  - name: assert modify existing user result set to changed +    assert: +      that: user_out['changed'] == True +      msg: "{{ user_out }}" + +  - name: check modify test user +    oc_user: +      state: list +      username: "{{ test_user }}" +    register: user_out +  - name: assert modification successful +    assert: +      that: user_out['results'][0]['metadata']['name'] == "{{ test_user }}" and +            user_out['results'][0]['fullName'] == 'Something Different' +      msg: "{{ user_out }}" + +  - name: delete test user +    oc_user: +      state: absent +      username: "{{ test_user }}" +    register: user_out +  - name: assert delete marked changed +    assert: +      that: user_out['changed'] == True +      msg: "{{ user_out }}" + +  - name: check delete user +    oc_user: +      state: list +      username: "{{ test_user }}" +    register: user_out +  - name: assert deletion successful +    assert: +      that: user_out['results'][0] == {} +      msg: "{{ user_out }}" + +  - name: re-delete test user +    oc_user: +      state: absent +      username: "{{ test_user }}" +    register: user_out +  - name: check re-delete marked not changed +    assert: +      that: user_out['changed'] == False +      msg: "{{ user_out }}" + +  - name: delete test group +    oc_obj: +      kind: group +      state: absent +      name: integration-test-group + +  - name: create test group +    command: oadm groups new integration-test-group + +  - name: check group creation +    oc_obj: +      kind: group +      state: list +      name: integration-test-group +    register: user_out +  - name: assert test group created +    assert: +      that: user_out['results']['results'][0]['metadata']['name'] == "integration-test-group" +      msg: "{{ user_out }}" + +  - name: create user with group membership +    oc_user: +      state: present +      username: "{{ test_user }}" +      groups: +      - "integration-test-group" +    register: user_out +  - debug: var=user_out +  - name: get group user members +    oc_obj: +      kind: group +      state: list +      name: integration-test-group +    register: user_out +  - name: assert user group membership +    assert: +      that: "'{{ test_user }}' in user_out['results']['results'][0]['users'][0]" +      msg: "{{ user_out }}" + +  - name: delete second test group +    oc_obj: +      kind: group +      state: absent +      name: integration-test-group2 + +  - name: create empty second group +    command: oadm groups new integration-test-group2 + +  - name: update user with second group membership +    oc_user: +      state: present +      username: "{{ test_user }}" +      groups: +      - 
"integration-test-group" +      - "integration-test-group2" +    register: user_out +  - name: assert adding more group changed +    assert: +      that: user_out['changed'] == True + +  - name: get group memberships +    oc_obj: +      kind: group +      state: list +      name: "{{ item }}" +    with_items: +    - integration-test-group +    - integration-test-group2 +    register: user_out +  - name: assert user member of above groups +    assert: +      that: "'{{ test_user }}' in user_out['results'][0]['results']['results'][0]['users'] and \ +            '{{ test_user }}' in user_out['results'][1]['results']['results'][0]['users']" +      msg: "{{ user_out }}" + +  - name: update user with only one group +    oc_user: +      state: present +      username: "{{ test_user }}" +      groups: +      - "integration-test-group2" +    register: user_out +  - assert: +      that: user_out['changed'] == True + +  - name: get group memberships +    oc_obj: +      kind: group +      state: list +      name: "{{ item }}" +    with_items: +    - "integration-test-group" +    - "integration-test-group2" +    register: user_out +  - debug: var=user_out +  - name: assert proper user membership +    assert: +      that: "'{{ test_user }}' not in user_out['results'][0]['results']['results'][0]['users'] and \ +             '{{ test_user }}' in user_out['results'][1]['results']['results'][0]['users']" + +  - name: clean up test groups +    oc_obj: +      kind: group +      state: absent +      name: "{{ item }}" +    with_items: +    - "integration-test-group" +    - "integration-test-group2" + +  - name: clean up test user +    oc_user: +      state: absent +      username: "{{ test_user }}" diff --git a/roles/lib_openshift/src/test/unit/test_oc_configmap.py b/roles/lib_openshift/src/test/unit/test_oc_configmap.py new file mode 100755 index 000000000..318fd6167 --- /dev/null +++ b/roles/lib_openshift/src/test/unit/test_oc_configmap.py @@ -0,0 +1,239 @@ +''' + Unit tests for oc configmap +''' + +import copy +import os +import six +import sys +import unittest +import mock + +# Removing invalid variable names for tests so that I can +# keep them brief +# pylint: disable=invalid-name,no-name-in-module +# Disable import-error b/c our libraries aren't loaded in jenkins +# pylint: disable=import-error,wrong-import-position +# place class in our python path +module_path = os.path.join('/'.join(os.path.realpath(__file__).split('/')[:-4]), 'library')  # noqa: E501 +sys.path.insert(0, module_path) +from oc_configmap import OCConfigMap, locate_oc_binary  # noqa: E402 + + +class OCConfigMapTest(unittest.TestCase): +    ''' +     Test class for OCConfigMap +    ''' +    params = {'kubeconfig': '/etc/origin/master/admin.kubeconfig', +              'state': 'present', +              'debug': False, +              'name': 'configmap', +              'from_file': {}, +              'from_literal': {}, +              'namespace': 'test'} + +    @mock.patch('oc_configmap.Utils._write') +    @mock.patch('oc_configmap.Utils.create_tmpfile_copy') +    @mock.patch('oc_configmap.OCConfigMap._run') +    def test_create_configmap(self, mock_run, mock_tmpfile_copy, mock_write): +        ''' Testing a configmap create ''' +        # TODO +        return +        params = copy.deepcopy(OCConfigMapTest.params) +        params['from_file'] = {'test': '/root/file'} +        params['from_literal'] = {'foo': 'bar'} + +        configmap = '''{ +                "apiVersion": "v1", +                "data": { +                    "foo": 
"bar", +                    "test": "this is a file\\n" +                }, +                "kind": "ConfigMap", +                "metadata": { +                    "creationTimestamp": "2017-03-20T20:24:35Z", +                    "name": "configmap", +                    "namespace": "test" +                } +            }''' + +        mock_run.side_effect = [ +            (1, '', 'Error from server (NotFound): configmaps "configmap" not found'), +            (0, '', ''), +            (0, configmap, ''), +        ] + +        mock_tmpfile_copy.side_effect = [ +            '/tmp/mocked_kubeconfig', +        ] + +        results = OCConfigMap.run_ansible(params, False) + +        self.assertTrue(results['changed']) +        self.assertEqual(results['results']['results'][0]['metadata']['name'], 'configmap') + +    @mock.patch('oc_configmap.Utils._write') +    @mock.patch('oc_configmap.Utils.create_tmpfile_copy') +    @mock.patch('oc_configmap.OCConfigMap._run') +    def test_update_configmap(self, mock_run, mock_tmpfile_copy, mock_write): +        ''' Testing a configmap create ''' +        params = copy.deepcopy(OCConfigMapTest.params) +        params['from_file'] = {'test': '/root/file'} +        params['from_literal'] = {'foo': 'bar', 'deployment_type': 'online'} + +        configmap = '''{ +                "apiVersion": "v1", +                "data": { +                    "foo": "bar", +                    "test": "this is a file\\n" +                }, +                "kind": "ConfigMap", +                "metadata": { +                    "creationTimestamp": "2017-03-20T20:24:35Z", +                    "name": "configmap", +                    "namespace": "test" + +                } +            }''' + +        mod_configmap = '''{ +                "apiVersion": "v1", +                "data": { +                    "foo": "bar", +                    "deployment_type": "online", +                    "test": "this is a file\\n" +                }, +                "kind": "ConfigMap", +                "metadata": { +                    "creationTimestamp": "2017-03-20T20:24:35Z", +                    "name": "configmap", +                    "namespace": "test" + +                } +            }''' + +        mock_run.side_effect = [ +            (0, configmap, ''), +            (0, mod_configmap, ''), +            (0, configmap, ''), +            (0, '', ''), +            (0, mod_configmap, ''), +        ] + +        mock_tmpfile_copy.side_effect = [ +            '/tmp/mocked_kubeconfig', +        ] + +        results = OCConfigMap.run_ansible(params, False) + +        self.assertTrue(results['changed']) +        self.assertEqual(results['results']['results'][0]['metadata']['name'], 'configmap') +        self.assertEqual(results['results']['results'][0]['data']['deployment_type'], 'online') + +    @unittest.skipIf(six.PY3, 'py2 test only') +    @mock.patch('os.path.exists') +    @mock.patch('os.environ.get') +    def test_binary_lookup_fallback(self, mock_env_get, mock_path_exists): +        ''' Testing binary lookup fallback ''' + +        mock_env_get.side_effect = lambda _v, _d: '' + +        mock_path_exists.side_effect = lambda _: False + +        self.assertEqual(locate_oc_binary(), 'oc') + +    @unittest.skipIf(six.PY3, 'py2 test only') +    @mock.patch('os.path.exists') +    @mock.patch('os.environ.get') +    def test_binary_lookup_in_path(self, mock_env_get, mock_path_exists): +        ''' Testing binary lookup in path ''' + +        oc_bin = '/usr/bin/oc' + +        
mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin' + +        mock_path_exists.side_effect = lambda f: f == oc_bin + +        self.assertEqual(locate_oc_binary(), oc_bin) + +    @unittest.skipIf(six.PY3, 'py2 test only') +    @mock.patch('os.path.exists') +    @mock.patch('os.environ.get') +    def test_binary_lookup_in_usr_local(self, mock_env_get, mock_path_exists): +        ''' Testing binary lookup in /usr/local/bin ''' + +        oc_bin = '/usr/local/bin/oc' + +        mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin' + +        mock_path_exists.side_effect = lambda f: f == oc_bin + +        self.assertEqual(locate_oc_binary(), oc_bin) + +    @unittest.skipIf(six.PY3, 'py2 test only') +    @mock.patch('os.path.exists') +    @mock.patch('os.environ.get') +    def test_binary_lookup_in_home(self, mock_env_get, mock_path_exists): +        ''' Testing binary lookup in ~/bin ''' + +        oc_bin = os.path.expanduser('~/bin/oc') + +        mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin' + +        mock_path_exists.side_effect = lambda f: f == oc_bin + +        self.assertEqual(locate_oc_binary(), oc_bin) + +    @unittest.skipIf(six.PY2, 'py3 test only') +    @mock.patch('shutil.which') +    @mock.patch('os.environ.get') +    def test_binary_lookup_fallback_py3(self, mock_env_get, mock_shutil_which): +        ''' Testing binary lookup fallback ''' + +        mock_env_get.side_effect = lambda _v, _d: '' + +        mock_shutil_which.side_effect = lambda _f, path=None: None + +        self.assertEqual(locate_oc_binary(), 'oc') + +    @unittest.skipIf(six.PY2, 'py3 test only') +    @mock.patch('shutil.which') +    @mock.patch('os.environ.get') +    def test_binary_lookup_in_path_py3(self, mock_env_get, mock_shutil_which): +        ''' Testing binary lookup in path ''' + +        oc_bin = '/usr/bin/oc' + +        mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin' + +        mock_shutil_which.side_effect = lambda _f, path=None: oc_bin + +        self.assertEqual(locate_oc_binary(), oc_bin) + +    @unittest.skipIf(six.PY2, 'py3 test only') +    @mock.patch('shutil.which') +    @mock.patch('os.environ.get') +    def test_binary_lookup_in_usr_local_py3(self, mock_env_get, mock_shutil_which): +        ''' Testing binary lookup in /usr/local/bin ''' + +        oc_bin = '/usr/local/bin/oc' + +        mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin' + +        mock_shutil_which.side_effect = lambda _f, path=None: oc_bin + +        self.assertEqual(locate_oc_binary(), oc_bin) + +    @unittest.skipIf(six.PY2, 'py3 test only') +    @mock.patch('shutil.which') +    @mock.patch('os.environ.get') +    def test_binary_lookup_in_home_py3(self, mock_env_get, mock_shutil_which): +        ''' Testing binary lookup in ~/bin ''' + +        oc_bin = os.path.expanduser('~/bin/oc') + +        mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin' + +        mock_shutil_which.side_effect = lambda _f, path=None: oc_bin + +        self.assertEqual(locate_oc_binary(), oc_bin) diff --git a/roles/lib_openshift/src/test/unit/test_oc_pvc.py b/roles/lib_openshift/src/test/unit/test_oc_pvc.py new file mode 100755 index 000000000..82187917d --- /dev/null +++ b/roles/lib_openshift/src/test/unit/test_oc_pvc.py @@ -0,0 +1,366 @@ +''' + Unit tests for oc pvc +''' + +import copy +import os +import six +import sys +import unittest +import mock + +# Removing invalid variable names for tests so that I can +# keep them brief +# pylint: disable=invalid-name,no-name-in-module +# Disable 
import-error b/c our libraries aren't loaded in jenkins
+# pylint: disable=import-error,wrong-import-position
+# place class in our python path
+module_path = os.path.join('/'.join(os.path.realpath(__file__).split('/')[:-4]), 'library')  # noqa: E501
+sys.path.insert(0, module_path)
+from oc_pvc import OCPVC, locate_oc_binary  # noqa: E402
+
+
+class OCPVCTest(unittest.TestCase):
+    '''
+     Test class for OCPVC
+    '''
+    params = {'kubeconfig': '/etc/origin/master/admin.kubeconfig',
+              'state': 'present',
+              'debug': False,
+              'name': 'mypvc',
+              'namespace': 'test',
+              'volume_capacity': '1G',
+              'access_modes': 'ReadWriteMany'}
+
+    @mock.patch('oc_pvc.Utils.create_tmpfile_copy')
+    @mock.patch('oc_pvc.OCPVC._run')
+    def test_create_pvc(self, mock_run, mock_tmpfile_copy):
+        ''' Testing a pvc create '''
+        params = copy.deepcopy(OCPVCTest.params)
+
+        pvc = '''{"kind": "PersistentVolumeClaim",
+               "apiVersion": "v1",
+               "metadata": {
+                   "name": "mypvc",
+                   "namespace": "test",
+                   "selfLink": "/api/v1/namespaces/test/persistentvolumeclaims/mypvc",
+                   "uid": "77597898-d8d8-11e6-aea5-0e3c0c633889",
+                   "resourceVersion": "126510787",
+                   "creationTimestamp": "2017-01-12T15:04:50Z",
+                   "labels": {
+                       "mypvc": "database"
+                   },
+                   "annotations": {
+                       "pv.kubernetes.io/bind-completed": "yes",
+                       "pv.kubernetes.io/bound-by-controller": "yes",
+                       "v1.2-volume.experimental.kubernetes.io/provisioning-required": "volume.experimental.kubernetes.io/provisioning-completed"
+                   }
+               },
+               "spec": {
+                   "accessModes": [
+                       "ReadWriteOnce"
+                   ],
+                   "resources": {
+                       "requests": {
+                           "storage": "1Gi"
+                       }
+                   },
+                   "volumeName": "pv-aws-ow5vl"
+               },
+               "status": {
+                  "phase": "Bound",
+                   "accessModes": [
+                       "ReadWriteOnce"
+                   ],
+                    "capacity": {
+                      "storage": "1Gi"
+                    }
+               }
+              }'''
+
+        mock_run.side_effect = [
+            (1, '', 'Error from server: persistentvolumeclaims "mypvc" not found'),
+            (1, '', 'Error from server: persistentvolumeclaims "mypvc" not found'),
+            (0, '', ''),
+            (0, pvc, ''),
+        ]
+
+        mock_tmpfile_copy.side_effect = [
+            '/tmp/mocked_kubeconfig',
+        ]
+
+        results = OCPVC.run_ansible(params, False)
+
+        self.assertTrue(results['changed'])
+        self.assertEqual(results['results']['results'][0]['metadata']['name'], 'mypvc')
+
+    @mock.patch('oc_pvc.Utils.create_tmpfile_copy')
+    @mock.patch('oc_pvc.OCPVC._run')
+    def test_update_pvc(self, mock_run, mock_tmpfile_copy):
+        ''' Testing a pvc update '''
+        params = copy.deepcopy(OCPVCTest.params)
+        params['access_modes'] = 'ReadWriteMany'
+
+        pvc = '''{"kind": "PersistentVolumeClaim",
+               "apiVersion": "v1",
+               "metadata": {
+                   "name": 
"mypvc", +                   "namespace": "test", +                   "selfLink": "/api/v1/namespaces/test/persistentvolumeclaims/mypvc", +                   "uid": "77597898-d8d8-11e6-aea5-0e3c0c633889", +                   "resourceVersion": "126510787", +                   "creationTimestamp": "2017-01-12T15:04:50Z", +                   "labels": { +                       "mypvc": "database" +                   }, +                   "annotations": { +                       "pv.kubernetes.io/bind-completed": "yes", +                       "pv.kubernetes.io/bound-by-controller": "yes", +                       "v1.2-volume.experimental.kubernetes.io/provisioning-required": "volume.experimental.kubernetes.io/provisioning-completed" +                   } +               }, +               "spec": { +                   "accessModes": [ +                       "ReadWriteOnce" +                   ], +                   "resources": { +                       "requests": { +                           "storage": "1Gi" +                       } +                   }, +                   "volumeName": "pv-aws-ow5vl" +               }, +               "status": { +                  "phase": "Bound", +                   "accessModes": [ +                       "ReadWriteOnce" +                   ], +                    "capacity": { +                      "storage": "1Gi" +                    } +               } +              }''' + +        mod_pvc = '''{"kind": "PersistentVolumeClaim", +               "apiVersion": "v1", +               "metadata": { +                   "name": "mypvc", +                   "namespace": "test", +                   "selfLink": "/api/v1/namespaces/test/persistentvolumeclaims/mypvc", +                   "uid": "77597898-d8d8-11e6-aea5-0e3c0c633889", +                   "resourceVersion": "126510787", +                   "creationTimestamp": "2017-01-12T15:04:50Z", +                   "labels": { +                       "mypvc": "database" +                   }, +                   "annotations": { +                       "pv.kubernetes.io/bind-completed": "yes", +                       "pv.kubernetes.io/bound-by-controller": "yes", +                       "v1.2-volume.experimental.kubernetes.io/provisioning-required": "volume.experimental.kubernetes.io/provisioning-completed" +                   } +               }, +               "spec": { +                   "accessModes": [ +                       "ReadWriteMany" +                   ], +                   "resources": { +                       "requests": { +                           "storage": "1Gi" +                       } +                   }, +                   "volumeName": "pv-aws-ow5vl" +               }, +               "status": { +                  "phase": "Bound", +                   "accessModes": [ +                       "ReadWriteOnce" +                   ], +                    "capacity": { +                      "storage": "1Gi" +                    } +               } +              }''' + +        mock_run.side_effect = [ +            (0, pvc, ''), +            (0, pvc, ''), +            (0, '', ''), +            (0, mod_pvc, ''), +        ] + +        mock_tmpfile_copy.side_effect = [ +            '/tmp/mocked_kubeconfig', +        ] + +        results = OCPVC.run_ansible(params, False) + +        self.assertFalse(results['changed']) +        self.assertEqual(results['results']['msg'], '##### - This volume is currently bound.  
Will not update - ####')
+
+    @mock.patch('oc_pvc.Utils.create_tmpfile_copy')
+    @mock.patch('oc_pvc.OCPVC._run')
+    def test_delete_pvc(self, mock_run, mock_tmpfile_copy):
+        ''' Testing a pvc delete '''
+        params = copy.deepcopy(OCPVCTest.params)
+        params['state'] = 'absent'
+
+        pvc = '''{"kind": "PersistentVolumeClaim",
+               "apiVersion": "v1",
+               "metadata": {
+                   "name": "mypvc",
+                   "namespace": "test",
+                   "selfLink": "/api/v1/namespaces/test/persistentvolumeclaims/mypvc",
+                   "uid": "77597898-d8d8-11e6-aea5-0e3c0c633889",
+                   "resourceVersion": "126510787",
+                   "creationTimestamp": "2017-01-12T15:04:50Z",
+                   "labels": {
+                       "mypvc": "database"
+                   },
+                   "annotations": {
+                       "pv.kubernetes.io/bind-completed": "yes",
+                       "pv.kubernetes.io/bound-by-controller": "yes",
+                       "v1.2-volume.experimental.kubernetes.io/provisioning-required": "volume.experimental.kubernetes.io/provisioning-completed"
+                   }
+               },
+               "spec": {
+                   "accessModes": [
+                       "ReadWriteOnce"
+                   ],
+                   "resources": {
+                       "requests": {
+                           "storage": "1Gi"
+                       }
+                   },
+                   "volumeName": "pv-aws-ow5vl"
+               },
+               "status": {
+                  "phase": "Bound",
+                   "accessModes": [
+                       "ReadWriteOnce"
+                   ],
+                    "capacity": {
+                      "storage": "1Gi"
+                    }
+               }
+              }'''
+
+        mock_run.side_effect = [
+            (0, pvc, ''),
+            (0, '', ''),
+        ]
+
+        mock_tmpfile_copy.side_effect = [
+            '/tmp/mocked_kubeconfig',
+        ]
+
+        results = OCPVC.run_ansible(params, False)
+
+        self.assertTrue(results['changed'])
+
+    @unittest.skipIf(six.PY3, 'py2 test only')
+    @mock.patch('os.path.exists')
+    @mock.patch('os.environ.get')
+    def test_binary_lookup_fallback(self, mock_env_get, mock_path_exists):
+        ''' Testing binary lookup fallback '''
+
+        mock_env_get.side_effect = lambda _v, _d: ''
+
+        mock_path_exists.side_effect = lambda _: False
+
+        self.assertEqual(locate_oc_binary(), 'oc')
+
+    @unittest.skipIf(six.PY3, 'py2 test only')
+    @mock.patch('os.path.exists')
+    @mock.patch('os.environ.get')
+    def test_binary_lookup_in_path(self, mock_env_get, mock_path_exists):
+        ''' Testing binary lookup in path '''
+
+        oc_bin = '/usr/bin/oc'
+
+        mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+        mock_path_exists.side_effect = lambda f: f == oc_bin
+
+        self.assertEqual(locate_oc_binary(), oc_bin)
+
+    @unittest.skipIf(six.PY3, 'py2 test only')
+    @mock.patch('os.path.exists')
+    @mock.patch('os.environ.get')
+    def test_binary_lookup_in_usr_local(self, mock_env_get, mock_path_exists):
+        ''' Testing binary lookup in /usr/local/bin '''
+
+        oc_bin = '/usr/local/bin/oc'
+
+        mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+        mock_path_exists.side_effect = lambda f: f == oc_bin
+
+        self.assertEqual(locate_oc_binary(), oc_bin)
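+
+    # Editorial sketch, not part of the upstream change: the py2/py3
+    # "binary lookup" tests in this file (and their twins in
+    # test_oc_configmap.py and test_oc_user.py) all pin down the shared
+    # locate_oc_binary() helper from lib_openshift. Assuming the py2 code
+    # path, which probes os.path.exists rather than shutil.which, the
+    # behaviour these tests expect is roughly the following; the method
+    # name is hypothetical and the code is illustrative only:
+    @staticmethod
+    def _sketch_locate_oc_binary():
+        ''' illustrative only: search PATH plus well-known dirs, else "oc" '''
+        paths = os.environ.get('PATH', os.defpath).split(os.pathsep)
+        paths += ['/usr/local/bin', os.path.expanduser('~/bin')]
+        for path in paths:
+            candidate = os.path.join(path, 'oc')
+            if os.path.exists(candidate):
+                return candidate
+        return 'oc'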
+
+    @unittest.skipIf(six.PY3, 'py2 test only')
+    @mock.patch('os.path.exists')
+    @mock.patch('os.environ.get')
+    def test_binary_lookup_in_home(self, mock_env_get, mock_path_exists):
+        ''' Testing binary lookup in ~/bin '''
+
+        oc_bin = os.path.expanduser('~/bin/oc')
+
+        mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+        mock_path_exists.side_effect = lambda f: f == oc_bin
+
+        self.assertEqual(locate_oc_binary(), oc_bin)
+
+    @unittest.skipIf(six.PY2, 'py3 test only')
+    @mock.patch('shutil.which')
+    @mock.patch('os.environ.get')
+    def test_binary_lookup_fallback_py3(self, mock_env_get, mock_shutil_which):
+        ''' Testing binary lookup fallback '''
+
+        mock_env_get.side_effect = lambda _v, _d: ''
+
+        mock_shutil_which.side_effect = lambda _f, path=None: None
+
+        self.assertEqual(locate_oc_binary(), 'oc')
+
+    @unittest.skipIf(six.PY2, 'py3 test only')
+    @mock.patch('shutil.which')
+    @mock.patch('os.environ.get')
+    def test_binary_lookup_in_path_py3(self, mock_env_get, mock_shutil_which):
+        ''' Testing binary lookup in path '''
+
+        oc_bin = '/usr/bin/oc'
+
+        mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+        mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
+
+        self.assertEqual(locate_oc_binary(), oc_bin)
+
+    @unittest.skipIf(six.PY2, 'py3 test only')
+    @mock.patch('shutil.which')
+    @mock.patch('os.environ.get')
+    def test_binary_lookup_in_usr_local_py3(self, mock_env_get, mock_shutil_which):
+        ''' Testing binary lookup in /usr/local/bin '''
+
+        oc_bin = '/usr/local/bin/oc'
+
+        mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+        mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
+
+        self.assertEqual(locate_oc_binary(), oc_bin)
+
+    @unittest.skipIf(six.PY2, 'py3 test only')
+    @mock.patch('shutil.which')
+    @mock.patch('os.environ.get')
+    def test_binary_lookup_in_home_py3(self, mock_env_get, mock_shutil_which):
+        ''' Testing binary lookup in ~/bin '''
+
+        oc_bin = os.path.expanduser('~/bin/oc')
+
+        mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
+
+        mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
+
+        self.assertEqual(locate_oc_binary(), oc_bin)
diff --git a/roles/lib_openshift/src/test/unit/test_oc_user.py b/roles/lib_openshift/src/test/unit/test_oc_user.py
new file mode 100755
index 000000000..f7a17cc2c
--- /dev/null
+++ b/roles/lib_openshift/src/test/unit/test_oc_user.py
@@ -0,0 +1,127 @@
+#!/usr/bin/env python2
+'''
+ Unit tests for oc user
+'''
+# To run
+# ./test_oc_user.py
+#
+# ..
+# ----------------------------------------------------------------------
+# Ran 2 tests in 0.003s
+#
+# OK
+
+import os
+import sys
+import unittest
+import mock
+
+# Removing invalid variable names for tests so that I can
+# keep them brief
+# pylint: disable=invalid-name,no-name-in-module
+# Disable import-error b/c our libraries aren't loaded in jenkins
+# pylint: disable=import-error
+# place class in our python path
+module_path = os.path.join('/'.join(os.path.realpath(__file__).split('/')[:-4]), 'library')  # noqa: E501
+sys.path.insert(0, module_path)
+from oc_user import OCUser  # noqa: E402
+
+
+class OCUserTest(unittest.TestCase):
+    '''
+     Test class for OCUser
+    '''
+
+    def setUp(self):
+        ''' setUp method '''
+        pass
+
+    @mock.patch('oc_user.Utils.create_tmpfile_copy')
+    @mock.patch('oc_user.OCUser._run')
+    def test_state_list(self, mock_cmd, mock_tmpfile_copy):
+        ''' Testing a user list '''
+        params = {'username': 'testuser@email.com',
+                  'state': 'list',
+                  'kubeconfig': '/etc/origin/master/admin.kubeconfig',
+                  'full_name': None,
+                  'groups': [],
+                  'debug': False}
+
+        user = '''{
+               "kind": "User",
+               "apiVersion": "v1",
+               "metadata": {
+                   "name": "testuser@email.com",
+                   "selfLink": "/oapi/v1/users/testuser@email.com",
+                   "uid": "02fee6c9-f20d-11e6-b83b-12e1a7285e80",
+                   "resourceVersion": "38566887",
+                   "creationTimestamp": "2017-02-13T16:53:58Z"
+               },
+               "fullName": "Test User",
+               "identities": null,
+               "groups": null
+           }'''
+
+        mock_cmd.side_effect = [
+            (0, user, ''),
+        ]
+
+        mock_tmpfile_copy.side_effect = [
+            '/tmp/mocked_kubeconfig',
+        ]
+
+        results = OCUser.run_ansible(params, False)
+
+        self.assertFalse(results['changed'])
+        self.assertTrue(results['results'][0]['metadata']['name'] == "testuser@email.com")
+
+    @mock.patch('oc_user.Utils.create_tmpfile_copy')
+    @mock.patch('oc_user.OCUser._run')
+    def test_state_present(self, mock_cmd, mock_tmpfile_copy):
+        ''' Testing a user create (state present) '''
+        params = {'username': 'testuser@email.com',
+                  'state': 'present',
+                  'kubeconfig': '/etc/origin/master/admin.kubeconfig',
+                  'full_name': 'Test User',
+                  'groups': [],
+                  'debug': False}
+
+        created_user = '''{
+                          "kind": "User",
+                          "apiVersion": "v1",
+                          "metadata": {
+                              "name": "testuser@email.com",
+                              "selfLink": "/oapi/v1/users/testuser@email.com",
+                              "uid": "8d508039-f224-11e6-b83b-12e1a7285e80",
+                              "resourceVersion": "38646241",
+                              "creationTimestamp": "2017-02-13T19:42:28Z"
+                          },
+                          "fullName": "Test User",
+                          "identities": null,
+                          "groups": null
+                      }'''
+
+        mock_cmd.side_effect = [
+            (1, '', 'Error from server: users "testuser@email.com" not found'),  # get
+            (1, '', 'Error from 
server: users "testuser@email.com" not found'),  # get +            (0, 'user "testuser@email.com" created', ''),  # create +            (0, created_user, ''),  # get +        ] + +        mock_tmpfile_copy.side_effect = [ +            '/tmp/mocked_kubeconfig', +        ] + +        results = OCUser.run_ansible(params, False) + +        self.assertTrue(results['changed']) +        self.assertTrue(results['results']['results'][0]['metadata']['name'] == +                        "testuser@email.com") + +    def tearDown(self): +        '''TearDown method''' +        pass + + +if __name__ == "__main__": +    unittest.main() diff --git a/roles/openshift_examples/examples-sync.sh b/roles/openshift_examples/examples-sync.sh index e3cc3a9b4..0a2d3005f 100755 --- a/roles/openshift_examples/examples-sync.sh +++ b/roles/openshift_examples/examples-sync.sh @@ -31,6 +31,8 @@ mv application-templates-GA/fis-image-streams.json ${EXAMPLES_BASE}/xpaas-stream  mv application-templates-GA/quickstarts/* ${EXAMPLES_BASE}/xpaas-templates/  find application-templates-${XPAAS_VERSION}/ -name '*.json' ! -wholename '*secret*' ! -wholename '*demo*' -exec mv {} ${EXAMPLES_BASE}/xpaas-templates/ \;  wget https://raw.githubusercontent.com/redhat-developer/s2i-dotnetcore/master/dotnet_imagestreams.json         -O ${EXAMPLES_BASE}/image-streams/dotnet_imagestreams.json +wget https://raw.githubusercontent.com/redhat-developer/s2i-dotnetcore/master/templates/dotnet-example.json           -O ${EXAMPLES_BASE}/quickstart-templates/dotnet-example.json +wget https://raw.githubusercontent.com/redhat-developer/s2i-dotnetcore/master/templates/dotnet-pgsql-persistent.json    -O ${EXAMPLES_BASE}/quickstart-templates/dotnet-pgsql-persistent.json  wget https://raw.githubusercontent.com/openshift/origin-metrics/master/metrics.yaml                            -O ../openshift_hosted_templates/files/${ORIGIN_VERSION}/origin/metrics-deployer.yaml  wget https://raw.githubusercontent.com/openshift/origin-metrics/enterprise/metrics.yaml                        -O ../openshift_hosted_templates/files/${ORIGIN_VERSION}/enterprise/metrics-deployer.yaml  wget https://raw.githubusercontent.com/openshift/origin-aggregated-logging/master/deployer/deployer.yaml       -O ../openshift_hosted_templates/files/${ORIGIN_VERSION}/origin/logging-deployer.yaml diff --git a/roles/openshift_examples/files/examples/v1.5/image-streams/dotnet_imagestreams.json b/roles/openshift_examples/files/examples/v1.5/image-streams/dotnet_imagestreams.json index 0d5ac21d8..857ffa980 100644 --- a/roles/openshift_examples/files/examples/v1.5/image-streams/dotnet_imagestreams.json +++ b/roles/openshift_examples/files/examples/v1.5/image-streams/dotnet_imagestreams.json @@ -27,8 +27,9 @@                            "iconClass": "icon-dotnet",                            "tags": "builder,.net,dotnet,dotnetcore",                            "supports":"dotnet", -                          "sampleRepo": "https://github.com/redhat-developer/s2i-dotnetcore.git", -                          "sampleContextDir": "1.1/test/asp-net-hello-world" +                          "sampleRepo": "https://github.com/redhat-developer/s2i-dotnetcore-ex.git", +                          "sampleContextDir": "app", +                          "sampleRef": "dotnetcore-1.1"                          },                          "from": {                            "kind": "ImageStreamTag", @@ -43,8 +44,9 @@                              "iconClass": "icon-dotnet",                              "tags": 
"builder,.net,dotnet,dotnetcore,rh-dotnetcore11",                              "supports":"dotnet:1.1,dotnet", -                            "sampleRepo": "https://github.com/redhat-developer/s2i-dotnetcore.git", -                            "sampleContextDir": "1.1/test/asp-net-hello-world", +                            "sampleRepo": "https://github.com/redhat-developer/s2i-dotnetcore-ex.git", +                            "sampleContextDir": "app", +                            "sampleRef": "dotnetcore-1.1",                              "version": "1.1"                          },                          "from": { @@ -60,8 +62,9 @@                              "iconClass": "icon-dotnet",                              "tags": "builder,.net,dotnet,dotnetcore,rh-dotnetcore10",                              "supports":"dotnet:1.0,dotnet", -                            "sampleRepo": "https://github.com/redhat-developer/s2i-dotnetcore.git", -                            "sampleContextDir": "1.0/test/asp-net-hello-world", +                            "sampleRepo": "https://github.com/redhat-developer/s2i-dotnetcore-ex.git", +                            "sampleContextDir": "app", +                            "sampleRef": "dotnetcore-1.0",                              "version": "1.0"                          },                          "from": { diff --git a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/dotnet-example.json b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/dotnet-example.json new file mode 100644 index 000000000..a09d71a00 --- /dev/null +++ b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/dotnet-example.json @@ -0,0 +1,333 @@ +{ +    "kind": "Template", +    "apiVersion": "v1", +    "metadata": { +        "name": "dotnet-example", +        "annotations": { +            "openshift.io/display-name": ".NET Core", +            "description": "An example .NET Core application.", +            "tags": "quickstart,dotnet,.net", +            "iconClass": "icon-dotnet", +            "template.openshift.io/provider-display-name": "Red Hat, Inc.", +            "template.openshift.io/documentation-url": "https://github.com/redhat-developer/s2i-dotnetcore", +            "template.openshift.io/support-url": "https://access.redhat.com" +        } +    }, +    "objects": [ +        { +            "kind": "Route", +            "apiVersion": "v1", +            "metadata": { +                "name": "${NAME}" +            }, +            "spec": { +                "host": "${APPLICATION_DOMAIN}", +                "to": { +                    "kind": "Service", +                    "name": "${NAME}" +                } +            } +        }, +        { +            "kind": "Service", +            "apiVersion": "v1", +            "metadata": { +                "name": "${NAME}", +                "annotations": { +                    "description": "Exposes and load balances the application pods" +                } +            }, +            "spec": { +                "ports": [ +                    { +                        "name": "web", +                        "port": 8080, +                        "targetPort": 8080 +                    } +                ], +                "selector": { +                    "name": "${NAME}" +                } +            } +        }, +        { +            "kind": "ImageStream", +            "apiVersion": "v1", +            "metadata": { +                "name": "${NAME}", +                
"annotations": { +                    "description": "Keeps track of changes in the application image" +                } +            } +        }, +        { +            "kind": "BuildConfig", +            "apiVersion": "v1", +            "metadata": { +                "name": "${NAME}", +                "annotations": { +                    "description": "Defines how to build the application" +                } +            }, +            "spec": { +                "source": { +                    "type": "Git", +                    "git": { +                        "uri": "${SOURCE_REPOSITORY_URL}", +                        "ref": "${SOURCE_REPOSITORY_REF}" +                    }, +                    "contextDir": "${CONTEXT_DIR}" +                }, +                "strategy": { +                    "type": "Source", +                    "sourceStrategy": { +                        "from": { +                            "kind": "ImageStreamTag", +                            "namespace": "${NAMESPACE}", +                            "name": "${DOTNET_IMAGE_STREAM_TAG}" +                        }, +                        "env": [ +                            { +                                "name": "DOTNET_STARTUP_PROJECT", +                                "value": "${DOTNET_STARTUP_PROJECT}" +                            }, +                            { +                                "name": "DOTNET_ASSEMBLY_NAME", +                                "value": "${DOTNET_ASSEMBLY_NAME}" +                            }, +                            { +                                "name": "DOTNET_NPM_TOOLS", +                                "value": "${DOTNET_NPM_TOOLS}" +                            }, +                            { +                                "name": "DOTNET_TEST_PROJECTS", +                                "value": "${DOTNET_TEST_PROJECTS}" +                            }, +                            { +                                "name": "DOTNET_CONFIGURATION", +                                "value": "${DOTNET_CONFIGURATION}" +                            }, +                            { +                                "name": "DOTNET_PUBLISH", +                                "value": "true" +                            }, +                            { +                                "name": "DOTNET_RESTORE_SOURCES", +                                "value": "${DOTNET_RESTORE_SOURCES}" +                            } +                        ] +                    } +                }, +                "output": { +                    "to": { +                        "kind": "ImageStreamTag", +                        "name": "${NAME}:latest" +                    } +                }, +                "triggers": [ +                    { +                        "type": "ImageChange" +                    }, +                    { +                        "type": "ConfigChange" +                    }, +                    { +                        "type": "GitHub", +                        "github": { +                            "secret": "${GITHUB_WEBHOOK_SECRET}" +                        } +                    }, +                    { +                        "type": "Generic", +                        "generic": { +                            "secret": "${GENERIC_WEBHOOK_SECRET}" +                        } +                    } +                ] +            } +        }, +        { +            "kind": "DeploymentConfig", +            
"apiVersion": "v1", +            "metadata": { +                "name": "${NAME}", +                "annotations": { +                    "description": "Defines how to deploy the application server" +                } +            }, +            "spec": { +                "strategy": { +                    "type": "Rolling" +                }, +                "triggers": [ +                    { +                        "type": "ImageChange", +                        "imageChangeParams": { +                            "automatic": true, +                            "containerNames": [ +                                "dotnet-app" +                            ], +                            "from": { +                                "kind": "ImageStreamTag", +                                "name": "${NAME}:latest" +                            } +                        } +                    }, +                    { +                        "type": "ConfigChange" +                    } +                ], +                "replicas": 1, +                "selector": { +                    "name": "${NAME}" +                }, +                "template": { +                    "metadata": { +                        "name": "${NAME}", +                        "labels": { +                            "name": "${NAME}" +                        } +                    }, +                    "spec": { +                        "containers": [ +                            { +                                "name": "dotnet-app", +                                "image": " ", +                                "ports": [ +                                    { +                                        "containerPort": 8080 +                                    } +                                ], +                                "livenessProbe": { +                                    "httpGet": { +                                        "path": "/", +                                        "port": 8080, +                                        "scheme": "HTTP" +                                    }, +                                    "initialDelaySeconds": 40, +                                    "timeoutSeconds": 15 +                                }, +                                "readinessProbe": { +                                    "httpGet": { +                                        "path": "/", +                                        "port": 8080, +                                        "scheme": "HTTP" +                                    }, +                                    "initialDelaySeconds": 10, +                                    "timeoutSeconds": 30 +                                }, +                                "resources": { +                                    "limits": { +                                        "memory": "${MEMORY_LIMIT}" +                                    } +                                }, +                                "env": [] +                            } +                        ] +                    } +                } +            } +        } +    ], +    "parameters": [ +        { +            "name": "NAME", +            "displayName": "Name", +            "description": "The name assigned to all of the frontend objects defined in this template.", +            "required": true, +            "value": "dotnet-example" +        }, +        { +            "name": "MEMORY_LIMIT", +            "displayName": "Memory Limit", +      
      "description": "Maximum amount of memory the container can use.", +            "required": true, +            "value": "512Mi" +        }, +        { +            "name": "DOTNET_IMAGE_STREAM_TAG", +            "displayName": ".NET builder", +            "required": true, +            "description": "The image stream tag which is used to build the code.", +            "value": "dotnet:1.0" +        }, +        { +            "name": "NAMESPACE", +            "displayName": "Namespace", +            "description": "The OpenShift Namespace where the ImageStream resides.", +            "required": true, +            "value": "openshift" +        }, +        { +            "name": "SOURCE_REPOSITORY_URL", +            "displayName": "Git Repository URL", +            "description": "The URL of the repository with your application source code.", +            "required": true, +            "value": "https://github.com/redhat-developer/s2i-dotnetcore-ex.git" +        }, +        { +            "name": "SOURCE_REPOSITORY_REF", +            "displayName": "Git Reference", +            "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch.", +            "value": "dotnetcore-1.0" +        }, +        { +            "name": "CONTEXT_DIR", +            "displayName": "Context Directory", +            "description": "Set this to use a subdirectory of the source code repository" +        }, +        { +            "name": "APPLICATION_DOMAIN", +            "displayName": "Application Hostname", +            "description": "The exposed hostname that will route to the .NET Core service, if left blank a value will be defaulted.", +            "value": "" +        }, +        { +            "name": "GITHUB_WEBHOOK_SECRET", +            "displayName": "GitHub Webhook Secret", +            "description": "A secret string used to configure the GitHub webhook.", +            "generate": "expression", +            "from": "[a-zA-Z0-9]{40}" +        }, +        { +            "name": "GENERIC_WEBHOOK_SECRET", +            "displayName": "Generic Webhook Secret", +            "description": "A secret string used to configure the Generic webhook.", +            "generate": "expression", +            "from": "[a-zA-Z0-9]{40}" +        }, +        { +            "name": "DOTNET_STARTUP_PROJECT", +            "displayName": "Startup Project", +            "description": "Set this to the folder containing your startup project.", +            "value": "app" +        }, +        { +            "name": "DOTNET_ASSEMBLY_NAME", +            "displayName": "Startup Assembly", +            "description": "Set this when the assembly name is overridden in the project file." +        }, +        { +            "name": "DOTNET_NPM_TOOLS", +            "displayName": "Npm Tools", +            "description": "Set this to a space separated list of npm tools needed to publish.", +            "value": "bower gulp" +        }, +        { +            "name": "DOTNET_TEST_PROJECTS", +            "displayName": "Test projects", +            "description": "Set this to a space separated list of test projects to run before publishing." 
+        }, +        { +            "name": "DOTNET_CONFIGURATION", +            "displayName": "Configuration", +            "description": "Set this to configuration (Release/Debug).", +            "value": "Release" +        }, +        { +            "name": "DOTNET_RESTORE_SOURCES", +            "displayName": "NuGet package sources", +            "description": "Set this to override the NuGet.config sources." +        } +    ] +} diff --git a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/dotnet-pgsql-persistent.json b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/dotnet-pgsql-persistent.json new file mode 100644 index 000000000..fa31f7f61 --- /dev/null +++ b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/dotnet-pgsql-persistent.json @@ -0,0 +1,544 @@ +{ +    "kind": "Template", +    "apiVersion": "v1", +    "metadata": { +        "name": "dotnet-pgsql-persistent", +        "annotations": { +            "openshift.io/display-name": ".NET Core + PostgreSQL (Persistent)", +            "description": "An example .NET Core application with a PostgreSQL database. For more information about using this template, including OpenShift considerations, see https://github.com/redhat-developer/s2i-dotnetcore.", +            "tags": "quickstart,dotnet", +            "iconClass": "icon-dotnet", +            "template.openshift.io/provider-display-name": "Red Hat, Inc.", +            "template.openshift.io/documentation-url": "https://github.com/redhat-developer/s2i-dotnetcore", +            "template.openshift.io/support-url": "https://access.redhat.com" +        } +    }, +    "message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/redhat-developer/s2i-dotnetcore.", +    "labels": { +        "template": "dotnet-pgsql-persistent" +    }, +    "objects": [ +        { +            "kind": "Service", +            "apiVersion": "v1", +            "metadata": { +                "name": "${NAME}", +                "annotations": { +                    "description": "Exposes and load balances the application pods", +                    "service.alpha.openshift.io/dependencies": "[{\"name\": \"${DATABASE_SERVICE_NAME}\", \"kind\": \"Service\"}]" +                } +            }, +            "spec": { +                "ports": [ +                    { +                        "name": "web", +                        "port": 8080, +                        "targetPort": 8080 +                    } +                ], +                "selector": { +                    "name": "${NAME}" +                } +            } +        }, +        { +            "kind": "Route", +            "apiVersion": "v1", +            "metadata": { +                "name": "${NAME}" +            }, +            "spec": { +                "host": "${APPLICATION_DOMAIN}", +                "to": { +                    "kind": "Service", +                    "name": "${NAME}" +                } +            } +        }, +        { +            "kind": "ImageStream", +            "apiVersion": "v1", +            "metadata": { +                "name": "${NAME}", +                "annotations": { +                    "description": "Keeps track of changes in the application image" +                } +            } +        }, +        { +            "kind": "BuildConfig", +            "apiVersion": "v1", +            
"metadata": { +                "name": "${NAME}", +                "annotations": { +                    "description": "Defines how to build the application" +                } +            }, +            "spec": { +                "source": { +                    "type": "Git", +                    "git": { +                        "uri": "${SOURCE_REPOSITORY_URL}", +                        "ref": "${SOURCE_REPOSITORY_REF}" +                    }, +                    "contextDir": "${CONTEXT_DIR}" +                }, +                "strategy": { +                    "type": "Source", +                    "sourceStrategy": { +                        "from": { +                            "kind": "ImageStreamTag", +                            "namespace": "${NAMESPACE}", +                            "name": "${DOTNET_IMAGE_STREAM_TAG}" +                        }, +                        "env": [ +                            { +                                "name": "DOTNET_STARTUP_PROJECT", +                                "value": "${DOTNET_STARTUP_PROJECT}" +                            }, +                            { +                                "name": "DOTNET_ASSEMBLY_NAME", +                                "value": "${DOTNET_ASSEMBLY_NAME}" +                            }, +                            { +                                "name": "DOTNET_NPM_TOOLS", +                                "value": "${DOTNET_NPM_TOOLS}" +                            }, +                            { +                                "name": "DOTNET_TEST_PROJECTS", +                                "value": "${DOTNET_TEST_PROJECTS}" +                            }, +                            { +                                "name": "DOTNET_CONFIGURATION", +                                "value": "${DOTNET_CONFIGURATION}" +                            }, +                            { +                                "name": "DOTNET_PUBLISH", +                                "value": "true" +                            }, +                            { +                                "name": "DOTNET_RESTORE_SOURCES", +                                "value": "${DOTNET_RESTORE_SOURCES}" +                            } +                        ] +                    } +                }, +                "output": { +                    "to": { +                        "kind": "ImageStreamTag", +                        "name": "${NAME}:latest" +                    } +                }, +                "triggers": [ +                    { +                        "type": "ImageChange" +                    }, +                    { +                        "type": "ConfigChange" +                    }, +                    { +                        "type": "GitHub", +                        "github": { +                            "secret": "${GITHUB_WEBHOOK_SECRET}" +                        } +                    } +                ], +                "postCommit": {} +            } +        }, +        { +            "kind": "DeploymentConfig", +            "apiVersion": "v1", +            "metadata": { +                "name": "${NAME}", +                "annotations": { +                    "description": "Defines how to deploy the application server" +                } +            }, +            "spec": { +                "strategy": { +                    "type": "Rolling", +                    "rollingParams": { +                        "updatePeriodSeconds": 1, +                 
       "intervalSeconds": 1, +                        "timeoutSeconds": 600, +                        "maxUnavailable": "25%", +                        "maxSurge": "25%" +                    }, +                    "resources": {} +                }, +                "triggers": [ +                    { +                        "type": "ImageChange", +                        "imageChangeParams": { +                            "automatic": true, +                            "containerNames": [ +                                "dotnet-pgsql-persistent" +                            ], +                            "from": { +                                "kind": "ImageStreamTag", +                                "name": "${NAME}:latest" +                            } +                        } +                    }, +                    { +                        "type": "ConfigChange" +                    } +                ], +                "replicas": 1, +                "selector": { +                    "name": "${NAME}" +                }, +                "template": { +                    "metadata": { +                        "name": "${NAME}", +                        "labels": { +                            "name": "${NAME}" +                        } +                    }, +                    "spec": { +                        "containers": [ +                            { +                                "name": "dotnet-pgsql-persistent", +                                "image": " ", +                                "ports": [ +                                    { +                                        "containerPort": 8080 +                                    } +                                ], +                                "env": [ +                                    { +                                        "name": "ConnectionString", +                                        "value": "Host=${DATABASE_SERVICE_NAME};Database=${DATABASE_NAME};Username=${DATABASE_USER};Password=${DATABASE_PASSWORD}" +                                    } +                                ], +                                "resources": { +                                    "limits": { +                                        "memory": "${MEMORY_LIMIT}" +                                    } +                                }, +                                "livenessProbe": { +                                    "httpGet": { +                                        "path": "/", +                                        "port": 8080, +                                        "scheme": "HTTP" +                                    }, +                                    "initialDelaySeconds": 40, +                                    "timeoutSeconds": 10 +                                }, +                                "readinessProbe": { +                                    "httpGet": { +                                        "path": "/", +                                        "port": 8080, +                                        "scheme": "HTTP" +                                    }, +                                    "initialDelaySeconds": 10, +                                    "timeoutSeconds": 30 +                                } +                            } +                        ] +                    } +                } +            } +        }, +        { +            "kind": "PersistentVolumeClaim", +            "apiVersion": "v1", +            "metadata": { 
+                "name": "${DATABASE_SERVICE_NAME}" +            }, +            "spec": { +                "accessModes": [ +                    "ReadWriteOnce" +                ], +                "resources": { +                    "requests": { +                        "storage": "${VOLUME_CAPACITY}" +                    } +                } +            } +        }, +        { +            "kind": "Service", +            "apiVersion": "v1", +            "metadata": { +                "name": "${DATABASE_SERVICE_NAME}", +                "annotations": { +                    "description": "Exposes the database server" +                } +            }, +            "spec": { +                "ports": [ +                    { +                        "name": "postgresql", +                        "port": 5432, +                        "targetPort": 5432 +                    } +                ], +                "selector": { +                    "name": "${DATABASE_SERVICE_NAME}" +                } +            } +        }, +        { +            "kind": "DeploymentConfig", +            "apiVersion": "v1", +            "metadata": { +                "name": "${DATABASE_SERVICE_NAME}", +                "annotations": { +                    "description": "Defines how to deploy the database" +                } +            }, +            "spec": { +                "strategy": { +                    "type": "Recreate" +                }, +                "triggers": [ +                    { +                        "type": "ImageChange", +                        "imageChangeParams": { +                            "automatic": true, +                            "containerNames": [ +                                "postgresql" +                            ], +                            "from": { +                                "kind": "ImageStreamTag", +                                "namespace": "openshift", +                                "name": "postgresql:9.5" +                            } +                        } +                    }, +                    { +                        "type": "ConfigChange" +                    } +                ], +                "replicas": 1, +                "selector": { +                    "name": "${DATABASE_SERVICE_NAME}" +                }, +                "template": { +                    "metadata": { +                        "name": "${DATABASE_SERVICE_NAME}", +                        "labels": { +                            "name": "${DATABASE_SERVICE_NAME}" +                        } +                    }, +                    "spec": { +                        "volumes": [ +                            { +                                "name": "${DATABASE_SERVICE_NAME}-data", +                                "persistentVolumeClaim": { +                                    "claimName": "${DATABASE_SERVICE_NAME}" +                                } +                            } +                        ], +                        "containers": [ +                            { +                                "name": "postgresql", +                                "image": " ", +                                "ports": [ +                                    { +                                        "containerPort": 5432 +                                    } +                                ], +                                "readinessProbe": { +                                    "timeoutSeconds": 1, +                      
              "initialDelaySeconds": 5, +                                    "exec": { +                                        "command": [ +                                            "/bin/sh", +                                            "-i", +                                            "-c", +                                            "psql -h 127.0.0.1 -U ${POSTGRESQL_USER} -q -d ${POSTGRESQL_DATABASE} -c 'SELECT 1'" +                                        ] +                                    } +                                }, +                                "livenessProbe": { +                                    "timeoutSeconds": 1, +                                    "initialDelaySeconds": 30, +                                    "tcpSocket": { +                                        "port": 5432 +                                    } +                                }, +                                "volumeMounts": [ +                                    { +                                        "name": "${DATABASE_SERVICE_NAME}-data", +                                        "mountPath": "/var/lib/pgsql/data" +                                    } +                                ], +                                "env": [ +                                    { +                                        "name": "POSTGRESQL_USER", +                                        "value": "${DATABASE_USER}" +                                    }, +                                    { +                                        "name": "POSTGRESQL_PASSWORD", +                                        "value": "${DATABASE_PASSWORD}" +                                    }, +                                    { +                                        "name": "POSTGRESQL_DATABASE", +                                        "value": "${DATABASE_NAME}" +                                    }, +                                    { +                                        "name": "POSTGRESQL_MAX_CONNECTIONS", +                                        "value": "${POSTGRESQL_MAX_CONNECTIONS}" +                                    }, +                                    { +                                        "name": "POSTGRESQL_SHARED_BUFFERS", +                                        "value": "${POSTGRESQL_SHARED_BUFFERS}" +                                    } +                                ], +                                "resources": { +                                    "limits": { +                                        "memory": "${MEMORY_POSTGRESQL_LIMIT}" +                                    } +                                } +                            } +                        ] +                    } +                } +            } +        } +    ], +    "parameters": [ +        { +            "name": "NAME", +            "displayName": "Name", +            "description": "The name assigned to all of the frontend objects defined in this template.", +            "required": true, +            "value": "musicstore" +        }, +        { +            "name": "MEMORY_LIMIT", +            "displayName": "Memory Limit", +            "required": true, +            "description": "Maximum amount of memory the .NET Core container can use.", +            "value": "512Mi" +        }, +        { +            "name": "MEMORY_POSTGRESQL_LIMIT", +            "displayName": "Memory Limit (PostgreSQL)", +            "required": true, +            "description": "Maximum amount of 
memory the PostgreSQL container can use.", +            "value": "512Mi" +        }, +        { +            "name": "VOLUME_CAPACITY", +            "displayName": "Volume Capacity", +            "description": "Volume space available for data, e.g. 512Mi, 2Gi", +            "value": "1Gi", +            "required": true +        }, +        { +            "name": "DOTNET_IMAGE_STREAM_TAG", +            "displayName": ".NET builder", +            "required": true, +            "description": "The image stream tag which is used to build the code.", +            "value": "dotnet:1.1" +        }, +        { +            "name": "NAMESPACE", +            "displayName": "Namespace", +            "required": true, +            "description": "The OpenShift Namespace where the .NET builder ImageStream resides.", +            "value": "openshift" +        }, +        { +            "name": "SOURCE_REPOSITORY_URL", +            "displayName": "Git Repository URL", +            "required": true, +            "description": "The URL of the repository with your application source code.", +            "value": "https://github.com/redhat-developer/s2i-aspnet-musicstore-ex.git" +        }, +        { +            "name": "SOURCE_REPOSITORY_REF", +            "displayName": "Git Reference", +            "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch.", +            "value": "rel/1.1-example" +        }, +        { +            "name": "CONTEXT_DIR", +            "displayName": "Context Directory", +            "description": "Set this to the relative path to your project if it is not in the root of your repository." +        }, +        { +            "name": "DOTNET_STARTUP_PROJECT", +            "displayName": "Startup Project", +            "description": "Set this to the folder containing your startup project.", +            "value": "samples/MusicStore" +        }, +        { +            "name": "DOTNET_ASSEMBLY_NAME", +            "displayName": "Startup Assembly", +            "description": "Set this when the assembly name is overridden in the project file." +        }, +        { +            "name": "DOTNET_NPM_TOOLS", +            "displayName": "Npm Tools", +            "description": "Set this to a space separated list of npm tools needed to publish." +        }, +        { +            "name": "DOTNET_TEST_PROJECTS", +            "displayName": "Test projects", +            "description": "Set this to a space separated list of test projects to run before publishing." +        }, +        { +            "name": "DOTNET_CONFIGURATION", +            "displayName": "Configuration", +            "description": "Set this to configuration (Release/Debug).", +            "value": "Release" +        }, +        { +            "name": "DOTNET_RESTORE_SOURCES", +            "displayName": "NuGet package sources", +            "description": "Set this to override the NuGet.config sources." 
+        }, +        { +            "name": "APPLICATION_DOMAIN", +            "displayName": "Application Hostname", +            "description": "The exposed hostname that will route to the .NET Core service, if left blank a value will be defaulted.", +            "value": "" +        }, +        { +            "name": "GITHUB_WEBHOOK_SECRET", +            "displayName": "GitHub Webhook Secret", +            "description": "A secret string used to configure the GitHub webhook.", +            "generate": "expression", +            "from": "[a-zA-Z0-9]{40}" +        }, +        { +            "name": "DATABASE_SERVICE_NAME", +            "required": true, +            "displayName": "Database Service Name", +            "value": "postgresql" +        }, +        { +            "name": "DATABASE_USER", +            "displayName": "Database Username", +            "generate": "expression", +            "from": "user[A-Z0-9]{3}" +        }, +        { +            "name": "DATABASE_PASSWORD", +            "displayName": "Database Password", +            "generate": "expression", +            "from": "[a-zA-Z0-9]{8}" +        }, +        { +            "name": "DATABASE_NAME", +            "required": true, +            "displayName": "Database Name", +            "value": "musicstore" +        }, +        { +            "name": "POSTGRESQL_MAX_CONNECTIONS", +            "displayName": "Maximum Database Connections", +            "value": "100" +        }, +        { +            "name": "POSTGRESQL_SHARED_BUFFERS", +            "displayName": "Shared Buffer Amount", +            "value": "12MB" +        } +    ] +} diff --git a/roles/openshift_health_checker/action_plugins/openshift_health_check.py b/roles/openshift_health_checker/action_plugins/openshift_health_check.py index 0411797b1..8b23533c8 100644 --- a/roles/openshift_health_checker/action_plugins/openshift_health_check.py +++ b/roles/openshift_health_checker/action_plugins/openshift_health_check.py @@ -74,6 +74,7 @@ class ActionModule(ActionBase):                  result["failed"] = True                  result["msg"] = "One or more checks failed" +        result["changed"] = any(r.get("changed", False) for r in check_results.values())          return result      def load_known_checks(self): diff --git a/roles/openshift_health_checker/library/docker_container.py b/roles/openshift_health_checker/library/docker_container.py new file mode 100644 index 000000000..f81b4ec01 --- /dev/null +++ b/roles/openshift_health_checker/library/docker_container.py @@ -0,0 +1,2036 @@ +#!/usr/bin/python +# pylint: skip-file +# flake8: noqa + +# TODO: remove this file once openshift-ansible requires ansible >= 2.3. +# This file is a copy of +# https://github.com/ansible/ansible/blob/20bf02f/lib/ansible/modules/cloud/docker/docker_container.py. +# It has been temporarily vendored here due to issue https://github.com/ansible/ansible/issues/22323. + + +# Copyright 2016 Red Hat | Ansible +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with Ansible.  If not, see <http://www.gnu.org/licenses/>. + +ANSIBLE_METADATA = {'status': ['preview'], +                    'supported_by': 'committer', +                    'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: docker_container + +short_description: manage docker containers + +description: +  - Manage the life cycle of docker containers. +  - Supports check mode. Run with --check and --diff to view config difference and list of actions to be taken. + +version_added: "2.1" + +options: +  blkio_weight: +    description: +      - Block IO (relative weight), between 10 and 1000. +    default: null +    required: false +  capabilities: +    description: +      - List of capabilities to add to the container. +    default: null +    required: false +  cleanup: +    description: +      - Use with I(detach) to remove the container after successful execution. +    default: false +    required: false +    version_added: "2.2" +  command: +    description: +      - Command to execute when the container starts. +    default: null +    required: false +  cpu_period: +    description: +      - Limit CPU CFS (Completely Fair Scheduler) period +    default: 0 +    required: false +  cpu_quota: +    description: +      - Limit CPU CFS (Completely Fair Scheduler) quota +    default: 0 +    required: false +  cpuset_cpus: +    description: +      - CPUs in which to allow execution C(1,3) or C(1-3). +    default: null +    required: false +  cpuset_mems: +    description: +      - Memory nodes (MEMs) in which to allow execution C(0-3) or C(0,1) +    default: null +    required: false +  cpu_shares: +    description: +      - CPU shares (relative weight). +    default: null +    required: false +  detach: +    description: +      - Enable detached mode to leave the container running in background. +        If disabled, the task will reflect the status of the container run (failed if the command failed). +    default: true +    required: false +  devices: +    description: +      - "List of host device bindings to add to the container. Each binding is a mapping expressed +        in the format: <path_on_host>:<path_in_container>:<cgroup_permissions>" +    default: null +    required: false +  dns_servers: +    description: +      - List of custom DNS servers. +    default: null +    required: false +  dns_search_domains: +    description: +      - List of custom DNS search domains. +    default: null +    required: false +  env: +    description: +      - Dictionary of key,value pairs. +    default: null +    required: false +  env_file: +    version_added: "2.2" +    description: +      - Path to a file containing environment variables I(FOO=BAR). +      - If variable also present in C(env), then C(env) value will override. +      - Requires docker-py >= 1.4.0. +    default: null +    required: false +  entrypoint: +    description: +      - Command that overwrites the default ENTRYPOINT of the image. +    default: null +    required: false +  etc_hosts: +    description: +      - Dict of host-to-IP mappings, where each host name is a key in the dictionary. +        Each host name will be added to the container's /etc/hosts file. +    default: null +    required: false +  exposed_ports: +    description: +      - List of additional container ports which informs Docker that the container +        listens on the specified network ports at runtime. 
+        If the port is already exposed using EXPOSE in a Dockerfile, it does not
+        need to be exposed again.
+    default: null
+    required: false
+    aliases:
+      - exposed
+  force_kill:
+    description:
+      - Use the kill command when stopping a running container.
+    default: false
+    required: false
+  groups:
+    description:
+      - List of additional group names and/or IDs that the container process will run as.
+    default: null
+    required: false
+  hostname:
+    description:
+      - Container hostname.
+    default: null
+    required: false
+  ignore_image:
+    description:
+      - When C(state) is I(present) or I(started) the module compares the configuration of an existing
+        container to requested configuration. The evaluation includes the image version. If
+        the image version in the registry does not match the container, the container will be
+        recreated. Stop this behavior by setting C(ignore_image) to I(True).
+    default: false
+    required: false
+    version_added: "2.2"
+  image:
+    description:
+      - Repository path and tag used to create the container. If an image is not found or pull is true, the image
+        will be pulled from the registry. If no tag is included, 'latest' will be used.
+    default: null
+    required: false
+  interactive:
+    description:
+      - Keep stdin open after a container is launched, even if not attached.
+    default: false
+    required: false
+  ipc_mode:
+    description:
+      - Set the IPC mode for the container. Can be one of 'container:<name|id>' to reuse another
+        container's IPC namespace or 'host' to use the host's IPC namespace within the container.
+    default: null
+    required: false
+  keep_volumes:
+    description:
+      - Retain volumes associated with a removed container.
+    default: true
+    required: false
+  kill_signal:
+    description:
+      - Override default signal used to kill a running container.
+    default: null
+    required: false
+  kernel_memory:
+    description:
+      - "Kernel memory limit (format: <number>[<unit>]). Number is a positive integer.
+        Unit can be one of b, k, m, or g. Minimum is 4M."
+    default: 0
+    required: false
+  labels:
+     description:
+       - Dictionary of key value pairs.
+     default: null
+     required: false
+  links:
+    description:
+      - List of name aliases for linked containers in the format C(container_name:alias)
+    default: null
+    required: false
+  log_driver:
+    description:
+      - Specify the logging driver. Docker uses json-file by default.
+    choices:
+      - none
+      - json-file
+      - syslog
+      - journald
+      - gelf
+      - fluentd
+      - awslogs
+      - splunk
+    default: null
+    required: false
+  log_options:
+    description:
+      - Dictionary of options specific to the chosen log_driver. See https://docs.docker.com/engine/admin/logging/overview/
+        for details.
+    required: false
+    default: null
+  mac_address:
+    description:
+      - Container MAC address (e.g. 92:d0:c6:0a:29:33)
+    default: null
+    required: false
+  memory:
+    description:
+      - "Memory limit (format: <number>[<unit>]). Number is a positive integer.
+        Unit can be one of b, k, m, or g"
+    default: 0
+    required: false
+  memory_reservation:
+    description:
+      - "Memory soft limit (format: <number>[<unit>]). Number is a positive integer.
+        Unit can be one of b, k, m, or g" +    default: 0 +    required: false +  memory_swap: +    description: +      - Total memory limit (memory + swap, format:<number>[<unit>]). +        Number is a positive integer. Unit can be one of b, k, m, or g. +    default: 0 +    required: false +  memory_swappiness: +    description: +        - Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100. +    default: 0 +    required: false +  name: +    description: +      - Assign a name to a new container or match an existing container. +      - When identifying an existing container name may be a name or a long or short container ID. +    required: true +  network_mode: +    description: +      - Connect the container to a network. +    choices: +      - bridge +      - container:<name|id> +      - host +      - none +    default: null +    required: false +  networks: +     description: +       - List of networks the container belongs to. +       - Each network is a dict with keys C(name), C(ipv4_address), C(ipv6_address), C(links), C(aliases). +       - For each network C(name) is required, all other keys are optional. +       - If included, C(links) or C(aliases) are lists. +       - For examples of the data structure and usage see EXAMPLES below. +       - To remove a container from one or more networks, use the C(purge_networks) option. +     default: null +     required: false +     version_added: "2.2" +  oom_killer: +    description: +      - Whether or not to disable OOM Killer for the container. +    default: false +    required: false +  oom_score_adj: +    description: +      - An integer value containing the score given to the container in order to tune OOM killer preferences. +    default: 0 +    required: false +    version_added: "2.2" +  paused: +    description: +      - Use with the started state to pause running processes inside the container. +    default: false +    required: false +  pid_mode: +    description: +      - Set the PID namespace mode for the container. Currently only supports 'host'. +    default: null +    required: false +  privileged: +    description: +      - Give extended privileges to the container. +    default: false +    required: false +  published_ports: +    description: +      - List of ports to publish from the container to the host. +      - "Use docker CLI syntax: C(8000), C(9000:8000), or C(0.0.0.0:9000:8000), where 8000 is a +        container port, 9000 is a host port, and 0.0.0.0 is a host interface." +      - Container ports must be exposed either in the Dockerfile or via the C(expose) option. +      - A value of all will publish all exposed container ports to random host ports, ignoring +        any other mappings. +      - If C(networks) parameter is provided, will inspect each network to see if there exists +        a bridge network with optional parameter com.docker.network.bridge.host_binding_ipv4. +        If such a network is found, then published ports where no host IP address is specified +        will be bound to the host IP pointed to by com.docker.network.bridge.host_binding_ipv4. +        Note that the first bridge network with a com.docker.network.bridge.host_binding_ipv4 +        value encountered in the list of C(networks) is the one that will be used. +    aliases: +      - ports +    required: false +    default: null +  pull: +    description: +       - If true, always pull the latest version of an image. Otherwise, will only pull an image when missing. 
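+       - For example, C(pull: true) makes the module pull the image on every run
+         instead of only when it is missing (illustrative note, not upstream
+         text).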
+    default: false +    required: false +  purge_networks: +    description: +       - Remove the container from ALL networks not included in C(networks) parameter. +       - Any default networks such as I(bridge), if not found in C(networks), will be removed as well. +    default: false +    required: false +    version_added: "2.2" +  read_only: +    description: +      - Mount the container's root file system as read-only. +    default: false +    required: false +  recreate: +    description: +      - Use with present and started states to force the re-creation of an existing container. +    default: false +    required: false +  restart: +    description: +      - Use with started state to force a matching container to be stopped and restarted. +    default: false +    required: false +  restart_policy: +    description: +      - Container restart policy. Place quotes around I(no) option. +    choices: +      - always +      - no +      - on-failure +      - unless-stopped +    default: on-failure +    required: false +  restart_retries: +    description: +       - Use with restart policy to control maximum number of restart attempts. +    default: 0 +    required: false +  shm_size: +    description: +      - Size of `/dev/shm`. The format is `<number><unit>`. `number` must be greater than `0`. +        Unit is optional and can be `b` (bytes), `k` (kilobytes), `m` (megabytes), or `g` (gigabytes). +      - Omitting the unit defaults to bytes. If you omit the size entirely, the system uses `64m`. +    default: null +    required: false +  security_opts: +    description: +      - List of security options in the form of C("label:user:User") +    default: null +    required: false +  state: +    description: +      - 'I(absent) - A container matching the specified name will be stopped and removed. Use force_kill to kill the container +         rather than stopping it. Use keep_volumes to retain volumes associated with the removed container.' +      - 'I(present) - Asserts the existence of a container matching the name and any provided configuration parameters. If no +        container matches the name, a container will be created. If a container matches the name but the provided configuration +        does not match, the container will be updated, if it can be. If it cannot be updated, it will be removed and re-created +        with the requested config. Image version will be taken into account when comparing configuration. To ignore image +        version use the ignore_image option. Use the recreate option to force the re-creation of the matching container. Use +        force_kill to kill the container rather than stopping it. Use keep_volumes to retain volumes associated with a removed +        container.' +      - 'I(started) - Asserts there is a running container matching the name and any provided configuration. If no container +        matches the name, a container will be created and started. If a container matching the name is found but the +        configuration does not match, the container will be updated, if it can be. If it cannot be updated, it will be removed +        and a new container will be created with the requested configuration and started. Image version will be taken into +        account when comparing configuration. To ignore image version use the ignore_image option. Use recreate to always +        re-create a matching container, even if it is running. Use restart to force a matching container to be stopped and +        restarted. 
Use force_kill to kill a container rather than stopping it. Use keep_volumes to retain volumes associated
+        with a removed container.'
+      - 'I(stopped) - Asserts that the container is first I(present), and then if the container is running moves it to a stopped
+        state. Use force_kill to kill a container rather than stopping it.'
+    required: false
+    default: started
+    choices:
+      - absent
+      - present
+      - stopped
+      - started
+  stop_signal:
+    description:
+      - Override default signal used to stop the container.
+    default: null
+    required: false
+  stop_timeout:
+    description:
+      - Number of seconds to wait for the container to stop before sending SIGKILL.
+    required: false
+    default: null
+  trust_image_content:
+    description:
+      - If true, skip image verification.
+    default: false
+    required: false
+  tty:
+    description:
+      - Allocate a pseudo-TTY.
+    default: false
+    required: false
+  ulimits:
+    description:
+      - "List of ulimit options. A ulimit is specified as C(nofile:262144:262144)"
+    default: null
+    required: false
+  user:
+    description:
+      - Sets the username or UID used and optionally the groupname or GID for the specified command.
+      - "Can be [ user | user:group | uid | uid:gid | user:gid | uid:group ]"
+    default: null
+    required: false
+  uts:
+    description:
+      - Set the UTS namespace mode for the container.
+    default: null
+    required: false
+  volumes:
+    description:
+      - List of volumes to mount within the container.
+      - "Use docker CLI-style syntax: C(/host:/container[:mode])"
+      - You can specify a read mode for the mount with either C(ro) or C(rw).
+      - SELinux hosts can additionally use C(z) or C(Z) to use a shared or
+        private label for the volume.
+    default: null
+    required: false
+  volume_driver:
+    description:
+      - The container volume driver.
+    default: none
+    required: false
+  volumes_from:
+    description:
+      - List of container names or IDs to get volumes from.
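+      - For example C(volumes_from: ['mydata']) attaches the volumes defined by
+        the I(mydata) container (illustrative note; see the data container
+        example in EXAMPLES below).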
+    default: null
+    required: false
+extends_documentation_fragment:
+    - docker
+
+author:
+    - "Cove Schneider (@cove)"
+    - "Joshua Conner (@joshuaconner)"
+    - "Pavel Antonov (@softzilla)"
+    - "Thomas Steinbach (@ThomasSteinbach)"
+    - "Philippe Jandot (@zfil)"
+    - "Daan Oosterveld (@dusdanig)"
+    - "James Tanner (@jctanner)"
+    - "Chris Houseknecht (@chouseknecht)"
+
+requirements:
+    - "python >= 2.6"
+    - "docker-py >= 1.7.0"
+    - "Docker API >= 1.20"
+'''
+
+EXAMPLES = '''
+- name: Create a data container
+  docker_container:
+    name: mydata
+    image: busybox
+    volumes:
+      - /data
+
+- name: Re-create a redis container
+  docker_container:
+    name: myredis
+    image: redis
+    command: redis-server --appendonly yes
+    state: present
+    recreate: yes
+    exposed_ports:
+      - 6379
+    volumes_from:
+      - mydata
+
+- name: Restart a container
+  docker_container:
+    name: myapplication
+    image: someuser/appimage
+    state: started
+    restart: yes
+    links:
+     - "myredis:aliasedredis"
+    devices:
+     - "/dev/sda:/dev/xvda:rwm"
+    ports:
+     - "8080:9000"
+     - "127.0.0.1:8081:9001/udp"
+    env:
+        SECRET_KEY: ssssh
+
+- name: Container present
+  docker_container:
+    name: mycontainer
+    state: present
+    image: ubuntu:14.04
+    command: sleep infinity
+
+- name: Stop a container
+  docker_container:
+    name: mycontainer
+    state: stopped
+
+- name: Start 4 load-balanced containers
+  docker_container:
+    name: "container{{ item }}"
+    recreate: yes
+    image: someuser/anotherappimage
+    command: sleep 1d
+  with_sequence: count=4
+
+- name: Remove container
+  docker_container:
+    name: ohno
+    state: absent
+
+- name: Syslogging output
+  docker_container:
+    name: myservice
+    image: busybox
+    log_driver: syslog
+    log_options:
+      syslog-address: tcp://my-syslog-server:514
+      syslog-facility: daemon
+      # NOTE: in Docker 1.13+ the "syslog-tag" option was renamed to "tag"; for
+      # older docker installs, use "syslog-tag" instead
+      tag: myservice
+
+- name: Create db container and connect to network
+  docker_container:
+    name: db_test
+    image: "postgres:latest"
+    networks:
+      - name: "{{ docker_network_name }}"
+
+- name: Start container, connect to network and link
+  docker_container:
+    name: sleeper
+    image: ubuntu:14.04
+    networks:
+      - name: TestingNet
+        ipv4_address: "172.1.1.100"
+        aliases:
+          - sleepyzz
+        links:
+          - db_test:db
+      - name: TestingNet2
+
+- name: Start a container with a command
+  docker_container:
+    name: sleepy
+    image: ubuntu:14.04
+    command: sleep infinity
+
+- name: Add container to networks
+  docker_container:
+    name: sleepy
+    networks:
+      - name: TestingNet
+        ipv4_address: 172.1.1.18
+        links:
+          - sleeper
+      - name: TestingNet2
+        ipv4_address: 172.1.10.20
+
+- name: Update network with aliases
+  docker_container:
+    name: sleepy
+    networks:
+      - name: TestingNet
+        aliases:
+          - sleepyz
+          - zzzz
+
+- name: Remove container from one network
+  docker_container:
+    name: sleepy
+    networks:
+      - name: TestingNet2
+    purge_networks: yes
+
+- name: Remove container from all networks
+  docker_container:
+    name: sleepy
+    purge_networks: yes
+
+'''
+
+RETURN = '''
+docker_container:
+    description:
+      - Before 2.3 this was 'ansible_docker_container' but
was renamed due to conflicts with the connection plugin. +      - Facts representing the current state of the container. Matches the docker inspection output. +      - Note that facts are not part of registered vars but accessible directly. +      - Empty if C(state) is I(absent) +      - If detached is I(False), will include Output attribute containing any output from container run. +    returned: always +    type: dict +    sample: '{ +        "AppArmorProfile": "", +        "Args": [], +        "Config": { +            "AttachStderr": false, +            "AttachStdin": false, +            "AttachStdout": false, +            "Cmd": [ +                "/usr/bin/supervisord" +            ], +            "Domainname": "", +            "Entrypoint": null, +            "Env": [ +                "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" +            ], +            "ExposedPorts": { +                "443/tcp": {}, +                "80/tcp": {} +            }, +            "Hostname": "8e47bf643eb9", +            "Image": "lnmp_nginx:v1", +            "Labels": {}, +            "OnBuild": null, +            "OpenStdin": false, +            "StdinOnce": false, +            "Tty": false, +            "User": "", +            "Volumes": { +                "/tmp/lnmp/nginx-sites/logs/": {} +            }, +            ... +    }' +''' + +import re + +from ansible.module_utils.docker_common import * + +try: +    from docker import utils +    if HAS_DOCKER_PY_2: +        from docker.types import Ulimit +    else: +        from docker.utils.types import Ulimit +except: +    # missing docker-py handled in ansible.module_utils.docker +    pass + + +REQUIRES_CONVERSION_TO_BYTES = [ +    'memory', +    'memory_reservation', +    'memory_swap', +    'shm_size' +] + +VOLUME_PERMISSIONS = ('rw', 'ro', 'z', 'Z') + +class TaskParameters(DockerBaseClass): +    ''' +    Access and parse module parameters +    ''' + +    def __init__(self, client): +        super(TaskParameters, self).__init__() +        self.client = client + +        self.blkio_weight = None +        self.capabilities = None +        self.cleanup = None +        self.command = None +        self.cpu_period = None +        self.cpu_quota = None +        self.cpuset_cpus = None +        self.cpuset_mems = None +        self.cpu_shares = None +        self.detach = None +        self.debug = None +        self.devices = None +        self.dns_servers = None +        self.dns_opts = None +        self.dns_search_domains = None +        self.env = None +        self.env_file = None +        self.entrypoint = None +        self.etc_hosts = None +        self.exposed_ports = None +        self.force_kill = None +        self.groups = None +        self.hostname = None +        self.ignore_image = None +        self.image = None +        self.interactive = None +        self.ipc_mode = None +        self.keep_volumes = None +        self.kernel_memory = None +        self.kill_signal = None +        self.labels = None +        self.links = None +        self.log_driver = None +        self.log_options = None +        self.mac_address = None +        self.memory = None +        self.memory_reservation = None +        self.memory_swap = None +        self.memory_swappiness = None +        self.name = None +        self.network_mode = None +        self.networks = None +        self.oom_killer = None +        self.oom_score_adj = None +        self.paused = None +        self.pid_mode = None +        self.privileged = None +      
  self.purge_networks = None +        self.pull = None +        self.read_only = None +        self.recreate = None +        self.restart = None +        self.restart_retries = None +        self.restart_policy = None +        self.shm_size = None +        self.security_opts = None +        self.state = None +        self.stop_signal = None +        self.stop_timeout = None +        self.trust_image_content = None +        self.tty = None +        self.user = None +        self.uts = None +        self.volumes = None +        self.volume_binds = dict() +        self.volumes_from = None +        self.volume_driver = None + +        for key, value in client.module.params.items(): +            setattr(self, key, value) + +        for param_name in REQUIRES_CONVERSION_TO_BYTES: +            if client.module.params.get(param_name): +                try: +                    setattr(self, param_name, human_to_bytes(client.module.params.get(param_name))) +                except ValueError as exc: +                    self.fail("Failed to convert %s to bytes: %s" % (param_name, exc)) + +        self.publish_all_ports = False +        self.published_ports = self._parse_publish_ports() +        if self.published_ports in ('all', 'ALL'): +            self.publish_all_ports = True +            self.published_ports = None + +        self.ports = self._parse_exposed_ports(self.published_ports) +        self.log("expose ports:") +        self.log(self.ports, pretty_print=True) + +        self.links = self._parse_links(self.links) + +        if self.volumes: +            self.volumes = self._expand_host_paths() + +        self.env = self._get_environment() +        self.ulimits = self._parse_ulimits() +        self.log_config = self._parse_log_config() +        self.exp_links = None +        self.volume_binds = self._get_volume_binds(self.volumes) + +        self.log("volumes:") +        self.log(self.volumes, pretty_print=True) +        self.log("volume binds:") +        self.log(self.volume_binds, pretty_print=True) + +        if self.networks: +            for network in self.networks: +                if not network.get('name'): +                    self.fail("Parameter error: network must have a name attribute.") +                network['id'] = self._get_network_id(network['name']) +                if not network['id']: +                    self.fail("Parameter error: network named %s could not be found. Does it exist?" 
% network['name']) +                if network.get('links'): +                    network['links'] = self._parse_links(network['links']) + +    def fail(self, msg): +        self.client.module.fail_json(msg=msg) + +    @property +    def update_parameters(self): +        ''' +        Returns parameters used to update a container +        ''' + +        update_parameters = dict( +            blkio_weight='blkio_weight', +            cpu_period='cpu_period', +            cpu_quota='cpu_quota', +            cpu_shares='cpu_shares', +            cpuset_cpus='cpuset_cpus', +            mem_limit='memory', +            mem_reservation='mem_reservation', +            memswap_limit='memory_swap', +            kernel_memory='kernel_memory' +        ) +        result = dict() +        for key, value in update_parameters.items(): +            if getattr(self, value, None) is not None: +                result[key] = getattr(self, value) +        return result + +    @property +    def create_parameters(self): +        ''' +        Returns parameters used to create a container +        ''' +        create_params = dict( +            command='command', +            hostname='hostname', +            user='user', +            detach='detach', +            stdin_open='interactive', +            tty='tty', +            ports='ports', +            environment='env', +            name='name', +            entrypoint='entrypoint', +            cpu_shares='cpu_shares', +            mac_address='mac_address', +            labels='labels', +            stop_signal='stop_signal', +            volume_driver='volume_driver', +        ) + +        result = dict( +            host_config=self._host_config(), +            volumes=self._get_mounts(), +        ) + +        for key, value in create_params.items(): +            if getattr(self, value, None) is not None: +                result[key] = getattr(self, value) +        return result + +    def _expand_host_paths(self): +        new_vols = [] +        for vol in self.volumes: +            if ':' in vol: +                if len(vol.split(':')) == 3: +                    host, container, mode = vol.split(':') +                    if re.match(r'[\.~]', host): +                        host = os.path.abspath(host) +                    new_vols.append("%s:%s:%s" % (host, container, mode)) +                    continue +                elif len(vol.split(':')) == 2: +                    parts = vol.split(':') +                    if parts[1] not in VOLUME_PERMISSIONS and re.match(r'[\.~]', parts[0]): +                        host = os.path.abspath(parts[0]) +                        new_vols.append("%s:%s:rw" % (host, parts[1])) +                        continue +            new_vols.append(vol) +        return new_vols + +    def _get_mounts(self): +        ''' +        Return a list of container mounts. 
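+
+        Illustrative sketch (not upstream text): for volumes of
+        ["/data", "/host/logs:/var/log:ro"] this returns ["/data", "/var/log"],
+        i.e. only the container-side paths.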
+        :return: +        ''' +        result = [] +        if self.volumes: +            for vol in self.volumes: +                if ':' in vol: +                    if len(vol.split(':')) == 3: +                        host, container, _ = vol.split(':') +                        result.append(container) +                        continue +                    if len(vol.split(':')) == 2: +                        parts = vol.split(':') +                        if parts[1] not in VOLUME_PERMISSIONS: +                            result.append(parts[1]) +                            continue +                result.append(vol) +        self.log("mounts:") +        self.log(result, pretty_print=True) +        return result + +    def _host_config(self): +        ''' +        Returns parameters used to create a HostConfig object +        ''' + +        host_config_params=dict( +            port_bindings='published_ports', +            publish_all_ports='publish_all_ports', +            links='links', +            privileged='privileged', +            dns='dns_servers', +            dns_search='dns_search_domains', +            binds='volume_binds', +            volumes_from='volumes_from', +            network_mode='network_mode', +            cap_add='capabilities', +            extra_hosts='etc_hosts', +            read_only='read_only', +            ipc_mode='ipc_mode', +            security_opt='security_opts', +            ulimits='ulimits', +            log_config='log_config', +            mem_limit='memory', +            memswap_limit='memory_swap', +            mem_swappiness='memory_swappiness', +            oom_score_adj='oom_score_adj', +            shm_size='shm_size', +            group_add='groups', +            devices='devices', +            pid_mode='pid_mode' +        ) +        params = dict() +        for key, value in host_config_params.items(): +            if getattr(self, value, None) is not None: +                params[key] = getattr(self, value) + +        if self.restart_policy: +            params['restart_policy'] = dict(Name=self.restart_policy, +                                            MaximumRetryCount=self.restart_retries) + +        return self.client.create_host_config(**params) + +    @property +    def default_host_ip(self): +        ip = '0.0.0.0' +        if not self.networks: +            return ip +        for net in self.networks: +            if net.get('name'): +                network = self.client.inspect_network(net['name']) +                if network.get('Driver') == 'bridge' and \ +                   network.get('Options', {}).get('com.docker.network.bridge.host_binding_ipv4'): +                    ip = network['Options']['com.docker.network.bridge.host_binding_ipv4'] +                    break +        return ip + +    def _parse_publish_ports(self): +        ''' +        Parse ports from docker CLI syntax +        ''' +        if self.published_ports is None: +            return None + +        if 'all' in self.published_ports: +            return 'all' + +        default_ip = self.default_host_ip + +        binds = {} +        for port in self.published_ports: +            parts = str(port).split(':') +            container_port = parts[-1] +            if '/' not in container_port: +                container_port = int(parts[-1]) + +            p_len = len(parts) +            if p_len == 1: +                bind = (default_ip,) +            elif p_len == 2: +                bind = (default_ip, int(parts[0])) +            elif p_len == 
3: +                bind = (parts[0], int(parts[1])) if parts[1] else (parts[0],) + +            if container_port in binds: +                old_bind = binds[container_port] +                if isinstance(old_bind, list): +                    old_bind.append(bind) +                else: +                    binds[container_port] = [binds[container_port], bind] +            else: +                binds[container_port] = bind +        return binds + +    @staticmethod +    def _get_volume_binds(volumes): +        ''' +        Extract host bindings, if any, from list of volume mapping strings. + +        :return: dictionary of bind mappings +        ''' +        result = dict() +        if volumes: +            for vol in volumes: +                host = None +                if ':' in vol: +                    if len(vol.split(':')) == 3: +                        host, container, mode = vol.split(':') +                    if len(vol.split(':')) == 2: +                        parts = vol.split(':') +                        if parts[1] not in VOLUME_PERMISSIONS: +                            host, container, mode = (vol.split(':') + ['rw']) +                if host is not None: +                    result[host] = dict( +                        bind=container, +                        mode=mode +                    ) +        return result + +    def _parse_exposed_ports(self, published_ports): +        ''' +        Parse exposed ports from docker CLI-style ports syntax. +        ''' +        exposed = [] +        if self.exposed_ports: +            for port in self.exposed_ports: +                port = str(port).strip() +                protocol = 'tcp' +                match = re.search(r'(/.+$)', port) +                if match: +                    protocol = match.group(1).replace('/', '') +                    port = re.sub(r'/.+$', '', port) +                exposed.append((port, protocol)) +        if published_ports: +            # Any published port should also be exposed +            for publish_port in published_ports: +                match = False +                if isinstance(publish_port, basestring) and '/' in publish_port: +                    port, protocol = publish_port.split('/') +                    port = int(port) +                else: +                    protocol = 'tcp' +                    port = int(publish_port) +                for exposed_port in exposed: +                    if isinstance(exposed_port[0], basestring) and '-' in exposed_port[0]: +                        start_port, end_port = exposed_port[0].split('-') +                        if int(start_port) <= port <= int(end_port): +                            match = True +                    elif exposed_port[0] == port: +                        match = True +                if not match: +                    exposed.append((port, protocol)) +        return exposed + +    @staticmethod +    def _parse_links(links): +        ''' +        Turn links into a dictionary +        ''' +        if links is None: +            return None + +        result = {} +        for link in links: +            parsed_link = link.split(':', 1) +            if len(parsed_link) == 2: +                result[parsed_link[0]] = parsed_link[1] +            else: +                result[parsed_link[0]] = parsed_link[0] +        return result + +    def _parse_ulimits(self): +        ''' +        Turn ulimits into an array of Ulimit objects +        ''' +        if self.ulimits is None: +            return None + +        results 
= [] +        for limit in self.ulimits: +            limits = dict() +            pieces = limit.split(':') +            if len(pieces) >= 2: +                limits['name'] = pieces[0] +                limits['soft'] = int(pieces[1]) +                limits['hard'] = int(pieces[1]) +            if len(pieces) == 3: +                limits['hard'] = int(pieces[2]) +            try: +                results.append(Ulimit(**limits)) +            except ValueError as exc: +                self.fail("Error parsing ulimits value %s - %s" % (limit, exc)) +        return results + +    def _parse_log_config(self): +        ''' +        Create a LogConfig object +        ''' +        if self.log_driver is None: +            return None + +        options = dict( +            Type=self.log_driver, +            Config = dict() +        ) + +        if self.log_options is not None: +            options['Config'] = self.log_options + +        try: +            return LogConfig(**options) +        except ValueError as exc: +            self.fail('Error parsing logging options - %s' % (exc)) + +    def _get_environment(self): +        """ +        If environment file is combined with explicit environment variables, the explicit environment variables +        take precedence. +        """ +        final_env = {} +        if self.env_file: +            parsed_env_file = utils.parse_env_file(self.env_file) +            for name, value in parsed_env_file.items(): +                final_env[name] = str(value) +        if self.env: +            for name, value in self.env.items(): +                final_env[name] = str(value) +        return final_env + +    def _get_network_id(self, network_name): +        network_id = None +        try: +            for network in self.client.networks(names=[network_name]): +                if network['Name'] == network_name: +                    network_id = network['Id'] +                    break +        except Exception as exc: +            self.fail("Error getting network id for %s - %s" % (network_name, str(exc))) +        return network_id + + + +class Container(DockerBaseClass): + +    def __init__(self, container, parameters): +        super(Container, self).__init__() +        self.raw = container +        self.Id = None +        self.container = container +        if container: +            self.Id = container['Id'] +            self.Image = container['Image'] +        self.log(self.container, pretty_print=True) +        self.parameters = parameters +        self.parameters.expected_links = None +        self.parameters.expected_ports = None +        self.parameters.expected_exposed = None +        self.parameters.expected_volumes = None +        self.parameters.expected_ulimits = None +        self.parameters.expected_etc_hosts = None +        self.parameters.expected_env = None + +    def fail(self, msg): +        self.parameters.client.module.fail_json(msg=msg) + +    @property +    def exists(self): +        return True if self.container else False + +    @property +    def running(self): +        if self.container and self.container.get('State'): +            if self.container['State'].get('Running') and not self.container['State'].get('Ghost', False): +                return True +        return False + +    def has_different_configuration(self, image): +        ''' +        Diff parameters vs existing container config. 
Returns tuple: (True | False, List of differences) +        ''' +        self.log('Starting has_different_configuration') +        self.parameters.expected_entrypoint = self._get_expected_entrypoint() +        self.parameters.expected_links = self._get_expected_links() +        self.parameters.expected_ports = self._get_expected_ports() +        self.parameters.expected_exposed = self._get_expected_exposed(image) +        self.parameters.expected_volumes = self._get_expected_volumes(image) +        self.parameters.expected_binds = self._get_expected_binds(image) +        self.parameters.expected_ulimits = self._get_expected_ulimits(self.parameters.ulimits) +        self.parameters.expected_etc_hosts = self._convert_simple_dict_to_list('etc_hosts') +        self.parameters.expected_env = self._get_expected_env(image) +        self.parameters.expected_cmd = self._get_expected_cmd() +        self.parameters.expected_devices = self._get_expected_devices() + +        if not self.container.get('HostConfig'): +            self.fail("has_config_diff: Error parsing container properties. HostConfig missing.") +        if not self.container.get('Config'): +            self.fail("has_config_diff: Error parsing container properties. Config missing.") +        if not self.container.get('NetworkSettings'): +            self.fail("has_config_diff: Error parsing container properties. NetworkSettings missing.") + +        host_config = self.container['HostConfig'] +        log_config = host_config.get('LogConfig', dict()) +        restart_policy = host_config.get('RestartPolicy', dict()) +        config = self.container['Config'] +        network = self.container['NetworkSettings'] + +        # The previous version of the docker module ignored the detach state by +        # assuming if the container was running, it must have been detached. 
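+        # Illustrative note (not upstream code): a container started without
+        # detach attaches stdout/stderr, so `docker inspect` reports
+        # AttachStderr/AttachStdout as true; the expression below inverts that
+        # to recover the effective detach setting.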
+        detach = not (config.get('AttachStderr') and config.get('AttachStdout'))
+
+        # "ExposedPorts": null returns None type & causes AttributeError - PR #5517
+        if config.get('ExposedPorts') is not None:
+            expected_exposed = [re.sub(r'/.+$', '', p) for p in config.get('ExposedPorts', dict()).keys()]
+        else:
+            expected_exposed = []
+
+        # Map parameters to container inspect results
+        config_mapping = dict(
+            image=config.get('Image'),
+            expected_cmd=config.get('Cmd'),
+            hostname=config.get('Hostname'),
+            user=config.get('User'),
+            detach=detach,
+            interactive=config.get('OpenStdin'),
+            capabilities=host_config.get('CapAdd'),
+            expected_devices=host_config.get('Devices'),
+            dns_servers=host_config.get('Dns'),
+            dns_opts=host_config.get('DnsOptions'),
+            dns_search_domains=host_config.get('DnsSearch'),
+            expected_env=(config.get('Env') or []),
+            expected_entrypoint=config.get('Entrypoint'),
+            expected_etc_hosts=host_config['ExtraHosts'],
+            expected_exposed=expected_exposed,
+            groups=host_config.get('GroupAdd'),
+            ipc_mode=host_config.get("IpcMode"),
+            labels=config.get('Labels'),
+            expected_links=host_config.get('Links'),
+            log_driver=log_config.get('Type'),
+            log_options=log_config.get('Config'),
+            mac_address=network.get('MacAddress'),
+            memory_swappiness=host_config.get('MemorySwappiness'),
+            network_mode=host_config.get('NetworkMode'),
+            oom_killer=host_config.get('OomKillDisable'),
+            oom_score_adj=host_config.get('OomScoreAdj'),
+            pid_mode=host_config.get('PidMode'),
+            privileged=host_config.get('Privileged'),
+            expected_ports=host_config.get('PortBindings'),
+            read_only=host_config.get('ReadonlyRootfs'),
+            restart_policy=restart_policy.get('Name'),
+            restart_retries=restart_policy.get('MaximumRetryCount'),
+            # Cannot test shm_size, as shm_size is not included in container inspection results.
+            # shm_size=host_config.get('ShmSize'),
+            security_opts=host_config.get("SecurityOpt"),
+            stop_signal=config.get("StopSignal"),
+            tty=config.get('Tty'),
+            expected_ulimits=host_config.get('Ulimits'),
+            uts=host_config.get('UTSMode'),
+            expected_volumes=config.get('Volumes'),
+            expected_binds=host_config.get('Binds'),
+            volumes_from=host_config.get('VolumesFrom'),
+            volume_driver=host_config.get('VolumeDriver')
+        )
+
+        differences = []
+        for key, value in config_mapping.items():
+            self.log('check differences %s %s vs %s' % (key, getattr(self.parameters, key), str(value)))
+            if getattr(self.parameters, key, None) is not None:
+                if isinstance(getattr(self.parameters, key), list) and isinstance(value, list):
+                    if len(getattr(self.parameters, key)) > 0 and isinstance(getattr(self.parameters, key)[0], dict):
+                        # compare list of dictionaries
+                        self.log("comparing list of dict: %s" % key)
+                        match = self._compare_dictionary_lists(getattr(self.parameters, key), value)
+                    else:
+                        # compare two lists.
Is list_a in list_b? +                        self.log("comparing lists: %s" % key) +                        set_a = set(getattr(self.parameters, key)) +                        set_b = set(value) +                        match = (set_a <= set_b) +                elif isinstance(getattr(self.parameters, key), dict) and isinstance(value, dict): +                    # compare two dicts +                    self.log("comparing two dicts: %s" % key) +                    match = self._compare_dicts(getattr(self.parameters, key), value) +                else: +                    # primitive compare +                    self.log("primitive compare: %s" % key) +                    match = (getattr(self.parameters, key) == value) + +                if not match: +                    # no match. record the differences +                    item = dict() +                    item[key] = dict( +                        parameter=getattr(self.parameters, key), +                        container=value +                    ) +                    differences.append(item) + +        has_differences = True if len(differences) > 0 else False +        return has_differences, differences + +    def _compare_dictionary_lists(self, list_a, list_b): +        ''' +        If all of list_a exists in list_b, return True +        ''' +        if not isinstance(list_a, list) or not isinstance(list_b, list): +            return False +        matches = 0 +        for dict_a in list_a: +            for dict_b in list_b: +                if self._compare_dicts(dict_a, dict_b): +                    matches += 1 +                    break +        result = (matches == len(list_a)) +        return result + +    def _compare_dicts(self, dict_a, dict_b): +        ''' +        If dict_a in dict_b, return True +        ''' +        if not isinstance(dict_a, dict) or not isinstance(dict_b, dict): +            return False +        for key, value in dict_a.items(): +            if isinstance(value, dict): +                match = self._compare_dicts(value, dict_b.get(key)) +            elif isinstance(value, list): +                if len(value) > 0 and isinstance(value[0], dict): +                    match = self._compare_dictionary_lists(value, dict_b.get(key)) +                else: +                    set_a = set(value) +                    set_b = set(dict_b.get(key)) +                    match = (set_a == set_b) +            else: +                match = (value == dict_b.get(key)) +            if not match: +                return False +        return True + +    def has_different_resource_limits(self): +        ''' +        Diff parameters and container resource limits +        ''' +        if not self.container.get('HostConfig'): +            self.fail("limits_differ_from_container: Error parsing container properties. 
HostConfig missing.") + +        host_config = self.container['HostConfig'] + +        config_mapping = dict( +            cpu_period=host_config.get('CpuPeriod'), +            cpu_quota=host_config.get('CpuQuota'), +            cpuset_cpus=host_config.get('CpusetCpus'), +            cpuset_mems=host_config.get('CpusetMems'), +            cpu_shares=host_config.get('CpuShares'), +            kernel_memory=host_config.get("KernelMemory"), +            memory=host_config.get('Memory'), +            memory_reservation=host_config.get('MemoryReservation'), +            memory_swap=host_config.get('MemorySwap'), +            oom_score_adj=host_config.get('OomScoreAdj'), +        ) + +        differences = [] +        for key, value in config_mapping.items(): +            if getattr(self.parameters, key, None) and getattr(self.parameters, key) != value: +                # no match. record the differences +                item = dict() +                item[key] = dict( +                    parameter=getattr(self.parameters, key), +                    container=value +                ) +                differences.append(item) +        different = (len(differences) > 0) +        return different, differences + +    def has_network_differences(self): +        ''' +        Check if the container is connected to requested networks with expected options: links, aliases, ipv4, ipv6 +        ''' +        different = False +        differences = [] + +        if not self.parameters.networks: +            return different, differences + +        if not self.container.get('NetworkSettings'): +            self.fail("has_missing_networks: Error parsing container properties. NetworkSettings missing.") + +        connected_networks = self.container['NetworkSettings']['Networks'] +        for network in self.parameters.networks: +            if connected_networks.get(network['name'], None) is None: +                different = True +                differences.append(dict( +                    parameter=network, +                    container=None +                )) +            else: +                diff = False +                if network.get('ipv4_address') and network['ipv4_address'] != connected_networks[network['name']].get('IPAddress'): +                    diff = True +                if network.get('ipv6_address') and network['ipv6_address'] != connected_networks[network['name']].get('GlobalIPv6Address'): +                    diff = True +                if network.get('aliases') and not connected_networks[network['name']].get('Aliases'): +                    diff = True +                if network.get('aliases') and connected_networks[network['name']].get('Aliases'): +                    for alias in network.get('aliases'): +                        if alias not in connected_networks[network['name']].get('Aliases', []): +                            diff = True +                if network.get('links') and not connected_networks[network['name']].get('Links'): +                    diff = True +                if network.get('links') and connected_networks[network['name']].get('Links'): +                    expected_links = [] +                    for link, alias in network['links'].items(): +                        expected_links.append("%s:%s" % (link, alias)) +                    for link in expected_links: +                        if link not in connected_networks[network['name']].get('Links', []): +                            diff = True +                if diff: +                    different = 
True +                    differences.append(dict( +                        parameter=network, +                        container=dict( +                            name=network['name'], +                            ipv4_address=connected_networks[network['name']].get('IPAddress'), +                            ipv6_address=connected_networks[network['name']].get('GlobalIPv6Address'), +                            aliases=connected_networks[network['name']].get('Aliases'), +                            links=connected_networks[network['name']].get('Links') +                        ) +                    )) +        return different, differences + +    def has_extra_networks(self): +        ''' +        Check if the container is connected to non-requested networks +        ''' +        extra_networks = [] +        extra = False + +        if not self.container.get('NetworkSettings'): +            self.fail("has_extra_networks: Error parsing container properties. NetworkSettings missing.") + +        connected_networks = self.container['NetworkSettings'].get('Networks') +        if connected_networks: +            for network, network_config in connected_networks.items(): +                keep = False +                if self.parameters.networks: +                    for expected_network in self.parameters.networks: +                        if expected_network['name'] == network: +                            keep = True +                if not keep: +                    extra = True +                    extra_networks.append(dict(name=network, id=network_config['NetworkID'])) +        return extra, extra_networks + +    def _get_expected_devices(self): +        if not self.parameters.devices: +            return None +        expected_devices = [] +        for device in self.parameters.devices: +            parts = device.split(':') +            if len(parts) == 1: +                expected_devices.append( +                    dict( +                        CgroupPermissions='rwm', +                        PathInContainer=parts[0], +                        PathOnHost=parts[0] +                    )) +            elif len(parts) == 2: +                parts = device.split(':') +                expected_devices.append( +                    dict( +                        CgroupPermissions='rwm', +                        PathInContainer=parts[1], +                        PathOnHost=parts[0] +                    ) +                ) +            else: +                expected_devices.append( +                    dict( +                        CgroupPermissions=parts[2], +                        PathInContainer=parts[1], +                        PathOnHost=parts[0] +                        )) +        return expected_devices + +    def _get_expected_entrypoint(self): +        self.log('_get_expected_entrypoint') +        if not self.parameters.entrypoint: +            return None +        return shlex.split(self.parameters.entrypoint) + +    def _get_expected_ports(self): +        if not self.parameters.published_ports: +            return None +        expected_bound_ports = {} +        for container_port, config in self.parameters.published_ports.items(): +            if isinstance(container_port, int): +                container_port = "%s/tcp" % container_port +            if len(config) == 1: +                expected_bound_ports[container_port] = [{'HostIp': "0.0.0.0", 'HostPort': ""}] +            elif isinstance(config[0], tuple): +                expected_bound_ports[container_port] = [] +  
              for host_ip, host_port in config: +                    expected_bound_ports[container_port].append({'HostIp': host_ip, 'HostPort': str(host_port)}) +            else: +                expected_bound_ports[container_port] = [{'HostIp': config[0], 'HostPort': str(config[1])}] +        return expected_bound_ports + +    def _get_expected_links(self): +        if self.parameters.links is None: +            return None +        self.log('parameter links:') +        self.log(self.parameters.links, pretty_print=True) +        exp_links = [] +        for link, alias in self.parameters.links.items(): +            exp_links.append("/%s:%s/%s" % (link, ('/' + self.parameters.name), alias)) +        return exp_links + +    def _get_expected_binds(self, image): +        self.log('_get_expected_binds') +        image_vols = [] +        if image: +            image_vols = self._get_image_binds(image['ContainerConfig'].get('Volumes')) +        param_vols = [] +        if self.parameters.volumes: +            for vol in self.parameters.volumes: +                host = None +                if ':' in vol: +                    if len(vol.split(':')) == 3: +                        host, container, mode = vol.split(':') +                    if len(vol.split(':')) == 2: +                        parts = vol.split(':') +                        if parts[1] not in VOLUME_PERMISSIONS: +                            host, container, mode = vol.split(':') + ['rw'] +                if host: +                    param_vols.append("%s:%s:%s" % (host, container, mode)) +        result = list(set(image_vols + param_vols)) +        self.log("expected_binds:") +        self.log(result, pretty_print=True) +        return result + +    def _get_image_binds(self, volumes): +        ''' +        Convert array of binds to array of strings with format host_path:container_path:mode + +        :param volumes: array of bind dicts +        :return: array of strings +        ''' +        results = [] +        if isinstance(volumes, dict): +            results += self._get_bind_from_dict(volumes) +        elif isinstance(volumes, list): +            for vol in volumes: +                results += self._get_bind_from_dict(vol) +        return results + +    @staticmethod +    def _get_bind_from_dict(volume_dict): +        results = [] +        if volume_dict: +            for host_path, config in volume_dict.items(): +                if isinstance(config, dict) and config.get('bind'): +                    container_path = config.get('bind') +                    mode = config.get('mode', 'rw') +                    results.append("%s:%s:%s" % (host_path, container_path, mode)) +        return results + +    def _get_expected_volumes(self, image): +        self.log('_get_expected_volumes') +        expected_vols = dict() +        if image and image['ContainerConfig'].get('Volumes'): +            expected_vols.update(image['ContainerConfig'].get('Volumes')) + +        if self.parameters.volumes: +            for vol in self.parameters.volumes: +                container = None +                if ':' in vol: +                    if len(vol.split(':')) == 3: +                        host, container, mode = vol.split(':') +                    if len(vol.split(':')) == 2: +                        parts = vol.split(':') +                        if parts[1] not in VOLUME_PERMISSIONS: +                            host, container, mode = vol.split(':') + ['rw'] +                new_vol = dict() +                if container: +           
         new_vol[container] = dict() +                else: +                    new_vol[vol] = dict() +                expected_vols.update(new_vol) + +        if not expected_vols: +            expected_vols = None +        self.log("expected_volumes:") +        self.log(expected_vols, pretty_print=True) +        return expected_vols + +    def _get_expected_env(self, image): +        self.log('_get_expected_env') +        expected_env = dict() +        if image and image['ContainerConfig'].get('Env'): +            for env_var in image['ContainerConfig']['Env']: +                parts = env_var.split('=', 1) +                expected_env[parts[0]] = parts[1] +        if self.parameters.env: +            expected_env.update(self.parameters.env) +        param_env = [] +        for key, value in expected_env.items(): +            param_env.append("%s=%s" % (key, value)) +        return param_env + +    def _get_expected_exposed(self, image): +        self.log('_get_expected_exposed') +        image_ports = [] +        if image: +            image_ports = [re.sub(r'/.+$', '', p) for p in (image['ContainerConfig'].get('ExposedPorts') or {}).keys()] +        param_ports = [] +        if self.parameters.ports: +            param_ports = [str(p[0]) for p in self.parameters.ports] +        result = list(set(image_ports + param_ports)) +        self.log(result, pretty_print=True) +        return result + +    def _get_expected_ulimits(self, config_ulimits): +        self.log('_get_expected_ulimits') +        if config_ulimits is None: +            return None +        results = [] +        for limit in config_ulimits: +            results.append(dict( +                Name=limit.name, +                Soft=limit.soft, +                Hard=limit.hard +            )) +        return results + +    def _get_expected_cmd(self): +        self.log('_get_expected_cmd') +        if not self.parameters.command: +            return None +        return shlex.split(self.parameters.command) + +    def _convert_simple_dict_to_list(self, param_name, join_with=':'): +        if getattr(self.parameters, param_name, None) is None: +            return None +        results = [] +        for key, value in getattr(self.parameters, param_name).items(): +            results.append("%s%s%s" % (key, join_with, value)) +        return results + + +class ContainerManager(DockerBaseClass): +    ''' +    Perform container management tasks +    ''' + +    def __init__(self, client): + +        super(ContainerManager, self).__init__() + +        self.client = client +        self.parameters = TaskParameters(client) +        self.check_mode = self.client.check_mode +        self.results = {'changed': False, 'actions': []} +        self.diff = {} +        self.facts = {} + +        state = self.parameters.state +        if state in ('stopped', 'started', 'present'): +            self.present(state) +        elif state == 'absent': +            self.absent() + +        if not self.check_mode and not self.parameters.debug: +            self.results.pop('actions') + +        if self.client.module._diff or self.parameters.debug: +            self.results['diff'] = self.diff + +        if self.facts: +            self.results['ansible_facts'] = {'docker_container': self.facts} + +    def present(self, state): +        container = self._get_container(self.parameters.name) +        image = self._get_image() + +        if not container.exists: +            # New container +            self.log('No container found') +            
new_container = self.container_create(self.parameters.image, self.parameters.create_parameters) +            if new_container: +                container = new_container +        else: +            # Existing container +            different, differences = container.has_different_configuration(image) +            image_different = False +            if not self.parameters.ignore_image: +                image_different = self._image_is_different(image, container) +            if image_different or different or self.parameters.recreate: +                self.diff['differences'] = differences +                if image_different: +                    self.diff['image_different'] = True +                self.log("differences") +                self.log(differences, pretty_print=True) +                if container.running: +                    self.container_stop(container.Id) +                self.container_remove(container.Id) +                new_container = self.container_create(self.parameters.image, self.parameters.create_parameters) +                if new_container: +                    container = new_container + +        if container and container.exists: +            container = self.update_limits(container) +            container = self.update_networks(container) + +            if state == 'started' and not container.running: +                container = self.container_start(container.Id) +            elif state == 'started' and self.parameters.restart: +                self.container_stop(container.Id) +                container = self.container_start(container.Id) +            elif state == 'stopped' and container.running: +                self.container_stop(container.Id) +                container = self._get_container(container.Id) + +        self.facts = container.raw + +    def absent(self): +        container = self._get_container(self.parameters.name) +        if container.exists: +            if container.running: +                self.container_stop(container.Id) +            self.container_remove(container.Id) + +    def fail(self, msg, **kwargs): +        self.client.module.fail_json(msg=msg, **kwargs) + +    def _get_container(self, container): +        ''' +        Expects container ID or Name. 
Returns a container object +        ''' +        return Container(self.client.get_container(container), self.parameters) + +    def _get_image(self): +        if not self.parameters.image: +            self.log('No image specified') +            return None +        repository, tag = utils.parse_repository_tag(self.parameters.image) +        if not tag: +            tag = "latest" +        image = self.client.find_image(repository, tag) +        if not self.check_mode: +            if not image or self.parameters.pull: +                self.log("Pull the image.") +                image, alreadyToLatest = self.client.pull_image(repository, tag) +                if alreadyToLatest: +                    self.results['changed'] = False +                else: +                    self.results['changed'] = True +                    self.results['actions'].append(dict(pulled_image="%s:%s" % (repository, tag))) +        self.log("image") +        self.log(image, pretty_print=True) +        return image + +    def _image_is_different(self, image, container): +        if image and image.get('Id'): +            if container and container.Image: +                if image.get('Id') != container.Image: +                    return True +        return False + +    def update_limits(self, container): +        limits_differ, different_limits = container.has_different_resource_limits() +        if limits_differ: +            self.log("limit differences:") +            self.log(different_limits, pretty_print=True) +        if limits_differ and not self.check_mode: +            self.container_update(container.Id, self.parameters.update_parameters) +            return self._get_container(container.Id) +        return container + +    def update_networks(self, container): +        has_network_differences, network_differences = container.has_network_differences() +        updated_container = container +        if has_network_differences: +            if self.diff.get('differences'): +                self.diff['differences'].append(dict(network_differences=network_differences)) +            else: +                self.diff['differences'] = [dict(network_differences=network_differences)] +            self.results['changed'] = True +            updated_container = self._add_networks(container, network_differences) + +        if self.parameters.purge_networks: +            has_extra_networks, extra_networks = container.has_extra_networks() +            if has_extra_networks: +                if self.diff.get('differences'): +                    self.diff['differences'].append(dict(purge_networks=extra_networks)) +                else: +                    self.diff['differences'] = [dict(purge_networks=extra_networks)] +                self.results['changed'] = True +                updated_container = self._purge_networks(container, extra_networks) +        return updated_container + +    def _add_networks(self, container, differences): +        for diff in differences: +            # remove the container from the network, if connected +            if diff.get('container'): +                self.results['actions'].append(dict(removed_from_network=diff['parameter']['name'])) +                if not self.check_mode: +                    try: +                        self.client.disconnect_container_from_network(container.Id, diff['parameter']['id']) +                    except Exception as exc: +                        self.fail("Error disconnecting container from network %s - %s" % (diff['parameter']['name'], +      
                                                                                    str(exc))) +            # connect to the network +            params = dict( +                ipv4_address=diff['parameter'].get('ipv4_address', None), +                ipv6_address=diff['parameter'].get('ipv6_address', None), +                links=diff['parameter'].get('links', None), +                aliases=diff['parameter'].get('aliases', None) +            ) +            self.results['actions'].append(dict(added_to_network=diff['parameter']['name'], network_parameters=params)) +            if not self.check_mode: +                try: +                    self.log("Connecting container to network %s" % diff['parameter']['id']) +                    self.log(params, pretty_print=True) +                    self.client.connect_container_to_network(container.Id, diff['parameter']['id'], **params) +                except Exception as exc: +                    self.fail("Error connecting container to network %s - %s" % (diff['parameter']['name'], str(exc))) +        return self._get_container(container.Id) + +    def _purge_networks(self, container, networks): +        for network in networks: +            self.results['actions'].append(dict(removed_from_network=network['name'])) +            if not self.check_mode: +                try: +                    self.client.disconnect_container_from_network(container.Id, network['name']) +                except Exception as exc: +                    self.fail("Error disconnecting container from network %s - %s" % (network['name'], +                                                                                      str(exc))) +        return self._get_container(container.Id) + +    def container_create(self, image, create_parameters): +        self.log("create container") +        self.log("image: %s parameters:" % image) +        self.log(create_parameters, pretty_print=True) +        self.results['actions'].append(dict(created="Created container", create_parameters=create_parameters)) +        self.results['changed'] = True +        new_container = None +        if not self.check_mode: +            try: +                new_container = self.client.create_container(image, **create_parameters) +            except Exception as exc: +                self.fail("Error creating container: %s" % str(exc)) +            return self._get_container(new_container['Id']) +        return new_container + +    def container_start(self, container_id): +        self.log("start container %s" % (container_id)) +        self.results['actions'].append(dict(started=container_id)) +        self.results['changed'] = True +        if not self.check_mode: +            try: +                self.client.start(container=container_id) +            except Exception as exc: +                self.fail("Error starting container %s: %s" % (container_id, str(exc))) + +            if not self.parameters.detach: +                status = self.client.wait(container_id) +                output = self.client.logs(container_id, stdout=True, stderr=True, stream=False, timestamps=False) +                if status != 0: +                    self.fail(output, status=status) +                if self.parameters.cleanup: +                    self.container_remove(container_id, force=True) +                insp = self._get_container(container_id) +                if insp.raw: +                    insp.raw['Output'] = output +                else: +                    insp.raw = dict(Output=output) +            
    return insp +        return self._get_container(container_id) + +    def container_remove(self, container_id, link=False, force=False): +        volume_state = (not self.parameters.keep_volumes) +        self.log("remove container container:%s v:%s link:%s force%s" % (container_id, volume_state, link, force)) +        self.results['actions'].append(dict(removed=container_id, volume_state=volume_state, link=link, force=force)) +        self.results['changed'] = True +        response = None +        if not self.check_mode: +            try: +                response = self.client.remove_container(container_id, v=volume_state, link=link, force=force) +            except Exception as exc: +                self.fail("Error removing container %s: %s" % (container_id, str(exc))) +        return response + +    def container_update(self, container_id, update_parameters): +        if update_parameters: +            self.log("update container %s" % (container_id)) +            self.log(update_parameters, pretty_print=True) +            self.results['actions'].append(dict(updated=container_id, update_parameters=update_parameters)) +            self.results['changed'] = True +            if not self.check_mode and callable(getattr(self.client, 'update_container')): +                try: +                    self.client.update_container(container_id, **update_parameters) +                except Exception as exc: +                    self.fail("Error updating container %s: %s" % (container_id, str(exc))) +        return self._get_container(container_id) + +    def container_kill(self, container_id): +        self.results['actions'].append(dict(killed=container_id, signal=self.parameters.kill_signal)) +        self.results['changed'] = True +        response = None +        if not self.check_mode: +            try: +                if self.parameters.kill_signal: +                    response = self.client.kill(container_id, signal=self.parameters.kill_signal) +                else: +                    response = self.client.kill(container_id) +            except Exception as exc: +                self.fail("Error killing container %s: %s" % (container_id, exc)) +        return response + +    def container_stop(self, container_id): +        if self.parameters.force_kill: +            self.container_kill(container_id) +            return +        self.results['actions'].append(dict(stopped=container_id, timeout=self.parameters.stop_timeout)) +        self.results['changed'] = True +        response = None +        if not self.check_mode: +            try: +                if self.parameters.stop_timeout: +                    response = self.client.stop(container_id, timeout=self.parameters.stop_timeout) +                else: +                    response = self.client.stop(container_id) +            except Exception as exc: +                self.fail("Error stopping container %s: %s" % (container_id, str(exc))) +        return response + + +def main(): +    argument_spec = dict( +        blkio_weight=dict(type='int'), +        capabilities=dict(type='list'), +        cleanup=dict(type='bool', default=False), +        command=dict(type='str'), +        cpu_period=dict(type='int'), +        cpu_quota=dict(type='int'), +        cpuset_cpus=dict(type='str'), +        cpuset_mems=dict(type='str'), +        cpu_shares=dict(type='int'), +        detach=dict(type='bool', default=True), +        devices=dict(type='list'), +        dns_servers=dict(type='list'), +        dns_opts=dict(type='list'), +        
dns_search_domains=dict(type='list'), +        env=dict(type='dict'), +        env_file=dict(type='path'), +        entrypoint=dict(type='str'), +        etc_hosts=dict(type='dict'), +        exposed_ports=dict(type='list', aliases=['exposed', 'expose']), +        force_kill=dict(type='bool', default=False, aliases=['forcekill']), +        groups=dict(type='list'), +        hostname=dict(type='str'), +        ignore_image=dict(type='bool', default=False), +        image=dict(type='str'), +        interactive=dict(type='bool', default=False), +        ipc_mode=dict(type='str'), +        keep_volumes=dict(type='bool', default=True), +        kernel_memory=dict(type='str'), +        kill_signal=dict(type='str'), +        labels=dict(type='dict'), +        links=dict(type='list'), +        log_driver=dict(type='str', +                        choices=['none', 'json-file', 'syslog', 'journald', 'gelf', 'fluentd', 'awslogs', 'splunk'], +                        default=None), +        log_options=dict(type='dict', aliases=['log_opt']), +        mac_address=dict(type='str'), +        memory=dict(type='str', default='0'), +        memory_reservation=dict(type='str'), +        memory_swap=dict(type='str'), +        memory_swappiness=dict(type='int'), +        name=dict(type='str', required=True), +        network_mode=dict(type='str'), +        networks=dict(type='list'), +        oom_killer=dict(type='bool'), +        oom_score_adj=dict(type='int'), +        paused=dict(type='bool', default=False), +        pid_mode=dict(type='str'), +        privileged=dict(type='bool', default=False), +        published_ports=dict(type='list', aliases=['ports']), +        pull=dict(type='bool', default=False), +        purge_networks=dict(type='bool', default=False), +        read_only=dict(type='bool', default=False), +        recreate=dict(type='bool', default=False), +        restart=dict(type='bool', default=False), +        restart_policy=dict(type='str', choices=['no', 'on-failure', 'always', 'unless-stopped']), +        restart_retries=dict(type='int', default=None), +        shm_size=dict(type='str'), +        security_opts=dict(type='list'), +        state=dict(type='str', choices=['absent', 'present', 'started', 'stopped'], default='started'), +        stop_signal=dict(type='str'), +        stop_timeout=dict(type='int'), +        trust_image_content=dict(type='bool', default=False), +        tty=dict(type='bool', default=False), +        ulimits=dict(type='list'), +        user=dict(type='str'), +        uts=dict(type='str'), +        volumes=dict(type='list'), +        volumes_from=dict(type='list'), +        volume_driver=dict(type='str'), +    ) + +    required_if = [ +        ('state', 'present', ['image']) +    ] + +    client = AnsibleDockerClient( +        argument_spec=argument_spec, +        required_if=required_if, +        supports_check_mode=True +    ) + +    cm = ContainerManager(client) +    client.module.exit_json(**cm.results) + +# import module snippets +from ansible.module_utils.basic import * + +if __name__ == '__main__': +    main()
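The vendored module ends here; it is what the new docker_image_availability check (below) drives for its registry probes. As a usage illustration only — a minimal sketch of a one-shot task, with placeholder image and registry names that are not part of this patch:

    - name: Run a one-shot container and wait for its output
      docker_container:
        name: skopeo_inspect
        image: openshift/openshift-ansible          # placeholder: an image that ships skopeo
        command: skopeo inspect docker://registry.example.com/openshift3/ose:v3.6   # placeholder registry/image
        detach: false    # wait for the command to finish and capture its logs
        cleanup: true    # remove the finished container
      register: inspect_result

With detach: false, container_start() waits on the container, attaches its log output to the result, and fails the task when the command exits non-zero; cleanup: true then force-removes the container. That is exactly the behavior the availability check relies on.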
\ No newline at end of file diff --git a/roles/openshift_health_checker/library/docker_info.py b/roles/openshift_health_checker/library/docker_info.py new file mode 100644 index 000000000..7f712bcff --- /dev/null +++ b/roles/openshift_health_checker/library/docker_info.py @@ -0,0 +1,24 @@ +# pylint: disable=missing-docstring +""" +Ansible module for determining information about the docker host. + +While there are several ansible modules that make use of the docker +api to expose container and image facts in a remote host, they +are unable to return specific information about the host machine +itself. This module exposes the same information obtained through +executing the `docker info` command on a docker host, in json format. +""" + +from ansible.module_utils.docker_common import AnsibleDockerClient + + +def main(): +    client = AnsibleDockerClient() + +    client.module.exit_json( +        info=client.info(), +    ) + + +if __name__ == '__main__': +    main() diff --git a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py new file mode 100644 index 000000000..7a7498cb7 --- /dev/null +++ b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py @@ -0,0 +1,168 @@ +# pylint: disable=missing-docstring +from openshift_checks import OpenShiftCheck, get_var + + +class DockerImageAvailability(OpenShiftCheck): +    """Check that required Docker images are available. + +    This check attempts to ensure that required docker images are +    either present locally, or able to be pulled down from available +    registries defined in a host machine. +    """ + +    name = "docker_image_availability" +    tags = ["preflight"] + +    skopeo_image = "openshift/openshift-ansible" + +    docker_image_base = { +        "origin": { +            "repo": "openshift", +            "image": "origin", +        }, +        "openshift-enterprise": { +            "repo": "openshift3", +            "image": "ose", +        }, +    } + +    def run(self, tmp, task_vars): +        required_images = self.required_images(task_vars) +        missing_images = set(required_images) - set(self.local_images(required_images, task_vars)) + +        # exit early if all images were found locally +        if not missing_images: +            return {"changed": False} + +        msg, failed, changed = self.update_skopeo_image(task_vars) + +        # exit early if Skopeo update fails +        if failed: +            return { +                "failed": True, +                "changed": changed, +                "msg": "Failed to update Skopeo image ({img_name}). 
{msg}".format(img_name=self.skopeo_image, msg=msg), +            } + +        registries = self.known_docker_registries(task_vars) +        available_images = self.available_images(missing_images, registries, task_vars) +        unavailable_images = set(missing_images) - set(available_images) + +        if unavailable_images: +            return { +                "failed": True, +                "msg": ( +                    "One or more required images are not available: {}.\n" +                    "Configured registries: {}" +                ).format(", ".join(sorted(unavailable_images)), ", ".join(registries)), +                "changed": changed, +            } + +        return {"changed": changed} + +    def required_images(self, task_vars): +        deployment_type = get_var(task_vars, "deployment_type") +        image_base_name = self.docker_image_base[deployment_type] + +        openshift_release = get_var(task_vars, "openshift_release") +        openshift_image_tag = get_var(task_vars, "openshift_image_tag") + +        is_containerized = get_var(task_vars, "openshift", "common", "is_containerized") + +        if is_containerized: +            images = set(self.containerized_docker_images(image_base_name, openshift_release)) +        else: +            images = set(self.rpm_docker_images(image_base_name, openshift_release)) + +        # append images with qualified image tags to our list of required images. +        # these are images with a (v0.0.0.0) tag, rather than a standard release +        # format tag (v0.0). We want to check this set in both containerized and +        # non-containerized installations. +        images.update( +            self.qualified_docker_images(self.image_from_base_name(image_base_name), "v" + openshift_image_tag) +        ) + +        return images + +    def local_images(self, images, task_vars): +        """Filter a list of images and return those available locally.""" +        return [ +            image for image in images +            if self.is_image_local(image, task_vars) +        ] + +    def is_image_local(self, image, task_vars): +        result = self.module_executor("docker_image_facts", {"name": image}, task_vars) +        if result.get("failed", False): +            return False + +        return bool(result.get("images", [])) + +    def known_docker_registries(self, task_vars): +        result = self.module_executor("docker_info", {}, task_vars) + +        if result.get("failed", False): +            return [] + +        docker_info = result.get("info", "") +        return [registry.get("Name", "") for registry in docker_info.get("Registries", {})] + +    def available_images(self, images, registries, task_vars): +        """Inspect existing images using Skopeo and return all images successfully inspected.""" +        return [ +            image for image in images +            if self.is_image_available(image, registries, task_vars) +        ] + +    def is_image_available(self, image, registries, task_vars): +        for registry in registries: +            if self.is_available_skopeo_image(image, registry, task_vars): +                return True + +        return False + +    def is_available_skopeo_image(self, image, registry, task_vars): +        """Uses Skopeo to determine if required image exists in a given registry.""" + +        cmd_str = "skopeo inspect docker://{registry}/{image}".format( +            registry=registry, +            image=image, +        ) + +        args = { +            "name": "skopeo_inspect", +     
       "image": self.skopeo_image, +            "command": cmd_str, +            "detach": False, +            "cleanup": True, +        } +        result = self.module_executor("docker_container", args, task_vars) +        return result.get("failed", False) + +    def containerized_docker_images(self, base_name, version): +        return [ +            "{image}:{version}".format(image=self.image_from_base_name(base_name), version=version) +        ] + +    @staticmethod +    def rpm_docker_images(base, version): +        return [ +            "{image_repo}/registry-console:{version}".format(image_repo=base["repo"], version=version) +        ] + +    @staticmethod +    def qualified_docker_images(image_name, version): +        return [ +            "{}-{}:{}".format(image_name, component, version) +            for component in "haproxy-router docker-registry deployer pod".split() +        ] + +    @staticmethod +    def image_from_base_name(base): +        return "".join([base["repo"], "/", base["image"]]) + +    # ensures that the skopeo docker image exists, and updates it +    # with latest if image was already present locally. +    def update_skopeo_image(self, task_vars): +        result = self.module_executor("docker_image", {"name": self.skopeo_image}, task_vars) +        return result.get("msg", ""), result.get("failed", False), result.get("changed", False) diff --git a/roles/openshift_hosted/defaults/main.yml b/roles/openshift_hosted/defaults/main.yml index 0a6299c9b..32bcd8d08 100644 --- a/roles/openshift_hosted/defaults/main.yml +++ b/roles/openshift_hosted/defaults/main.yml @@ -14,11 +14,11 @@ openshift_hosted_router_edits:  openshift_hosted_routers:  - name: router -  replicas: "{{ replicas }}" +  replicas: "{{ replicas | default(1) }}"    namespace: default    serviceaccount: router -  selector: "{{ openshift_hosted_router_selector }}" -  images: "{{ openshift_hosted_router_image }}" +  selector: "{{ openshift_hosted_router_selector | default(None) }}" +  images: "{{ openshift_hosted_router_image | default(None)  }}"    edits: "{{ openshift_hosted_router_edits }}"    stats_port: 1936    ports: diff --git a/roles/openshift_logging/README.md b/roles/openshift_logging/README.md index 14b80304d..570c41ecc 100644 --- a/roles/openshift_logging/README.md +++ b/roles/openshift_logging/README.md @@ -72,6 +72,8 @@ When both `openshift_logging_install_logging` and `openshift_logging_upgrade_log  - `openshift_logging_es_recover_after_time`: The amount of time ES will wait before it tries to recover. Defaults to '5m'.  - `openshift_logging_es_storage_group`: The storage group used for ES. Defaults to '65534'.  - `openshift_logging_es_nodeselector`: A map of labels (e.g. {"node":"infra","region":"west"} to select the nodes where the pod will land. +- `openshift_logging_es_number_of_shards`: The number of primary shards for every new index created in ES. Defaults to '1'. +- `openshift_logging_es_number_of_replicas`: The number of replica shards per primary shard for every new index. Defaults to '0'.  When `openshift_logging_use_ops` is `True`, there are some additional vars. 
These work the  same as above for their non-ops counterparts, but apply to the OPS cluster instance: @@ -88,6 +90,8 @@ same as above for their non-ops counterparts, but apply to the OPS cluster insta  - `openshift_logging_es_ops_pvc_prefix`: logging-es-ops  - `openshift_logging_es_ops_recover_after_time`: 5m  - `openshift_logging_es_ops_storage_group`: 65534 +- `openshift_logging_es_ops_number_of_shards`: The number of primary shards for every new index created in ES. Defaults to '1'. +- `openshift_logging_es_ops_number_of_replicas`: The number of replica shards per primary shard for every new index. Defaults to '0'.   - `openshift_logging_kibana_ops_hostname`: The Operations Kibana hostname. Defaults to 'kibana-ops.example.com'.  - `openshift_logging_kibana_ops_cpu_limit`: The amount of CPU to allocate to Kibana or unset if not specified.  - `openshift_logging_kibana_ops_memory_limit`: The amount of memory to allocate to Kibana or unset if not specified. diff --git a/roles/openshift_logging/defaults/main.yml b/roles/openshift_logging/defaults/main.yml index 75a6e4d69..d9c9a83d0 100644 --- a/roles/openshift_logging/defaults/main.yml +++ b/roles/openshift_logging/defaults/main.yml @@ -1,6 +1,4 @@  --- -openshift_logging_image_prefix: "{{ openshift_hosted_logging_deployer_prefix | default('docker.io/openshift/origin-') }}" -openshift_logging_image_version: "{{ openshift_hosted_logging_deployer_version | default('latest') }}"  openshift_logging_use_ops: "{{ openshift_hosted_logging_enable_ops_cluster | default('false') | bool }}"  openshift_logging_master_url: "https://kubernetes.default.svc.{{ openshift.common.dns_domain }}"  openshift_logging_master_public_url: "{{ openshift_hosted_logging_master_public_url | default('https://' + openshift.common.public_hostname + ':' ~ (openshift_master_api_port | default('8443', true))) }}" @@ -54,6 +52,18 @@ openshift_logging_kibana_ops_proxy_cpu_limit: null  openshift_logging_kibana_ops_proxy_memory_limit: null  openshift_logging_kibana_ops_replica_count: 1 +#The absolute path on the control node to the cert file to use +#for the public facing ops kibana certs +openshift_logging_kibana_ops_cert: "" + +#The absolute path on the control node to the key file to use +#for the public facing ops kibana certs +openshift_logging_kibana_ops_key: "" + +#The absolute path on the control node to the CA file to use +#for the public facing ops kibana certs +openshift_logging_kibana_ops_ca: "" +  openshift_logging_fluentd_nodeselector: "{{ openshift_hosted_logging_fluentd_nodeselector_label | default('logging-infra-fluentd=true') | map_from_pairs }}"  openshift_logging_fluentd_cpu_limit: 100m  openshift_logging_fluentd_memory_limit: 512Mi @@ -80,6 +90,8 @@ openshift_logging_es_storage_group: "{{ openshift_hosted_logging_elasticsearch_s  openshift_logging_es_nodeselector: "{{ openshift_hosted_logging_elasticsearch_nodeselector | default('') | map_from_pairs }}"  # openshift_logging_es_config is a hash to be merged into the defaults for the elasticsearch.yaml  openshift_logging_es_config: {} +openshift_logging_es_number_of_shards: 1 +openshift_logging_es_number_of_replicas: 0  # allow cluster-admin or cluster-reader to view operations index  openshift_logging_es_ops_allow_cluster_reader: False @@ -99,6 +111,8 @@ openshift_logging_es_ops_pvc_prefix: "{{ openshift_hosted_logging_elasticsearch_  openshift_logging_es_ops_recover_after_time: 5m  openshift_logging_es_ops_storage_group: "{{ openshift_hosted_logging_elasticsearch_storage_group | default('65534') }}"  
openshift_logging_es_ops_nodeselector: "{{ openshift_hosted_logging_elasticsearch_ops_nodeselector | default('') | map_from_pairs }}" +openshift_logging_es_ops_number_of_shards: 1 +openshift_logging_es_ops_number_of_replicas: 0  # storage related defaults  openshift_logging_storage_access_modes: "{{ openshift_hosted_logging_storage_access_modes | default(['ReadWriteOnce']) }}" diff --git a/roles/openshift_logging/tasks/generate_routes.yaml b/roles/openshift_logging/tasks/generate_routes.yaml index 7af17a708..e77da7a24 100644 --- a/roles/openshift_logging/tasks/generate_routes.yaml +++ b/roles/openshift_logging/tasks/generate_routes.yaml @@ -16,12 +16,12 @@    changed_when: false  - name: Generating logging routes -  template: src=route_reencrypt.j2 dest={{mktemp.stdout}}/templates/logging-{{route_info.name}}-route.yaml +  template: src=route_reencrypt.j2 dest={{mktemp.stdout}}/templates/logging-logging-kibana-route.yaml    tags: routes    vars: -    obj_name: "{{route_info.name}}" -    route_host: "{{route_info.host}}" -    service_name: "{{route_info.name}}" +    obj_name: "logging-kibana" +    route_host: "{{openshift_logging_kibana_hostname}}" +    service_name: "logging-kibana"      tls_key: "{{kibana_key | default('') | b64decode}}"      tls_cert: "{{kibana_cert | default('') | b64decode}}"      tls_ca_cert: "{{kibana_ca | b64decode}}" @@ -31,10 +31,47 @@        component: support        logging-infra: support        provider: openshift -  with_items: -    - {name: logging-kibana, host: "{{openshift_logging_kibana_hostname}}"} -    - {name: logging-kibana-ops, host: "{{openshift_logging_kibana_ops_hostname}}"} -  loop_control: -    loop_var: route_info -  when: (route_info.name == 'logging-kibana-ops' and openshift_logging_use_ops | bool) or route_info.name == 'logging-kibana' +  changed_when: no + +- set_fact: kibana_ops_key={{ lookup('file', openshift_logging_kibana_ops_key) | b64encode }} +  when: +  - openshift_logging_use_ops | bool +  - "{{ openshift_logging_kibana_ops_key | trim | length > 0 }}" +  changed_when: false + +- set_fact: kibana_ops_cert={{ lookup('file', openshift_logging_kibana_ops_cert)| b64encode  }} +  when: +  - openshift_logging_use_ops | bool +  - "{{openshift_logging_kibana_ops_cert | trim | length > 0}}" +  changed_when: false + +- set_fact: kibana_ops_ca={{ lookup('file', openshift_logging_kibana_ops_ca)| b64encode  }} +  when: +  - openshift_logging_use_ops | bool +  - "{{openshift_logging_kibana_ops_ca | trim | length > 0}}" +  changed_when: false + +- set_fact: kibana_ops_ca={{key_pairs | entry_from_named_pair('ca_file') }} +  when: +  - openshift_logging_use_ops | bool +  - kibana_ops_ca is not defined +  changed_when: false + +- name: Generating logging ops routes +  template: src=route_reencrypt.j2 dest={{mktemp.stdout}}/templates/logging-logging-kibana-ops-route.yaml +  tags: routes +  vars: +    obj_name: "logging-kibana-ops" +    route_host: "{{openshift_logging_kibana_ops_hostname}}" +    service_name: "logging-kibana-ops" +    tls_key: "{{kibana_ops_key | default('') | b64decode}}" +    tls_cert: "{{kibana_ops_cert | default('') | b64decode}}" +    tls_ca_cert: "{{kibana_ops_ca | b64decode}}" +    tls_dest_ca_cert: "{{key_pairs | entry_from_named_pair('ca_file')| b64decode }}" +    edge_term_policy: "{{openshift_logging_kibana_edge_term_policy | default('') }}" +    labels: +      component: support +      logging-infra: support +      provider: openshift +  when: openshift_logging_use_ops | bool    changed_when: no diff --git 
a/roles/openshift_logging/tasks/install_elasticsearch.yaml b/roles/openshift_logging/tasks/install_elasticsearch.yaml index 1b750bcbe..28fad420b 100644 --- a/roles/openshift_logging/tasks/install_elasticsearch.yaml +++ b/roles/openshift_logging/tasks/install_elasticsearch.yaml @@ -53,6 +53,8 @@      deploy_name: "{{item.1}}"      es_node_selector: "{{openshift_logging_es_nodeselector | default({}) }}"      es_storage: "{{openshift_logging_facts|es_storage(deploy_name, pvc_claim)}}" +    es_number_of_shards: "{{ openshift_logging_es_number_of_shards }}" +    es_number_of_replicas: "{{ openshift_logging_es_number_of_replicas }}"    with_indexed_items:      - "{{ es_dc_pool }}"    check_mode: no @@ -134,6 +136,8 @@      openshift_logging_es_recover_after_time: "{{openshift_logging_es_ops_recover_after_time}}"      es_node_selector: "{{openshift_logging_es_ops_nodeselector | default({}) }}"      es_storage: "{{openshift_logging_facts|es_storage(deploy_name, pvc_claim,root='elasticsearch_ops')}}" +    es_number_of_shards: "{{ openshift_logging_es_ops_number_of_shards }}" +    es_number_of_replicas: "{{ openshift_logging_es_ops_number_of_replicas }}"    with_indexed_items:      - "{{ es_ops_dc_pool | default([]) }}"    when: diff --git a/roles/openshift_logging/tasks/main.yaml b/roles/openshift_logging/tasks/main.yaml index eb60175c7..c7f4a2f93 100644 --- a/roles/openshift_logging/tasks/main.yaml +++ b/roles/openshift_logging/tasks/main.yaml @@ -3,6 +3,17 @@      msg: Only one Fluentd nodeselector key pair should be provided    when: "{{ openshift_logging_fluentd_nodeselector.keys() | count }} > 1" +- name: Set default image variables based on deployment_type +  include_vars: "{{ item }}" +  with_first_found: +    - "{{ openshift_deployment_type | default(deployment_type) }}.yml" +    - "default_images.yml" + +- name: Set logging image facts +  set_fact: +    openshift_logging_image_prefix: "{{ openshift_logging_image_prefix | default(__openshift_logging_image_prefix) }}" +    openshift_logging_image_version: "{{ openshift_logging_image_version | default(__openshift_logging_image_version) }}" +  - name: Create temp directory for doing work in    command: mktemp -d /tmp/openshift-logging-ansible-XXXXXX    register: mktemp diff --git a/roles/openshift_logging/templates/elasticsearch.yml.j2 b/roles/openshift_logging/templates/elasticsearch.yml.j2 index a030c26b5..07e8c0c98 100644 --- a/roles/openshift_logging/templates/elasticsearch.yml.j2 +++ b/roles/openshift_logging/templates/elasticsearch.yml.j2 @@ -6,9 +6,8 @@ script:    indexed: on  index: -  number_of_shards: 1 -  number_of_replicas: 0 -  auto_expand_replicas: 0-2 +  number_of_shards: {{ es_number_of_shards | default ('1') }} +  number_of_replicas: {{ es_number_of_replicas | default ('0') }}    unassigned.node_left.delayed_timeout: 2m    translog:      flush_threshold_size: 256mb @@ -38,6 +37,8 @@ gateway:    recover_after_time: ${RECOVER_AFTER_TIME}  io.fabric8.elasticsearch.authentication.users: ["system.logging.kibana", "system.logging.fluentd", "system.logging.curator", "system.admin"] +io.fabric8.elasticsearch.kibana.mapping.app: /usr/share/elasticsearch/index_patterns/com.redhat.viaq-openshift.index-pattern.json +io.fabric8.elasticsearch.kibana.mapping.ops: /usr/share/elasticsearch/index_patterns/com.redhat.viaq-openshift.index-pattern.json  openshift.config:    use_common_data_model: true diff --git a/roles/openshift_logging/vars/default_images.yml b/roles/openshift_logging/vars/default_images.yml new file mode 100644 index 
000000000..1a77808f6 --- /dev/null +++ b/roles/openshift_logging/vars/default_images.yml @@ -0,0 +1,3 @@ +--- +__openshift_logging_image_prefix: "{{ openshift_hosted_logging_deployer_prefix | default('docker.io/openshift/origin-') }}" +__openshift_logging_image_version: "{{ openshift_hosted_logging_deployer_version | default('latest') }}" diff --git a/roles/openshift_logging/vars/openshift-enterprise.yml b/roles/openshift_logging/vars/openshift-enterprise.yml new file mode 100644 index 000000000..9679d209a --- /dev/null +++ b/roles/openshift_logging/vars/openshift-enterprise.yml @@ -0,0 +1,3 @@ +--- +__openshift_logging_image_prefix: "{{ openshift_hosted_logging_deployer_prefix | default('registry.access.redhat.com/openshift3/') }}" +__openshift_logging_image_version: "{{ openshift_hosted_logging_deployer_version | default(openshift_release | default ('3.5.0') ) }}" diff --git a/roles/openshift_metrics/defaults/main.yaml b/roles/openshift_metrics/defaults/main.yaml index 5921b7bb7..1d3db8a1a 100644 --- a/roles/openshift_metrics/defaults/main.yaml +++ b/roles/openshift_metrics/defaults/main.yaml @@ -1,8 +1,6 @@  ---  openshift_metrics_start_cluster: True  openshift_metrics_install_metrics: True -openshift_metrics_image_prefix: docker.io/openshift/origin- -openshift_metrics_image_version: latest  openshift_metrics_startup_timeout: 500  openshift_metrics_hawkular_replicas: 1 diff --git a/roles/openshift_metrics/tasks/main.yaml b/roles/openshift_metrics/tasks/main.yaml index 1eebff3bf..c8d222c60 100644 --- a/roles/openshift_metrics/tasks/main.yaml +++ b/roles/openshift_metrics/tasks/main.yaml @@ -1,4 +1,16 @@  --- + +- name: Set default image variables based on deployment_type +  include_vars: "{{ item }}" +  with_first_found: +    - "{{ openshift_deployment_type | default(deployment_type) }}.yml" +    - "default_images.yml" + +- name: Set metrics image facts +  set_fact: +    openshift_metrics_image_prefix: "{{ openshift_metrics_image_prefix | default(__openshift_metrics_image_prefix) }}" +    openshift_metrics_image_version: "{{ openshift_metrics_image_version | default(__openshift_metrics_image_version) }}" +  - name: Create temp directory for doing work in on target    command: mktemp -td openshift-metrics-ansible-XXXXXX    register: mktemp diff --git a/roles/openshift_metrics/templates/pvc.j2 b/roles/openshift_metrics/templates/pvc.j2 index 885dd368d..c2e56ba21 100644 --- a/roles/openshift_metrics/templates/pvc.j2 +++ b/roles/openshift_metrics/templates/pvc.j2 @@ -4,7 +4,7 @@ metadata:    name: "{{obj_name}}"  {% if labels is not defined %}    labels: -    logging-infra: support +    metrics-infra: support  {% elif labels %}    labels:  {% for key, value in labels.iteritems() %} diff --git a/roles/openshift_metrics/vars/default_images.yml b/roles/openshift_metrics/vars/default_images.yml new file mode 100644 index 000000000..678c4104c --- /dev/null +++ b/roles/openshift_metrics/vars/default_images.yml @@ -0,0 +1,3 @@ +--- +__openshift_metrics_image_prefix: "{{ openshift_hosted_metrics_deployer_prefix | default('docker.io/openshift/origin-') }}" +__openshift_metrics_image_version: "{{ openshift_hosted_metrics_deployer_version | default('latest') }}" diff --git a/roles/openshift_metrics/vars/openshift-enterprise.yml b/roles/openshift_metrics/vars/openshift-enterprise.yml new file mode 100644 index 000000000..f28c3ce48 --- /dev/null +++ b/roles/openshift_metrics/vars/openshift-enterprise.yml @@ -0,0 +1,3 @@ +--- +__openshift_metrics_image_prefix: "{{ openshift_hosted_metrics_deployer_prefix 
| default('registry.access.redhat.com/openshift3/') }}" +__openshift_metrics_image_version: "{{ openshift_hosted_metrics_deployer_version | default(openshift_release | default ('3.5.0') ) }}" diff --git a/roles/openshift_node_upgrade/tasks/main.yml b/roles/openshift_node_upgrade/tasks/main.yml index f052ed505..6ae8dbc12 100644 --- a/roles/openshift_node_upgrade/tasks/main.yml +++ b/roles/openshift_node_upgrade/tasks/main.yml @@ -51,24 +51,28 @@    failed_when: false    when: openshift.common.is_containerized | bool +- name: Stop rpm based services +  service: +    name: "{{ item }}" +    state: stopped +  with_items: +  - "{{ openshift.common.service_type }}-node" +  - openvswitch +  failed_when: false +  when: not openshift.common.is_containerized | bool +  - name: Upgrade openvswitch    package:      name: openvswitch      state: latest -  register: ovs_pkg    when: not openshift.common.is_containerized | bool  - name: Restart openvswitch    systemd: -    name: "{{ item }}" -    state: restarted -  with_items: -  - ovs-vswitchd -  - ovsdb-server -  - openvswitch +    name: openvswitch +    state: started    when:    - not openshift.common.is_containerized | bool -  - ovs_pkg | changed  # Mandatory Docker restart, ensure all containerized services are running:  - include: docker/restart.yml
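The docker_info module added earlier in this patch takes no options; it simply returns the host's `docker info` data under an `info` key, which is how the availability check discovers configured registries. A minimal sketch of driving it directly, assuming a Docker build that reports a Registries list (the same assumption known_docker_registries() makes):

    - name: Collect docker host information
      docker_info:
      register: docker_host

    - name: Show the registries the availability check would probe
      debug:
        msg: "{{ docker_host.info.Registries | default([]) | map(attribute='Name') | list }}"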
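The new Elasticsearch index settings flow from the role defaults through install_elasticsearch.yaml into elasticsearch.yml.j2. A sketch of tuning them from the inventory (group_vars/OSEv3.yml is a hypothetical location; any inventory vars file works):

    openshift_logging_es_number_of_shards: 3
    openshift_logging_es_number_of_replicas: 1

which the template above would render into elasticsearch.yml as:

    index:
      number_of_shards: 3
      number_of_replicas: 1

Note that auto_expand_replicas is removed outright rather than parameterized, so replica counts are now always explicit.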
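The include_vars/set_fact pattern in the logging and metrics roles is what makes overriding the image prefix and version work again: with_first_found loads vars/{{ openshift_deployment_type }}.yml when it exists (e.g. vars/openshift-enterprise.yml), falls back to vars/default_images.yml, and set_fact applies the loaded __-prefixed default only when the user has not set the variable. A sketch of an inventory override, with a placeholder registry:

    openshift_logging_image_prefix: "registry.example.com/openshift3/"
    openshift_logging_image_version: "v3.6"
    openshift_metrics_image_prefix: "registry.example.com/openshift3/"
    openshift_metrics_image_version: "v3.6"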
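Likewise, the new ops Kibana route can be given custom certificates via the three openshift_logging_kibana_ops_* defaults introduced above; when they are left empty, generate_routes.yaml falls back to the deployment's generated CA. A sketch with placeholder paths (they must be absolute paths on the control node, per the comments in defaults/main.yml):

    openshift_logging_use_ops: true
    openshift_logging_kibana_ops_cert: /etc/pki/kibana-ops/tls.crt
    openshift_logging_kibana_ops_key: /etc/pki/kibana-ops/tls.key
    openshift_logging_kibana_ops_ca: /etc/pki/kibana-ops/ca.crt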
