Diffstat (limited to 'roles')
-rw-r--r--  roles/calico/handlers/main.yml | 4
-rw-r--r--  roles/contiv/tasks/netplugin.yml | 4
-rw-r--r--  roles/docker/handlers/main.yml | 3
-rw-r--r--  roles/docker/tasks/package_docker.yml | 7
-rw-r--r--  roles/docker/tasks/systemcontainer_docker.yml | 12
-rw-r--r--  roles/flannel/handlers/main.yml | 4
-rw-r--r--  roles/lib_openshift/library/oc_storageclass.py | 1688
-rw-r--r--  roles/lib_openshift/src/ansible/oc_storageclass.py | 32
-rw-r--r--  roles/lib_openshift/src/class/oc_storageclass.py | 155
-rw-r--r--  roles/lib_openshift/src/doc/storageclass | 86
-rw-r--r--  roles/lib_openshift/src/lib/storageclass.py | 76
-rw-r--r--  roles/lib_openshift/src/sources.yml | 11
-rwxr-xr-x  roles/lib_openshift/src/test/integration/oc_storageclass.yml | 87
-rwxr-xr-x  roles/lib_openshift/src/test/unit/test_oc_storageclass.py | 93
-rw-r--r--  roles/openshift_default_storage_class/defaults/main.yml | 9
-rw-r--r--  roles/openshift_default_storage_class/tasks/main.yml | 19
-rw-r--r--  roles/openshift_examples/tasks/main.yml | 16
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py | 101
-rw-r--r--  roles/openshift_health_checker/callback_plugins/zz_failure_summary.py | 3
-rwxr-xr-x  roles/openshift_health_checker/library/aos_version.py | 28
-rw-r--r--  roles/openshift_health_checker/openshift_checks/docker_storage.py | 2
-rw-r--r--  roles/openshift_health_checker/openshift_checks/logging/kibana.py | 2
-rw-r--r--  roles/openshift_health_checker/openshift_checks/logging/logging.py | 6
-rw-r--r--  roles/openshift_health_checker/openshift_checks/package_version.py | 4
-rw-r--r--  roles/openshift_health_checker/test/aos_version_test.py | 137
-rw-r--r--  roles/openshift_health_checker/test/docker_storage_test.py | 4
-rw-r--r--  roles/openshift_health_checker/test/kibana_test.py | 2
-rw-r--r--  roles/openshift_health_checker/test/logging_check_test.py | 32
-rw-r--r--  roles/openshift_health_checker/test/package_version_test.py | 30
-rw-r--r--  roles/openshift_hosted/tasks/registry/storage/glusterfs.yml | 2
-rw-r--r--  roles/openshift_hosted/templates/registry_config.j2 | 5
-rw-r--r--  roles/openshift_node/handlers/main.yml | 13
-rw-r--r--  roles/openshift_node/tasks/main.yml | 16
-rw-r--r--  roles/openshift_node/tasks/openvswitch_system_container.yml | 2
-rw-r--r--  roles/openshift_node/templates/node.service.j2 | 1
-rw-r--r--  roles/openshift_node_certificates/handlers/main.yml | 4
-rw-r--r--  roles/openshift_node_upgrade/README.md | 5
-rw-r--r--  roles/openshift_node_upgrade/handlers/main.yml | 20
-rw-r--r--  roles/openshift_node_upgrade/tasks/docker/upgrade.yml | 8
-rw-r--r--  roles/openshift_node_upgrade/tasks/restart.yml | 2
-rw-r--r--  roles/openshift_node_upgrade/templates/node.service.j2 | 1
-rw-r--r--  roles/openshift_service_catalog/files/kubeservicecatalog_roles_bindings.yml | 6
-rw-r--r--  roles/openshift_service_catalog/files/openshift-ansible-catalog-console.js | 2
-rw-r--r--  roles/openshift_service_catalog/tasks/install.yml | 53
-rw-r--r--  roles/openshift_service_catalog/tasks/wire_aggregator.yml | 15
-rw-r--r--  roles/openshift_service_catalog/templates/sc_role_patching.j2 | 26
-rw-r--r--  roles/openshift_storage_glusterfs/README.md | 12
-rw-r--r--  roles/openshift_storage_glusterfs/defaults/main.yml | 22
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml | 11
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.6/heketi-template.yml | 11
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml | 38
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml | 6
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml | 6
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml | 1
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml | 3
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2 | 2
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.6/heketi.json.j2 | 36
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.6/topology.json.j2 | 14
58 files changed, 2820 insertions(+), 180 deletions(-)
diff --git a/roles/calico/handlers/main.yml b/roles/calico/handlers/main.yml
index 53cecfcc3..67fc0065f 100644
--- a/roles/calico/handlers/main.yml
+++ b/roles/calico/handlers/main.yml
@@ -8,3 +8,7 @@
systemd:
name: "{{ openshift.docker.service_name }}"
state: restarted
+ register: l_docker_restart_docker_in_calico_result
+ until: not l_docker_restart_docker_in_calico_result | failed
+ retries: 3
+ delay: 30
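
This register/until/retries/delay block is the retry pattern applied throughout this change: the restart is retried while it still reports failure, up to 3 attempts spaced 30 seconds apart. A minimal standalone sketch of the pattern, with illustrative task and variable names (using the pre-2.5 `failed` filter style this repo relies on):

    - name: restart docker with retries
      systemd:
        name: docker
        state: restarted
      register: l_restart_result
      until: not l_restart_result | failed
      retries: 3
      delay: 30
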
diff --git a/roles/contiv/tasks/netplugin.yml b/roles/contiv/tasks/netplugin.yml
index 0847c92bc..e861a2591 100644
--- a/roles/contiv/tasks/netplugin.yml
+++ b/roles/contiv/tasks/netplugin.yml
@@ -108,6 +108,10 @@
name: "{{ openshift.docker.service_name }}"
state: restarted
when: docker_updated|changed
+ register: l_docker_restart_docker_in_contiv_result
+ until: not l_docker_restart_docker_in_contiv_result | failed
+ retries: 3
+ delay: 30
- name: Netplugin | Enable Netplugin
service:
diff --git a/roles/docker/handlers/main.yml b/roles/docker/handlers/main.yml
index 3a4f4ba92..591367467 100644
--- a/roles/docker/handlers/main.yml
+++ b/roles/docker/handlers/main.yml
@@ -6,9 +6,8 @@
state: restarted
register: r_docker_restart_docker_result
until: not r_docker_restart_docker_result | failed
- retries: 1
+ retries: 3
delay: 30
-
when: not docker_service_status_changed | default(false) | bool
- name: restart udev
diff --git a/roles/docker/tasks/package_docker.yml b/roles/docker/tasks/package_docker.yml
index c82d8659a..5f536005d 100644
--- a/roles/docker/tasks/package_docker.yml
+++ b/roles/docker/tasks/package_docker.yml
@@ -123,9 +123,12 @@
enabled: yes
state: started
daemon_reload: yes
- register: start_result
+ register: r_docker_package_docker_start_result
+ until: not r_docker_package_docker_start_result | failed
+ retries: 3
+ delay: 30
- set_fact:
- docker_service_status_changed: start_result | changed
+ docker_service_status_changed: "{{ r_docker_package_docker_start_result | changed }}"
- meta: flush_handlers
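
The set_fact change above also fixes a subtle bug: without Jinja2 delimiters, `start_result | changed` is stored as that literal string rather than being evaluated, so any later `| bool` test on the fact silently comes out false. A minimal sketch contrasting the two forms (variable names illustrative):

    - set_fact:
        broken_fact: start_result | changed          # the literal string "start_result | changed"
        fixed_fact: "{{ start_result | changed }}"   # evaluated to True/False
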
diff --git a/roles/docker/tasks/systemcontainer_docker.yml b/roles/docker/tasks/systemcontainer_docker.yml
index d8c5ccfd3..57a84bc2c 100644
--- a/roles/docker/tasks/systemcontainer_docker.yml
+++ b/roles/docker/tasks/systemcontainer_docker.yml
@@ -46,6 +46,11 @@
state: stopped
daemon_reload: yes
ignore_errors: True
+ register: r_docker_systemcontainer_docker_stop_result
+ until: not r_docker_systemcontainer_docker_stop_result | failed
+ retries: 3
+ delay: 30
+
# Set http_proxy, https_proxy, and no_proxy in /etc/atomic.conf
# regexp: the line starts with or without #, followed by the string
@@ -160,9 +165,12 @@
enabled: yes
state: started
daemon_reload: yes
- register: start_result
+ register: r_docker_systemcontainer_docker_start_result
+ until: not r_docker_systemcontainer_docker_start_result | failed
+ retries: 3
+ delay: 30
- set_fact:
- docker_service_status_changed: start_result | changed
+ docker_service_status_changed: "{{ r_docker_systemcontainer_docker_start_result | changed }}"
- meta: flush_handlers
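
Both docker task files end by flushing handlers. Notified handlers normally run only at the end of a play; `meta: flush_handlers` forces any queued handler, such as a docker restart, to run immediately so that later tasks in the same play talk to the restarted daemon. A minimal sketch with illustrative names:

    - name: lay down docker configuration
      template:
        src: daemon.json.j2
        dest: /etc/docker/daemon.json
      notify: restart docker

    # run the queued "restart docker" handler now rather than at end of play
    - meta: flush_handlers

    - name: tasks from here on see the restarted daemon
      command: docker info
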
diff --git a/roles/flannel/handlers/main.yml b/roles/flannel/handlers/main.yml
index c60c2115a..02f5a5f64 100644
--- a/roles/flannel/handlers/main.yml
+++ b/roles/flannel/handlers/main.yml
@@ -8,3 +8,7 @@
systemd:
name: "{{ openshift.docker.service_name }}"
state: restarted
+ register: l_docker_restart_docker_in_flannel_result
+ until: not l_docker_restart_docker_in_flannel_result | failed
+ retries: 3
+ delay: 30
diff --git a/roles/lib_openshift/library/oc_storageclass.py b/roles/lib_openshift/library/oc_storageclass.py
new file mode 100644
index 000000000..d5375e27a
--- /dev/null
+++ b/roles/lib_openshift/library/oc_storageclass.py
@@ -0,0 +1,1688 @@
+#!/usr/bin/env python
+# pylint: disable=missing-docstring
+# flake8: noqa: T001
+# GENERATED -- DO NOT EDIT
+#
+# Copyright 2016 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# -*- -*- -*- Begin included fragment: lib/import.py -*- -*- -*-
+'''
+ OpenShiftCLI class that wraps the oc commands in a subprocess
+'''
+# pylint: disable=too-many-lines
+
+from __future__ import print_function
+import atexit
+import copy
+import json
+import os
+import re
+import shutil
+import subprocess
+import tempfile
+# pylint: disable=import-error
+try:
+ import ruamel.yaml as yaml
+except ImportError:
+ import yaml
+
+from ansible.module_utils.basic import AnsibleModule
+
+# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: doc/storageclass -*- -*- -*-
+
+DOCUMENTATION = '''
+---
+module: oc_storageclass
+short_description: Create, modify, and idempotently manage openshift storageclasses.
+description:
+ - Manage openshift storageclass objects programmatically.
+options:
+ state:
+ description:
+ - State represents whether to create, modify, delete, or list
+ required: False
+ default: present
+ choices: ["present", "absent", "list"]
+ aliases: []
+ kubeconfig:
+ description:
+ - The path for the kubeconfig file to use for authentication
+ required: false
+ default: /etc/origin/master/admin.kubeconfig
+ aliases: []
+ debug:
+ description:
+ - Turn on debug output.
+ required: false
+ default: False
+ aliases: []
+ name:
+ description:
+ - Name of the object that is being queried.
+ required: false
+ default: None
+ aliases: []
+ provisioner:
+ description:
+ - The volume provisioner backing the storageclass; one of aws-ebs, gce-pd, glusterfs, or cinder.
+ required: false
+ default: 'aws-ebs'
+ aliases: []
+ default_storage_class:
+ description:
+ - Whether or not this is the default storage class
+ required: false
+ default: False
+ aliases: []
+ parameters:
+ description:
+ - A dictionary of parameters to configure the storageclass; the valid keys depend on the chosen provisioner.
+ required: false
+ default: None
+ aliases: []
+ api_version:
+ description:
+ - The api version.
+ required: false
+ default: v1
+ aliases: []
+author:
+- "Kenny Woodson <kwoodson@redhat.com>"
+extends_documentation_fragment: []
+'''
+
+EXAMPLES = '''
+- name: get storageclass
+ run_once: true
+ oc_storageclass:
+ name: gp2
+ state: list
+ register: registry_sc_out
+
+- name: create the storageclass
+ oc_storageclass:
+ run_once: true
+ name: gp2
+ parameters:
+ type: gp2
+ encrypted: 'true'
+ kmsKeyId: '<full kms key arn>'
+ provisioner: aws-ebs
+ default_storage_class: False
+ register: sc_out
+ notify:
+ - restart openshift master services
+'''
+
+# -*- -*- -*- End included fragment: doc/storageclass -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
+
+
+class YeditException(Exception): # pragma: no cover
+ ''' Exception class for Yedit '''
+ pass
+
+
+# pylint: disable=too-many-public-methods
+class Yedit(object): # pragma: no cover
+ ''' Class to modify yaml files '''
+ re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z{}/_-]+)).?)+$"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
+ com_sep = set(['.', '#', '|', ':'])
+
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ filename=None,
+ content=None,
+ content_type='yaml',
+ separator='.',
+ backup=False):
+ self.content = content
+ self._separator = separator
+ self.filename = filename
+ self.__yaml_dict = content
+ self.content_type = content_type
+ self.backup = backup
+ self.load(content_type=self.content_type)
+ if self.__yaml_dict is None:
+ self.__yaml_dict = {}
+
+ @property
+ def separator(self):
+ ''' getter method for separator '''
+ return self._separator
+
+ @separator.setter
+ def separator(self, inc_sep):
+ ''' setter method for separator '''
+ self._separator = inc_sep
+
+ @property
+ def yaml_dict(self):
+ ''' getter method for yaml_dict '''
+ return self.__yaml_dict
+
+ @yaml_dict.setter
+ def yaml_dict(self, value):
+ ''' setter method for yaml_dict '''
+ self.__yaml_dict = value
+
+ @staticmethod
+ def parse_key(key, sep='.'):
+ '''parse the key allowing the appropriate separator'''
+ common_separators = list(Yedit.com_sep - set([sep]))
+ return re.findall(Yedit.re_key.format(''.join(common_separators)), key)
+
+ @staticmethod
+ def valid_key(key, sep='.'):
+ '''validate the incoming key'''
+ common_separators = list(Yedit.com_sep - set([sep]))
+ if not re.match(Yedit.re_valid_key.format(''.join(common_separators)), key):
+ return False
+
+ return True
+
+ @staticmethod
+ def remove_entry(data, key, sep='.'):
+ ''' remove data at location key '''
+ if key == '' and isinstance(data, dict):
+ data.clear()
+ return True
+ elif key == '' and isinstance(data, list):
+ del data[:]
+ return True
+
+ if not (key and Yedit.valid_key(key, sep)) and \
+ isinstance(data, (list, dict)):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes[:-1]:
+ if dict_key and isinstance(data, dict):
+ data = data.get(dict_key)
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ # process last index for remove
+ # expected list entry
+ if key_indexes[-1][0]:
+ if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
+ del data[int(key_indexes[-1][0])]
+ return True
+
+ # expected dict entry
+ elif key_indexes[-1][1]:
+ if isinstance(data, dict):
+ del data[key_indexes[-1][1]]
+ return True
+
+ @staticmethod
+ def add_entry(data, key, item=None, sep='.'):
+ ''' Add an entry to a dictionary using key notation a.b.c
+ d = {'a': {'b': 'c'}}
+ key = a#b
+ return c
+ '''
+ if key == '':
+ pass
+ elif (not (key and Yedit.valid_key(key, sep)) and
+ isinstance(data, (list, dict))):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes[:-1]:
+ if dict_key:
+ if isinstance(data, dict) and dict_key in data and data[dict_key]: # noqa: E501
+ data = data[dict_key]
+ continue
+
+ elif data and not isinstance(data, dict):
+ raise YeditException("Unexpected item type found while going through key " +
+ "path: {} (at key: {})".format(key, dict_key))
+
+ data[dict_key] = {}
+ data = data[dict_key]
+
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ raise YeditException("Unexpected item type found while going through key path: {}".format(key))
+
+ if key == '':
+ data = item
+
+ # process last index for add
+ # expected list entry
+ elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
+ data[int(key_indexes[-1][0])] = item
+
+ # expected dict entry
+ elif key_indexes[-1][1] and isinstance(data, dict):
+ data[key_indexes[-1][1]] = item
+
+ # didn't add/update to an existing list, nor add/update key to a dict
+ # so we must have been provided some syntax like a.b.c[<int>] = "data" for a
+ # non-existent array
+ else:
+ raise YeditException("Error adding to object at path: {}".format(key))
+
+ return data
+
+ @staticmethod
+ def get_entry(data, key, sep='.'):
+ ''' Get an item from a dictionary with key notation a.b.c
+ d = {'a': {'b': 'c'}}
+ key = a.b
+ return c
+ '''
+ if key == '':
+ pass
+ elif (not (key and Yedit.valid_key(key, sep)) and
+ isinstance(data, (list, dict))):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes:
+ if dict_key and isinstance(data, dict):
+ data = data.get(dict_key)
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ return data
+
+ @staticmethod
+ def _write(filename, contents):
+ ''' Actually write the file contents to disk. This helps with mocking. '''
+
+ tmp_filename = filename + '.yedit'
+
+ with open(tmp_filename, 'w') as yfd:
+ yfd.write(contents)
+
+ os.rename(tmp_filename, filename)
+
+ def write(self):
+ ''' write to file '''
+ if not self.filename:
+ raise YeditException('Please specify a filename.')
+
+ if self.backup and self.file_exists():
+ shutil.copy(self.filename, self.filename + '.orig')
+
+ # Try to set format attributes if supported
+ try:
+ self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ # Try to use RoundTripDumper if supported.
+ try:
+ Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
+ except AttributeError:
+ Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False))
+
+ return (True, self.yaml_dict)
+
+ def read(self):
+ ''' read from file '''
+ # check if it exists
+ if self.filename is None or not self.file_exists():
+ return None
+
+ contents = None
+ with open(self.filename) as yfd:
+ contents = yfd.read()
+
+ return contents
+
+ def file_exists(self):
+ ''' return whether file exists '''
+ if os.path.exists(self.filename):
+ return True
+
+ return False
+
+ def load(self, content_type='yaml'):
+ ''' return yaml file '''
+ contents = self.read()
+
+ if not contents and not self.content:
+ return None
+
+ if self.content:
+ if isinstance(self.content, dict):
+ self.yaml_dict = self.content
+ return self.yaml_dict
+ elif isinstance(self.content, str):
+ contents = self.content
+
+ # check if it is yaml
+ try:
+ if content_type == 'yaml' and contents:
+ # Try to set format attributes if supported
+ try:
+ self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ # Try to use RoundTripLoader if supported.
+ try:
+ self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
+ except AttributeError:
+ self.yaml_dict = yaml.safe_load(contents)
+
+ # Try to set format attributes if supported
+ try:
+ self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ elif content_type == 'json' and contents:
+ self.yaml_dict = json.loads(contents)
+ except yaml.YAMLError as err:
+ # Error loading yaml or json
+ raise YeditException('Problem with loading yaml file. {}'.format(err))
+
+ return self.yaml_dict
+
+ def get(self, key):
+ ''' get a specified key'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
+ except KeyError:
+ entry = None
+
+ return entry
+
+ def pop(self, path, key_or_item):
+ ''' remove a key, value pair from a dict or an item for a list'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ return (False, self.yaml_dict)
+
+ if isinstance(entry, dict):
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
+ if key_or_item in entry:
+ entry.pop(key_or_item)
+ return (True, self.yaml_dict)
+ return (False, self.yaml_dict)
+
+ elif isinstance(entry, list):
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
+ ind = None
+ try:
+ ind = entry.index(key_or_item)
+ except ValueError:
+ return (False, self.yaml_dict)
+
+ entry.pop(ind)
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ def delete(self, path):
+ ''' remove path from a dict'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ return (False, self.yaml_dict)
+
+ result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
+ if not result:
+ return (False, self.yaml_dict)
+
+ return (True, self.yaml_dict)
+
+ def exists(self, path, value):
+ ''' check if value exists at path'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if isinstance(entry, list):
+ if value in entry:
+ return True
+ return False
+
+ elif isinstance(entry, dict):
+ if isinstance(value, dict):
+ rval = False
+ for key, val in value.items():
+ if entry[key] != val:
+ rval = False
+ break
+ else:
+ rval = True
+ return rval
+
+ return value in entry
+
+ return entry == value
+
+ def append(self, path, value):
+ '''append value to a list'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ self.put(path, [])
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ if not isinstance(entry, list):
+ return (False, self.yaml_dict)
+
+ # AUDIT:maybe-no-member makes sense due to loading data from
+ # a serialized format.
+ # pylint: disable=maybe-no-member
+ entry.append(value)
+ return (True, self.yaml_dict)
+
+ # pylint: disable=too-many-arguments
+ def update(self, path, value, index=None, curr_value=None):
+ ''' put path, value into a dict '''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if isinstance(entry, dict):
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
+ if not isinstance(value, dict):
+ raise YeditException('Cannot replace key, value entry in dict with non-dict type. ' +
+ 'value=[{}] type=[{}]'.format(value, type(value)))
+
+ entry.update(value)
+ return (True, self.yaml_dict)
+
+ elif isinstance(entry, list):
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
+ ind = None
+ if curr_value:
+ try:
+ ind = entry.index(curr_value)
+ except ValueError:
+ return (False, self.yaml_dict)
+
+ elif index is not None:
+ ind = index
+
+ if ind is not None and entry[ind] != value:
+ entry[ind] = value
+ return (True, self.yaml_dict)
+
+ # see if it exists in the list
+ try:
+ ind = entry.index(value)
+ except ValueError:
+ # doesn't exist, append it
+ entry.append(value)
+ return (True, self.yaml_dict)
+
+ # already exists, return
+ if ind is not None:
+ return (False, self.yaml_dict)
+ return (False, self.yaml_dict)
+
+ def put(self, path, value):
+ ''' put path, value into a dict '''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry == value:
+ return (False, self.yaml_dict)
+
+ # deepcopy didn't work
+ # Try to use ruamel.yaml and fallback to pyyaml
+ try:
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ except AttributeError:
+ tmp_copy = copy.deepcopy(self.yaml_dict)
+
+ # set the format attributes if available
+ try:
+ tmp_copy.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ result = Yedit.add_entry(tmp_copy, path, value, self.separator)
+ if result is None:
+ return (False, self.yaml_dict)
+
+ # When path equals "" it is a special case.
+ # "" refers to the root of the document
+ # Only update the root path (entire document) when its a list or dict
+ if path == '':
+ if isinstance(result, list) or isinstance(result, dict):
+ self.yaml_dict = result
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ self.yaml_dict = tmp_copy
+
+ return (True, self.yaml_dict)
+
+ def create(self, path, value):
+ ''' create a yaml file '''
+ if not self.file_exists():
+ # deepcopy didn't work
+ # Try to use ruamel.yaml and fallback to pyyaml
+ try:
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ except AttributeError:
+ tmp_copy = copy.deepcopy(self.yaml_dict)
+
+ # set the format attributes if available
+ try:
+ tmp_copy.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ result = Yedit.add_entry(tmp_copy, path, value, self.separator)
+ if result is not None:
+ self.yaml_dict = tmp_copy
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ @staticmethod
+ def get_curr_value(invalue, val_type):
+ '''return the current value'''
+ if invalue is None:
+ return None
+
+ curr_value = invalue
+ if val_type == 'yaml':
+ curr_value = yaml.load(invalue)
+ elif val_type == 'json':
+ curr_value = json.loads(invalue)
+
+ return curr_value
+
+ @staticmethod
+ def parse_value(inc_value, vtype=''):
+ '''determine value type passed'''
+ true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
+ 'on', 'On', 'ON', ]
+ false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
+ 'off', 'Off', 'OFF']
+
+ # It came in as a string but you didn't specify value_type as string
+ # we will convert to bool if it matches any of the above cases
+ if isinstance(inc_value, str) and 'bool' in vtype:
+ if inc_value not in true_bools and inc_value not in false_bools:
+ raise YeditException('Not a boolean type. str=[{}] vtype=[{}]'.format(inc_value, vtype))
+ elif isinstance(inc_value, bool) and 'str' in vtype:
+ inc_value = str(inc_value)
+
+ # There is a special case where '' will turn into None after yaml loading it so skip
+ if isinstance(inc_value, str) and inc_value == '':
+ pass
+ # If vtype is not str then go ahead and attempt to yaml load it.
+ elif isinstance(inc_value, str) and 'str' not in vtype:
+ try:
+ inc_value = yaml.safe_load(inc_value)
+ except Exception:
+ raise YeditException('Could not determine type of incoming value. ' +
+ 'value=[{}] vtype=[{}]'.format(type(inc_value), vtype))
+
+ return inc_value
+
+ @staticmethod
+ def process_edits(edits, yamlfile):
+ '''run through a list of edits and process them one-by-one'''
+ results = []
+ for edit in edits:
+ value = Yedit.parse_value(edit['value'], edit.get('value_type', ''))
+ if edit.get('action') == 'update':
+ # pylint: disable=line-too-long
+ curr_value = Yedit.get_curr_value(
+ Yedit.parse_value(edit.get('curr_value')),
+ edit.get('curr_value_format'))
+
+ rval = yamlfile.update(edit['key'],
+ value,
+ edit.get('index'),
+ curr_value)
+
+ elif edit.get('action') == 'append':
+ rval = yamlfile.append(edit['key'], value)
+
+ else:
+ rval = yamlfile.put(edit['key'], value)
+
+ if rval[0]:
+ results.append({'key': edit['key'], 'edit': rval[1]})
+
+ return {'changed': len(results) > 0, 'results': results}
+
+ # pylint: disable=too-many-return-statements,too-many-branches
+ @staticmethod
+ def run_ansible(params):
+ '''perform the idempotent crud operations'''
+ yamlfile = Yedit(filename=params['src'],
+ backup=params['backup'],
+ separator=params['separator'])
+
+ state = params['state']
+
+ if params['src']:
+ rval = yamlfile.load()
+
+ if yamlfile.yaml_dict is None and state != 'present':
+ return {'failed': True,
+ 'msg': 'Error opening file [{}]. Verify that the '.format(params['src']) +
+ 'file exists, that it has correct permissions, and is valid yaml.'}
+
+ if state == 'list':
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
+ yamlfile.yaml_dict = content
+
+ if params['key']:
+ rval = yamlfile.get(params['key']) or {}
+
+ return {'changed': False, 'result': rval, 'state': state}
+
+ elif state == 'absent':
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
+ yamlfile.yaml_dict = content
+
+ if params['update']:
+ rval = yamlfile.pop(params['key'], params['value'])
+ else:
+ rval = yamlfile.delete(params['key'])
+
+ if rval[0] and params['src']:
+ yamlfile.write()
+
+ return {'changed': rval[0], 'result': rval[1], 'state': state}
+
+ elif state == 'present':
+ # check if content is different than what is in the file
+ if params['content']:
+ content = Yedit.parse_value(params['content'], params['content_type'])
+
+ # We had no edits to make and the contents are the same
+ if yamlfile.yaml_dict == content and \
+ params['value'] is None:
+ return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
+
+ yamlfile.yaml_dict = content
+
+ # If we were passed a key, value then
+ # we encapsulate it in a list and process it
+ # Key, Value passed to the module : Converted to Edits list #
+ edits = []
+ _edit = {}
+ if params['value'] is not None:
+ _edit['value'] = params['value']
+ _edit['value_type'] = params['value_type']
+ _edit['key'] = params['key']
+
+ if params['update']:
+ _edit['action'] = 'update'
+ _edit['curr_value'] = params['curr_value']
+ _edit['curr_value_format'] = params['curr_value_format']
+ _edit['index'] = params['index']
+
+ elif params['append']:
+ _edit['action'] = 'append'
+
+ edits.append(_edit)
+
+ elif params['edits'] is not None:
+ edits = params['edits']
+
+ if edits:
+ results = Yedit.process_edits(edits, yamlfile)
+
+ # if there were changes and a src provided to us we need to write
+ if results['changed'] and params['src']:
+ yamlfile.write()
+
+ return {'changed': results['changed'], 'result': results['results'], 'state': state}
+
+ # no edits to make
+ if params['src']:
+ # pylint: disable=redefined-variable-type
+ rval = yamlfile.write()
+ return {'changed': rval[0],
+ 'result': rval[1],
+ 'state': state}
+
+ # We were passed content but no src, key or value, or edits. Return contents in memory
+ return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
+ return {'failed': True, 'msg': 'Unknown state passed'}
+
+# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: lib/base.py -*- -*- -*-
+# pylint: disable=too-many-lines
+# noqa: E301,E302,E303,T001
+
+
+class OpenShiftCLIError(Exception):
+ '''Exception class for openshiftcli'''
+ pass
+
+
+ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')]
+
+
+def locate_oc_binary():
+ ''' Find and return oc binary file '''
+ # https://github.com/openshift/openshift-ansible/issues/3410
+ # oc can be in /usr/local/bin in some cases, but that may not
+ # be in $PATH due to ansible/sudo
+ paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS
+
+ oc_binary = 'oc'
+
+ # Use shutil.which if it is available, otherwise fallback to a naive path search
+ try:
+ which_result = shutil.which(oc_binary, path=os.pathsep.join(paths))
+ if which_result is not None:
+ oc_binary = which_result
+ except AttributeError:
+ for path in paths:
+ if os.path.exists(os.path.join(path, oc_binary)):
+ oc_binary = os.path.join(path, oc_binary)
+ break
+
+ return oc_binary
+
+
+# pylint: disable=too-few-public-methods
+class OpenShiftCLI(object):
+ ''' Class to wrap the command line tools '''
+ def __init__(self,
+ namespace,
+ kubeconfig='/etc/origin/master/admin.kubeconfig',
+ verbose=False,
+ all_namespaces=False):
+ ''' Constructor for OpenshiftCLI '''
+ self.namespace = namespace
+ self.verbose = verbose
+ self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig)
+ self.all_namespaces = all_namespaces
+ self.oc_binary = locate_oc_binary()
+
+ # Pylint allows only 5 arguments to be passed.
+ # pylint: disable=too-many-arguments
+ def _replace_content(self, resource, rname, content, force=False, sep='.'):
+ ''' replace the current object with the content '''
+ res = self._get(resource, rname)
+ if not res['results']:
+ return res
+
+ fname = Utils.create_tmpfile(rname + '-')
+
+ yed = Yedit(fname, res['results'][0], separator=sep)
+ changes = []
+ for key, value in content.items():
+ changes.append(yed.put(key, value))
+
+ if any([change[0] for change in changes]):
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self._replace(fname, force)
+
+ return {'returncode': 0, 'updated': False}
+
+ def _replace(self, fname, force=False):
+ '''replace the current object with oc replace'''
+ # We are removing the 'resourceVersion' to handle
+ # a race condition when modifying oc objects
+ yed = Yedit(fname)
+ results = yed.delete('metadata.resourceVersion')
+ if results[0]:
+ yed.write()
+
+ cmd = ['replace', '-f', fname]
+ if force:
+ cmd.append('--force')
+ return self.openshift_cmd(cmd)
+
+ def _create_from_content(self, rname, content):
+ '''create a temporary file and then call oc create on it'''
+ fname = Utils.create_tmpfile(rname + '-')
+ yed = Yedit(fname, content=content)
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self._create(fname)
+
+ def _create(self, fname):
+ '''call oc create on a filename'''
+ return self.openshift_cmd(['create', '-f', fname])
+
+ def _delete(self, resource, name=None, selector=None):
+ '''call oc delete on a resource'''
+ cmd = ['delete', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+ else:
+ raise OpenShiftCLIError('Either name or selector is required when calling delete.')
+
+ return self.openshift_cmd(cmd)
+
+ def _process(self, template_name, create=False, params=None, template_data=None): # noqa: E501
+ '''process a template
+
+ template_name: the name of the template to process
+ create: whether to send to oc create after processing
+ params: the parameters for the template
+ template_data: the incoming template's data; instead of a file
+ '''
+ cmd = ['process']
+ if template_data:
+ cmd.extend(['-f', '-'])
+ else:
+ cmd.append(template_name)
+ if params:
+ param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
+ cmd.append('-v')
+ cmd.extend(param_str)
+
+ results = self.openshift_cmd(cmd, output=True, input_data=template_data)
+
+ if results['returncode'] != 0 or not create:
+ return results
+
+ fname = Utils.create_tmpfile(template_name + '-')
+ yed = Yedit(fname, results['results'])
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self.openshift_cmd(['create', '-f', fname])
+
+ def _get(self, resource, name=None, selector=None):
+ '''return a resource by name '''
+ cmd = ['get', resource]
+ if selector is not None:
+ cmd.append('--selector={}'.format(selector))
+ elif name is not None:
+ cmd.append(name)
+
+ cmd.extend(['-o', 'json'])
+
+ rval = self.openshift_cmd(cmd, output=True)
+
+ # Ensure results are returned in an array
+ if 'items' in rval:
+ rval['results'] = rval['items']
+ elif not isinstance(rval['results'], list):
+ rval['results'] = [rval['results']]
+
+ return rval
+
+ def _schedulable(self, node=None, selector=None, schedulable=True):
+ ''' perform oadm manage-node schedulable '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector={}'.format(selector))
+
+ cmd.append('--schedulable={}'.format(schedulable))
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
+
+ def _list_pods(self, node=None, selector=None, pod_selector=None):
+ ''' perform oadm list pods
+
+ node: the node in which to list pods
+ selector: the label selector filter if provided
+ pod_selector: the pod selector filter if provided
+ '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector={}'.format(selector))
+
+ if pod_selector:
+ cmd.append('--pod-selector={}'.format(pod_selector))
+
+ cmd.extend(['--list-pods', '-o', 'json'])
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
+
+ # pylint: disable=too-many-arguments
+ def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
+ ''' perform oadm manage-node evacuate '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector={}'.format(selector))
+
+ if dry_run:
+ cmd.append('--dry-run')
+
+ if pod_selector:
+ cmd.append('--pod-selector={}'.format(pod_selector))
+
+ if grace_period:
+ cmd.append('--grace-period={}'.format(int(grace_period)))
+
+ if force:
+ cmd.append('--force')
+
+ cmd.append('--evacuate')
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
+
+ def _version(self):
+ ''' return the openshift version'''
+ return self.openshift_cmd(['version'], output=True, output_type='raw')
+
+ def _import_image(self, url=None, name=None, tag=None):
+ ''' perform image import '''
+ cmd = ['import-image']
+
+ image = '{0}'.format(name)
+ if tag:
+ image += ':{0}'.format(tag)
+
+ cmd.append(image)
+
+ if url:
+ cmd.append('--from={0}/{1}'.format(url, image))
+
+ cmd.append('-n{0}'.format(self.namespace))
+
+ cmd.append('--confirm')
+ return self.openshift_cmd(cmd)
+
+ def _run(self, cmds, input_data):
+ ''' Actually executes the command. This makes mocking easier. '''
+ curr_env = os.environ.copy()
+ curr_env.update({'KUBECONFIG': self.kubeconfig})
+ proc = subprocess.Popen(cmds,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ env=curr_env)
+
+ stdout, stderr = proc.communicate(input_data)
+
+ return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
+
+ # pylint: disable=too-many-arguments,too-many-branches
+ def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
+ '''Base command for oc '''
+ cmds = [self.oc_binary]
+
+ if oadm:
+ cmds.append('adm')
+
+ cmds.extend(cmd)
+
+ if self.all_namespaces:
+ cmds.extend(['--all-namespaces'])
+ elif self.namespace is not None and self.namespace.lower() not in ['none', 'empty']: # noqa: E501
+ cmds.extend(['-n', self.namespace])
+
+ if self.verbose:
+ print(' '.join(cmds))
+
+ try:
+ returncode, stdout, stderr = self._run(cmds, input_data)
+ except OSError as ex:
+ returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
+
+ rval = {"returncode": returncode,
+ "cmd": ' '.join(cmds)}
+
+ if output_type == 'json':
+ rval['results'] = {}
+ if output and stdout:
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as verr:
+ if "No JSON object could be decoded" in verr.args:
+ rval['err'] = verr.args
+ elif output_type == 'raw':
+ rval['results'] = stdout if output else ''
+
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if 'err' in rval or returncode != 0:
+ rval.update({"stderr": stderr,
+ "stdout": stdout})
+
+ return rval
+
+
+class Utils(object): # pragma: no cover
+ ''' utilities for openshiftcli modules '''
+
+ @staticmethod
+ def _write(filename, contents):
+ ''' Actually write the file contents to disk. This helps with mocking. '''
+
+ with open(filename, 'w') as sfd:
+ sfd.write(contents)
+
+ @staticmethod
+ def create_tmp_file_from_contents(rname, data, ftype='yaml'):
+ ''' create a file in tmp with name and contents'''
+
+ tmp = Utils.create_tmpfile(prefix=rname)
+
+ if ftype == 'yaml':
+ # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
+ # pylint: disable=no-member
+ if hasattr(yaml, 'RoundTripDumper'):
+ Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
+ else:
+ Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False))
+
+ elif ftype == 'json':
+ Utils._write(tmp, json.dumps(data))
+ else:
+ Utils._write(tmp, data)
+
+ # Register cleanup when module is done
+ atexit.register(Utils.cleanup, [tmp])
+ return tmp
+
+ @staticmethod
+ def create_tmpfile_copy(inc_file):
+ '''create a temporary copy of a file'''
+ tmpfile = Utils.create_tmpfile('lib_openshift-')
+ Utils._write(tmpfile, open(inc_file).read())
+
+ # Cleanup the tmpfile
+ atexit.register(Utils.cleanup, [tmpfile])
+
+ return tmpfile
+
+ @staticmethod
+ def create_tmpfile(prefix='tmp'):
+ ''' Generates and returns a temporary file name '''
+
+ with tempfile.NamedTemporaryFile(prefix=prefix, delete=False) as tmp:
+ return tmp.name
+
+ @staticmethod
+ def create_tmp_files_from_contents(content, content_type=None):
+ '''Turn an array of dict: filename, content into a files array'''
+ if not isinstance(content, list):
+ content = [content]
+ files = []
+ for item in content:
+ path = Utils.create_tmp_file_from_contents(item['path'] + '-',
+ item['data'],
+ ftype=content_type)
+ files.append({'name': os.path.basename(item['path']),
+ 'path': path})
+ return files
+
+ @staticmethod
+ def cleanup(files):
+ '''Clean up on exit '''
+ for sfile in files:
+ if os.path.exists(sfile):
+ if os.path.isdir(sfile):
+ shutil.rmtree(sfile)
+ elif os.path.isfile(sfile):
+ os.remove(sfile)
+
+ @staticmethod
+ def exists(results, _name):
+ ''' Check to see if the results include the name '''
+ if not results:
+ return False
+
+ if Utils.find_result(results, _name):
+ return True
+
+ return False
+
+ @staticmethod
+ def find_result(results, _name):
+ ''' Find the specified result by name'''
+ rval = None
+ for result in results:
+ if 'metadata' in result and result['metadata']['name'] == _name:
+ rval = result
+ break
+
+ return rval
+
+ @staticmethod
+ def get_resource_file(sfile, sfile_type='yaml'):
+ ''' return the service file '''
+ contents = None
+ with open(sfile) as sfd:
+ contents = sfd.read()
+
+ if sfile_type == 'yaml':
+ # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
+ # pylint: disable=no-member
+ if hasattr(yaml, 'RoundTripLoader'):
+ contents = yaml.load(contents, yaml.RoundTripLoader)
+ else:
+ contents = yaml.safe_load(contents)
+ elif sfile_type == 'json':
+ contents = json.loads(contents)
+
+ return contents
+
+ @staticmethod
+ def filter_versions(stdout):
+ ''' filter the oc version output '''
+
+ version_dict = {}
+ version_search = ['oc', 'openshift', 'kubernetes']
+
+ for line in stdout.strip().split('\n'):
+ for term in version_search:
+ if not line:
+ continue
+ if line.startswith(term):
+ version_dict[term] = line.split()[-1]
+
+ # horrible hack to get the openshift version in OpenShift 3.2
+ # By default "oc version" in 3.2 does not return an "openshift" version
+ if "openshift" not in version_dict:
+ version_dict["openshift"] = version_dict["oc"]
+
+ return version_dict
+
+ @staticmethod
+ def add_custom_versions(versions):
+ ''' create custom versions strings '''
+
+ versions_dict = {}
+
+ for tech, version in versions.items():
+ # clean up "-" from version
+ if "-" in version:
+ version = version.split("-")[0]
+
+ if version.startswith('v'):
+ versions_dict[tech + '_numeric'] = version[1:].split('+')[0]
+ # "v3.3.0.33" is what we have, we want "3.3"
+ versions_dict[tech + '_short'] = version[1:4]
+
+ return versions_dict
+
+ @staticmethod
+ def openshift_installed():
+ ''' check if openshift is installed '''
+ import yum
+
+ yum_base = yum.YumBase()
+ if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
+ return True
+
+ return False
+
+ # Disabling too-many-branches. This is a yaml dictionary comparison function
+ # pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
+ @staticmethod
+ def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
+ ''' Given a user defined definition, compare it with the results given back by our query. '''
+
+ # Currently these values are autogenerated and we do not need to check them
+ skip = ['metadata', 'status']
+ if skip_keys:
+ skip.extend(skip_keys)
+
+ for key, value in result_def.items():
+ if key in skip:
+ continue
+
+ # Both are lists
+ if isinstance(value, list):
+ if key not in user_def:
+ if debug:
+ print('User data does not have key [%s]' % key)
+ print('User data: %s' % user_def)
+ return False
+
+ if not isinstance(user_def[key], list):
+ if debug:
+ print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key]))
+ return False
+
+ if len(user_def[key]) != len(value):
+ if debug:
+ print("List lengths are not equal.")
+ print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value)))
+ print("user_def: %s" % user_def[key])
+ print("value: %s" % value)
+ return False
+
+ for values in zip(user_def[key], value):
+ if isinstance(values[0], dict) and isinstance(values[1], dict):
+ if debug:
+ print('sending list - list')
+ print(type(values[0]))
+ print(type(values[1]))
+ result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
+ if not result:
+ print('list compare returned false')
+ return False
+
+ elif value != user_def[key]:
+ if debug:
+ print('value should be identical')
+ print(user_def[key])
+ print(value)
+ return False
+
+ # recurse on a dictionary
+ elif isinstance(value, dict):
+ if key not in user_def:
+ if debug:
+ print("user_def does not have key [%s]" % key)
+ return False
+ if not isinstance(user_def[key], dict):
+ if debug:
+ print("dict returned false: not instance of dict")
+ return False
+
+ # before passing ensure keys match
+ api_values = set(value.keys()) - set(skip)
+ user_values = set(user_def[key].keys()) - set(skip)
+ if api_values != user_values:
+ if debug:
+ print("keys are not equal in dict")
+ print(user_values)
+ print(api_values)
+ return False
+
+ result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
+ if not result:
+ if debug:
+ print("dict returned false")
+ print(result)
+ return False
+
+ # Verify each key, value pair is the same
+ else:
+ if key not in user_def or value != user_def[key]:
+ if debug:
+ print("value not equal; user_def does not have key")
+ print(key)
+ print(value)
+ if key in user_def:
+ print(user_def[key])
+ return False
+
+ if debug:
+ print('returning true')
+ return True
+
+class OpenShiftCLIConfig(object):
+ '''Generic Config'''
+ def __init__(self, rname, namespace, kubeconfig, options):
+ self.kubeconfig = kubeconfig
+ self.name = rname
+ self.namespace = namespace
+ self._options = options
+
+ @property
+ def config_options(self):
+ ''' return config options '''
+ return self._options
+
+ def to_option_list(self, ascommalist=''):
+ '''return all options as a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs'''
+ return self.stringify(ascommalist)
+
+ def stringify(self, ascommalist=''):
+ ''' return the options hash as cli params in a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs '''
+ rval = []
+ for key in sorted(self.config_options.keys()):
+ data = self.config_options[key]
+ if data['include'] \
+ and (data['value'] or isinstance(data['value'], int)):
+ if key == ascommalist:
+ val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
+ else:
+ val = data['value']
+ rval.append('--{}={}'.format(key.replace('_', '-'), val))
+
+ return rval
+
+
+# -*- -*- -*- End included fragment: lib/base.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: lib/storageclass.py -*- -*- -*-
+
+
+# pylint: disable=too-many-instance-attributes
+class StorageClassConfig(object):
+ ''' Handle storageclass options '''
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ name,
+ provisioner=None,
+ parameters=None,
+ annotations=None,
+ default_storage_class="false",
+ api_version='v1',
+ kubeconfig='/etc/origin/master/admin.kubeconfig'):
+ ''' constructor for handling storageclass options '''
+ self.name = name
+ self.parameters = parameters
+ self.annotations = annotations
+ self.provisioner = provisioner
+ self.api_version = api_version
+ self.default_storage_class = str(default_storage_class).lower()
+ self.kubeconfig = kubeconfig
+ self.data = {}
+
+ self.create_dict()
+
+ def create_dict(self):
+ ''' instantiates a storageclass dict '''
+ self.data['apiVersion'] = self.api_version
+ self.data['kind'] = 'StorageClass'
+ self.data['metadata'] = {}
+ self.data['metadata']['name'] = self.name
+
+ self.data['metadata']['annotations'] = {}
+ if self.annotations is not None:
+ self.data['metadata']['annotations'] = self.annotations
+
+ self.data['metadata']['annotations']['storageclass.beta.kubernetes.io/is-default-class'] = \
+ self.default_storage_class
+
+ if self.provisioner is None:
+ self.data['provisioner'] = 'kubernetes.io/aws-ebs'
+ else:
+ self.data['provisioner'] = self.provisioner
+
+ self.data['parameters'] = {}
+ if self.parameters is not None:
+ self.data['parameters'].update(self.parameters)
+
+ # default to aws if no params were passed
+ else:
+ self.data['parameters']['type'] = 'gp2'
+
+
+
+# pylint: disable=too-many-instance-attributes,too-many-public-methods
+class StorageClass(Yedit):
+ ''' Class to model the oc storageclass object '''
+ annotations_path = "metadata.annotations"
+ provisioner_path = "provisioner"
+ parameters_path = "parameters"
+ kind = 'StorageClass'
+
+ def __init__(self, content):
+ '''StorageClass constructor'''
+ super(StorageClass, self).__init__(content=content)
+
+ def get_annotations(self):
+ ''' get the storageclass annotations '''
+ return self.get(StorageClass.annotations_path) or {}
+
+ def get_parameters(self):
+ ''' get the storageclass parameters '''
+ return self.get(StorageClass.parameters_path) or {}
+
+# -*- -*- -*- End included fragment: lib/storageclass.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: class/oc_storageclass.py -*- -*- -*-
+
+# pylint: disable=too-many-instance-attributes
+class OCStorageClass(OpenShiftCLI):
+ ''' Class to wrap the oc command line tools '''
+ kind = 'storageclass'
+
+ # pylint allows 5
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ config,
+ verbose=False):
+ ''' Constructor for OCStorageClass '''
+ super(OCStorageClass, self).__init__(None, kubeconfig=config.kubeconfig, verbose=verbose)
+ self.config = config
+ self.storage_class = None
+
+ def exists(self):
+ ''' return whether a storageclass exists'''
+ if self.storage_class:
+ return True
+
+ return False
+
+ def get(self):
+ '''return storageclass '''
+ result = self._get(self.kind, self.config.name)
+ if result['returncode'] == 0:
+ self.storage_class = StorageClass(content=result['results'][0])
+ elif '\"%s\" not found' % self.config.name in result['stderr']:
+ result['returncode'] = 0
+ result['results'] = [{}]
+
+ return result
+
+ def delete(self):
+ '''delete the object'''
+ return self._delete(self.kind, self.config.name)
+
+ def create(self):
+ '''create the object'''
+ return self._create_from_content(self.config.name, self.config.data)
+
+ def update(self):
+ '''update the object'''
+ # Parameters cannot currently be updated in place; delete and recreate the object.
+ self.delete()
+ # Pause briefly to give the delete time to complete.
+ # Polling for the deletion would be a more robust option.
+ import time
+ time.sleep(5)
+ return self.create()
+
+ def needs_update(self):
+ ''' verify an update is needed '''
+ # check if params have updated
+ if self.storage_class.get_parameters() != self.config.parameters:
+ return True
+
+ for anno_key, anno_value in self.storage_class.get_annotations().items():
+ if 'is-default-class' in anno_key and anno_value != self.config.default_storage_class:
+ return True
+
+ return False
+
+ @staticmethod
+ # pylint: disable=too-many-return-statements,too-many-branches
+ # TODO: This function should be refactored into its individual parts.
+ def run_ansible(params, check_mode):
+ '''run the ansible idempotent code'''
+
+ rconfig = StorageClassConfig(params['name'],
+ provisioner="kubernetes.io/{}".format(params['provisioner']),
+ parameters=params['parameters'],
+ annotations=params['annotations'],
+ api_version="storage.k8s.io/{}".format(params['api_version']),
+ default_storage_class=params.get('default_storage_class', 'false'),
+ kubeconfig=params['kubeconfig'],
+ )
+
+ oc_sc = OCStorageClass(rconfig, verbose=params['debug'])
+
+ state = params['state']
+
+ api_rval = oc_sc.get()
+
+ #####
+ # Get
+ #####
+ if state == 'list':
+ return {'changed': False, 'results': api_rval['results'], 'state': 'list'}
+
+ ########
+ # Delete
+ ########
+ if state == 'absent':
+ if oc_sc.exists():
+
+ if check_mode:
+ return {'changed': True, 'msg': 'Would have performed a delete.'}
+
+ api_rval = oc_sc.delete()
+
+ return {'changed': True, 'results': api_rval, 'state': 'absent'}
+
+ return {'changed': False, 'state': 'absent'}
+
+ if state == 'present':
+ ########
+ # Create
+ ########
+ if not oc_sc.exists():
+
+ if check_mode:
+ return {'changed': True, 'msg': 'Would have performed a create.'}
+
+ # Create it here
+ api_rval = oc_sc.create()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ # return the created object
+ api_rval = oc_sc.get()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, 'state': 'present'}
+
+ ########
+ # Update
+ ########
+ if oc_sc.needs_update():
+ api_rval = oc_sc.update()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ # return the created object
+ api_rval = oc_sc.get()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, 'state': 'present'}
+
+ return {'changed': False, 'results': api_rval, 'state': 'present'}
+
+
+ return {'failed': True,
+ 'changed': False,
+ 'msg': 'Unknown state passed. %s' % state,
+ 'state': 'unknown'}
+
+# -*- -*- -*- End included fragment: class/oc_storageclass.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: ansible/oc_storageclass.py -*- -*- -*-
+
+def main():
+ '''
+ ansible oc module for storageclass
+ '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
+ state=dict(default='present', type='str', choices=['present', 'absent', 'list']),
+ debug=dict(default=False, type='bool'),
+ name=dict(default=None, type='str'),
+ annotations=dict(default=None, type='dict'),
+ parameters=dict(default=None, type='dict'),
+ provisioner=dict(default='aws-ebs', type='str', choices=['aws-ebs', 'gce-pd', 'glusterfs', 'cinder']),
+ api_version=dict(default='v1', type='str'),
+ default_storage_class=dict(default="false", type='str'),
+ ),
+ supports_check_mode=True,
+ )
+
+ rval = OCStorageClass.run_ansible(module.params, module.check_mode)
+ if 'failed' in rval:
+ return module.fail_json(**rval)
+
+ return module.exit_json(**rval)
+
+
+if __name__ == '__main__':
+ main()
+
+# -*- -*- -*- End included fragment: ansible/oc_storageclass.py -*- -*- -*-
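
For orientation: run_ansible prefixes the provisioner with kubernetes.io/ and the api_version with storage.k8s.io/, and StorageClassConfig.create_dict falls back to AWS gp2 parameters when none are given. With the module defaults, the resource handed to oc create is therefore equivalent to the following (the name gp2 is illustrative):

    apiVersion: storage.k8s.io/v1
    kind: StorageClass
    metadata:
      name: gp2
      annotations:
        storageclass.beta.kubernetes.io/is-default-class: "false"
    provisioner: kubernetes.io/aws-ebs
    parameters:
      type: gp2
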
diff --git a/roles/lib_openshift/src/ansible/oc_storageclass.py b/roles/lib_openshift/src/ansible/oc_storageclass.py
new file mode 100644
index 000000000..2bd8f18d5
--- /dev/null
+++ b/roles/lib_openshift/src/ansible/oc_storageclass.py
@@ -0,0 +1,32 @@
+# pylint: skip-file
+# flake8: noqa
+
+def main():
+ '''
+ ansible oc module for storageclass
+ '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
+ state=dict(default='present', type='str', choices=['present', 'absent', 'list']),
+ debug=dict(default=False, type='bool'),
+ name=dict(default=None, type='str'),
+ annotations=dict(default=None, type='dict'),
+ parameters=dict(default=None, type='dict'),
+ provisioner=dict(default='aws-ebs', type='str', choices=['aws-ebs', 'gce-pd', 'glusterfs', 'cinder']),
+ api_version=dict(default='v1', type='str'),
+ default_storage_class=dict(default="false", type='str'),
+ ),
+ supports_check_mode=True,
+ )
+
+ rval = OCStorageClass.run_ansible(module.params, module.check_mode)
+ if 'failed' in rval:
+ return module.fail_json(**rval)
+
+ return module.exit_json(**rval)
+
+
+if __name__ == '__main__':
+ main()
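
Because run_ansible honors check_mode, a dry run reports the pending change while executing only the read-only oc get. A minimal sketch (names illustrative):

    - name: preview removal of the storageclass
      oc_storageclass:
        name: gp2
        state: absent
      check_mode: yes
      register: sc_dry_run

    # sc_dry_run -> {'changed': True, 'msg': 'Would have performed a delete.'}
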
diff --git a/roles/lib_openshift/src/class/oc_storageclass.py b/roles/lib_openshift/src/class/oc_storageclass.py
new file mode 100644
index 000000000..aced586ae
--- /dev/null
+++ b/roles/lib_openshift/src/class/oc_storageclass.py
@@ -0,0 +1,155 @@
+# pylint: skip-file
+# flake8: noqa
+
+# pylint: disable=too-many-instance-attributes
+class OCStorageClass(OpenShiftCLI):
+ ''' Class to wrap the oc command line tools '''
+ kind = 'storageclass'
+
+ # pylint allows 5
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ config,
+ verbose=False):
+ ''' Constructor for OCStorageClass '''
+ super(OCStorageClass, self).__init__(None, kubeconfig=config.kubeconfig, verbose=verbose)
+ self.config = config
+ self.storage_class = None
+
+ def exists(self):
+ ''' return whether a storageclass exists'''
+ if self.storage_class:
+ return True
+
+ return False
+
+ def get(self):
+ '''return storageclass '''
+ result = self._get(self.kind, self.config.name)
+ if result['returncode'] == 0:
+ self.storage_class = StorageClass(content=result['results'][0])
+ elif '\"%s\" not found' % self.config.name in result['stderr']:
+ result['returncode'] = 0
+ result['results'] = [{}]
+
+ return result
+
+ def delete(self):
+ '''delete the object'''
+ return self._delete(self.kind, self.config.name)
+
+ def create(self):
+ '''create the object'''
+ return self._create_from_content(self.config.name, self.config.data)
+
+ def update(self):
+ '''update the object'''
+ # Parameters cannot currently be updated in place; delete and recreate the object.
+ self.delete()
+ # Pause briefly to give the delete time to complete.
+ # Polling for the deletion would be a more robust option.
+ import time
+ time.sleep(5)
+ return self.create()
+
+ def needs_update(self):
+ ''' verify an update is needed '''
+ # check if params have updated
+ if self.storage_class.get_parameters() != self.config.parameters:
+ return True
+
+ for anno_key, anno_value in self.storage_class.get_annotations().items():
+ if 'is-default-class' in anno_key and anno_value != self.config.default_storage_class:
+ return True
+
+ return False
+
+ @staticmethod
+ # pylint: disable=too-many-return-statements,too-many-branches
+ # TODO: This function should be refactored into its individual parts.
+ def run_ansible(params, check_mode):
+ '''run the ansible idempotent code'''
+
+ rconfig = StorageClassConfig(params['name'],
+ provisioner="kubernetes.io/{}".format(params['provisioner']),
+ parameters=params['parameters'],
+ annotations=params['annotations'],
+ api_version="storage.k8s.io/{}".format(params['api_version']),
+ default_storage_class=params.get('default_storage_class', 'false'),
+ kubeconfig=params['kubeconfig'],
+ )
+
+ oc_sc = OCStorageClass(rconfig, verbose=params['debug'])
+
+ state = params['state']
+
+ api_rval = oc_sc.get()
+
+ #####
+ # Get
+ #####
+ if state == 'list':
+ return {'changed': False, 'results': api_rval['results'], 'state': 'list'}
+
+ ########
+ # Delete
+ ########
+ if state == 'absent':
+ if oc_sc.exists():
+
+ if check_mode:
+ return {'changed': True, 'msg': 'Would have performed a delete.'}
+
+ api_rval = oc_sc.delete()
+
+ return {'changed': True, 'results': api_rval, 'state': 'absent'}
+
+ return {'changed': False, 'state': 'absent'}
+
+ if state == 'present':
+ ########
+ # Create
+ ########
+ if not oc_sc.exists():
+
+ if check_mode:
+ return {'changed': True, 'msg': 'Would have performed a create.'}
+
+ # Create it here
+ api_rval = oc_sc.create()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ # return the created object
+ api_rval = oc_sc.get()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, 'state': 'present'}
+
+ ########
+ # Update
+ ########
+ if oc_sc.needs_update():
+ api_rval = oc_sc.update()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ # return the created object
+ api_rval = oc_sc.get()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, 'state': 'present'}
+
+ return {'changed': False, 'results': api_rval, 'state': 'present'}
+
+
+ return {'failed': True,
+ 'changed': False,
+ 'msg': 'Unknown state passed. %s' % state,
+ 'state': 'unknown'}
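Because storageclass parameters are immutable, the update path above deletes and recreates
the object whenever needs_update() fires. A standalone sketch of that comparison, assuming
`current` is the dict returned by `oc get storageclass <name> -o json` (illustrative only):

    def needs_update(current, desired_params, desired_default):
        '''Return True when the storageclass must be deleted and recreated.'''
        if current.get('parameters', {}) != (desired_params or {}):
            return True
        annotations = current.get('metadata', {}).get('annotations', {})
        for key, value in annotations.items():
            # is-default-class is stored and compared as a lowercase string
            if 'is-default-class' in key and value != str(desired_default).lower():
                return True
        return False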
diff --git a/roles/lib_openshift/src/doc/storageclass b/roles/lib_openshift/src/doc/storageclass
new file mode 100644
index 000000000..5a7320d55
--- /dev/null
+++ b/roles/lib_openshift/src/doc/storageclass
@@ -0,0 +1,86 @@
+# flake8: noqa
+# pylint: skip-file
+
+DOCUMENTATION = '''
+---
+module: oc_storageclass
+short_description: Create, modify, and idempotently manage openshift storageclasses.
+description:
+ - Manage openshift storageclass objects programmatically.
+options:
+ state:
+ description:
+    - State represents whether to create, modify, delete, or list the object.
+ required: False
+ default: present
+ choices: ["present", "absent", "list"]
+ aliases: []
+ kubeconfig:
+ description:
+ - The path for the kubeconfig file to use for authentication
+ required: false
+ default: /etc/origin/master/admin.kubeconfig
+ aliases: []
+ debug:
+ description:
+ - Turn on debug output.
+ required: false
+ default: False
+ aliases: []
+ name:
+ description:
+ - Name of the object that is being queried.
+ required: false
+ default: None
+ aliases: []
+ provisioner:
+ description:
+    - The provisioner backend for the storageclass (one of aws-ebs, gce-pd, glusterfs, or cinder)
+ required: false
+ default: 'aws-ebs'
+ aliases: []
+ default_storage_class:
+ description:
+ - Whether or not this is the default storage class
+ required: false
+    default: 'false'
+ aliases: []
+ parameters:
+ description:
+    - A dictionary of parameters to configure the storageclass. The valid keys depend on the provisioner
+ required: false
+ default: None
+ aliases: []
+ api_version:
+ description:
+ - The api version.
+ required: false
+ default: v1
+ aliases: []
+author:
+- "Kenny Woodson <kwoodson@redhat.com>"
+extends_documentation_fragment: []
+'''
+
+EXAMPLES = '''
+- name: get storageclass
+ run_once: true
+ oc_storageclass:
+ name: gp2
+ state: list
+ register: registry_sc_out
+
+- name: create the storageclass
+  run_once: true
+  oc_storageclass:
+ name: gp2
+ parameters:
+ type: gp2
+ encrypted: 'true'
+ kmsKeyId: '<full kms key arn>'
+ provisioner: aws-ebs
+    default_storage_class: "false"
+ register: sc_out
+ notify:
+ - restart openshift master services
+'''
diff --git a/roles/lib_openshift/src/lib/storageclass.py b/roles/lib_openshift/src/lib/storageclass.py
new file mode 100644
index 000000000..ef12a8d2d
--- /dev/null
+++ b/roles/lib_openshift/src/lib/storageclass.py
@@ -0,0 +1,76 @@
+# pylint: skip-file
+# flake8: noqa
+
+
+# pylint: disable=too-many-instance-attributes
+class StorageClassConfig(object):
+    ''' Handle storageclass options '''
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ name,
+ provisioner=None,
+ parameters=None,
+ annotations=None,
+ default_storage_class="false",
+ api_version='v1',
+ kubeconfig='/etc/origin/master/admin.kubeconfig'):
+ ''' constructor for handling storageclass options '''
+ self.name = name
+ self.parameters = parameters
+ self.annotations = annotations
+ self.provisioner = provisioner
+ self.api_version = api_version
+ self.default_storage_class = str(default_storage_class).lower()
+ self.kubeconfig = kubeconfig
+ self.data = {}
+
+ self.create_dict()
+
+ def create_dict(self):
+ ''' instantiates a storageclass dict '''
+ self.data['apiVersion'] = self.api_version
+ self.data['kind'] = 'StorageClass'
+ self.data['metadata'] = {}
+ self.data['metadata']['name'] = self.name
+
+ self.data['metadata']['annotations'] = {}
+ if self.annotations is not None:
+ self.data['metadata']['annotations'] = self.annotations
+
+ self.data['metadata']['annotations']['storageclass.beta.kubernetes.io/is-default-class'] = \
+ self.default_storage_class
+
+ if self.provisioner is None:
+ self.data['provisioner'] = 'kubernetes.io/aws-ebs'
+ else:
+ self.data['provisioner'] = self.provisioner
+
+ self.data['parameters'] = {}
+ if self.parameters is not None:
+ self.data['parameters'].update(self.parameters)
+
+ # default to aws if no params were passed
+ else:
+ self.data['parameters']['type'] = 'gp2'
+
+
+
+# pylint: disable=too-many-instance-attributes,too-many-public-methods
+class StorageClass(Yedit):
+ ''' Class to model the oc storageclass object '''
+ annotations_path = "metadata.annotations"
+ provisioner_path = "provisioner"
+ parameters_path = "parameters"
+ kind = 'StorageClass'
+
+ def __init__(self, content):
+ '''StorageClass constructor'''
+ super(StorageClass, self).__init__(content=content)
+
+ def get_annotations(self):
+        ''' get the storageclass annotations '''
+ return self.get(StorageClass.annotations_path) or {}
+
+ def get_parameters(self):
+        ''' get the storageclass parameters '''
+ return self.get(StorageClass.parameters_path) or {}
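For reference, a config built with only a name and the constructor defaults above
serializes to the following dict (shown as a Python literal; the 'gp2' name is just an
example, and run_ansible would pass a 'storage.k8s.io/...' api_version instead):

    # StorageClassConfig('gp2').data with the defaults above:
    {
        'apiVersion': 'v1',
        'kind': 'StorageClass',
        'metadata': {
            'name': 'gp2',
            'annotations': {
                'storageclass.beta.kubernetes.io/is-default-class': 'false',
            },
        },
        'provisioner': 'kubernetes.io/aws-ebs',
        # no parameters were passed, so the AWS default volume type applies
        'parameters': {'type': 'gp2'},
    }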
diff --git a/roles/lib_openshift/src/sources.yml b/roles/lib_openshift/src/sources.yml
index 9fa2a6c0e..e9b6bf261 100644
--- a/roles/lib_openshift/src/sources.yml
+++ b/roles/lib_openshift/src/sources.yml
@@ -263,6 +263,17 @@ oc_service.py:
- class/oc_service.py
- ansible/oc_service.py
+oc_storageclass.py:
+- doc/generated
+- doc/license
+- lib/import.py
+- doc/storageclass
+- ../../lib_utils/src/class/yedit.py
+- lib/base.py
+- lib/storageclass.py
+- class/oc_storageclass.py
+- ansible/oc_storageclass.py
+
oc_user.py:
- doc/generated
- doc/license
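The sources.yml entry above drives single-file module generation: the listed fragments are
concatenated, in order, into roles/lib_openshift/library/oc_storageclass.py. A minimal
sketch of that assembly step (hypothetical helper; the role's actual generator may differ):

    import os
    import yaml

    def generate_module(sources_path, name, out_dir):
        '''Concatenate the fragments listed under `name` in sources.yml.'''
        src_dir = os.path.dirname(sources_path)
        with open(sources_path) as src:
            fragments = yaml.safe_load(src)[name]
        parts = []
        for fragment in fragments:
            with open(os.path.join(src_dir, fragment)) as frag:
                parts.append(frag.read())
        with open(os.path.join(out_dir, name), 'w') as out:
            out.write(''.join(parts))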
diff --git a/roles/lib_openshift/src/test/integration/oc_storageclass.yml b/roles/lib_openshift/src/test/integration/oc_storageclass.yml
new file mode 100755
index 000000000..c82f9dedb
--- /dev/null
+++ b/roles/lib_openshift/src/test/integration/oc_storageclass.yml
@@ -0,0 +1,87 @@
+#!/usr/bin/ansible-playbook --module-path=../../../library/
+# ./oc_storageclass.yml -M ../../../library -e "cli_master_test=$OPENSHIFT_MASTER"
+---
+- hosts: "{{ cli_master_test }}"
+ gather_facts: no
+ user: root
+ tasks:
+ - name: create a storageclass
+ oc_storageclass:
+ name: testsc
+ parameters:
+ type: gp2
+ default_storage_class: "true"
+ register: sc_out
+ - debug: var=sc_out
+
+ - assert:
+ that:
+ - "sc_out.results.results[0]['metadata']['name'] == 'testsc'"
+ - sc_out.changed
+ - "sc_out.results.results[0]['parameters']['type'] == 'gp2'"
+ msg: storageclass create failed.
+
+ # Test idempotent create
+ - name: NOOP create the storageclass
+ oc_storageclass:
+ name: testsc
+ parameters:
+ type: gp2
+ default_storage_class: "true"
+ register: sc_out
+
+ - assert:
+ that:
+ - "sc_out.results.results[0]['metadata']['name'] == 'testsc'"
+ - sc_out.changed == False
+ msg: storageclass create failed. No changes expected
+
+ - name: test list storageclass
+ oc_storageclass:
+ name: testsc
+ state: list
+ register: sc_out
+ - debug: var=sc_out
+
+ - assert:
+ that: "sc_out.results[0]['metadata']['name'] == 'testsc'"
+ msg: storageclass list failed
+
+ - name: update the storageclass
+ oc_storageclass:
+ name: testsc
+ parameters:
+ type: gp2
+ encrypted: "true"
+ default_storage_class: "true"
+ register: sc_out
+
+ - assert:
+ that: "sc_out.results.results[0]['parameters']['encrypted'] == 'true'"
+ msg: storageclass update failed
+
+ - name: oc delete storageclass
+ oc_storageclass:
+ name: testsc
+ state: absent
+ register: sc_out
+ - debug: var=sc_out
+
+ - assert:
+ that:
+ - "sc_out.results['returncode'] == 0"
+ - "sc_out.results.results == {}"
+ msg: storageclass delete failed
+
+ - name: oc get storageclass
+ oc_storageclass:
+ name: testsc
+ state: list
+ register: sc_out
+ - debug: var=sc_out
+
+ - assert:
+ that:
+ - sc_out.changed == False
+ - "sc_out.results == [{}]"
+ msg: storageclass get failed
diff --git a/roles/lib_openshift/src/test/unit/test_oc_storageclass.py b/roles/lib_openshift/src/test/unit/test_oc_storageclass.py
new file mode 100755
index 000000000..4fd02a8b1
--- /dev/null
+++ b/roles/lib_openshift/src/test/unit/test_oc_storageclass.py
@@ -0,0 +1,93 @@
+'''
+ Unit tests for oc storageclass
+'''
+
+import os
+import sys
+import unittest
+import mock
+
+# Removing invalid variable names for tests so that I can
+# keep them brief
+# pylint: disable=invalid-name,no-name-in-module
+# Disable import-error b/c our libraries aren't loaded in jenkins
+# pylint: disable=import-error
+# place class in our python path
+module_path = os.path.join('/'.join(os.path.realpath(__file__).split('/')[:-4]), 'library') # noqa: E501
+sys.path.insert(0, module_path)
+from oc_storageclass import OCStorageClass # noqa: E402
+
+
+class OCStorageClassTest(unittest.TestCase):
+ '''
+ Test class for OCStorageClass
+ '''
+ params = {'kubeconfig': '/etc/origin/master/admin.kubeconfig',
+ 'state': 'present',
+ 'debug': False,
+ 'name': 'testsc',
+ 'provisioner': 'kubernetes.io/aws-ebs',
+ 'annotations': {'storageclass.beta.kubernetes.io/is-default-class': "true"},
+ 'parameters': {'type': 'gp2'},
+ 'api_version': 'v1',
+ 'default_storage_class': 'true'}
+
+ @mock.patch('oc_storageclass.locate_oc_binary')
+ @mock.patch('oc_storageclass.Utils.create_tmpfile_copy')
+ @mock.patch('oc_storageclass.OCStorageClass._run')
+ def test_adding_a_storageclass(self, mock_cmd, mock_tmpfile_copy, mock_oc_binary):
+ ''' Testing adding a storageclass '''
+
+ # Arrange
+
+ # run_ansible input parameters
+
+ valid_result_json = '''{
+ "kind": "StorageClass",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "testsc",
+            "selfLink": "/apis/storage.k8s.io/v1/storageclasses/testsc",
+ "uid": "4d8320c9-e66f-11e6-8edc-0eece8f2ce22",
+ "resourceVersion": "2828",
+ "creationTimestamp": "2017-01-29T22:07:19Z",
+ "annotations": {"storageclass.beta.kubernetes.io/is-default-class": "true"}
+ },
+ "provisioner": "kubernetes.io/aws-ebs",
+ "parameters": {"type": "gp2"}
+ }'''
+
+ # Return values of our mocked function call. These get returned once per call.
+ mock_cmd.side_effect = [
+ # First call to mock
+ (1, '', 'Error from server: storageclass "testsc" not found'),
+
+ # Second call to mock
+ (0, 'storageclass "testsc" created', ''),
+
+ # Third call to mock
+ (0, valid_result_json, ''),
+ ]
+
+ mock_oc_binary.side_effect = [
+ 'oc'
+ ]
+
+ mock_tmpfile_copy.side_effect = [
+ '/tmp/mocked_kubeconfig',
+ ]
+
+ # Act
+ results = OCStorageClass.run_ansible(OCStorageClassTest.params, False)
+
+ # Assert
+ self.assertTrue(results['changed'])
+ self.assertEqual(results['results']['returncode'], 0)
+ self.assertEqual(results['state'], 'present')
+
+ # Making sure our mock was called as we expected
+ mock_cmd.assert_has_calls([
+ mock.call(['oc', 'get', 'storageclass', 'testsc', '-o', 'json'], None),
+ mock.call(['oc', 'create', '-f', mock.ANY], None),
+ mock.call(['oc', 'get', 'storageclass', 'testsc', '-o', 'json'], None),
+ ])
diff --git a/roles/openshift_default_storage_class/defaults/main.yml b/roles/openshift_default_storage_class/defaults/main.yml
index 66ffd2a73..bda83c933 100644
--- a/roles/openshift_default_storage_class/defaults/main.yml
+++ b/roles/openshift_default_storage_class/defaults/main.yml
@@ -1,9 +1,12 @@
---
openshift_storageclass_defaults:
aws:
- name: gp2
provisioner: kubernetes.io/aws-ebs
- type: gp2
+ name: gp2
+ parameters:
+ type: gp2
+ kmsKeyId: ''
+ encrypted: 'false'
gce:
name: standard
provisioner: kubernetes.io/gce-pd
@@ -11,4 +14,4 @@ openshift_storageclass_defaults:
openshift_storageclass_name: "{{ openshift_storageclass_defaults[openshift_cloudprovider_kind]['name'] }}"
openshift_storageclass_provisioner: "{{ openshift_storageclass_defaults[openshift_cloudprovider_kind]['provisioner'] }}"
-openshift_storageclass_type: "{{ openshift_storageclass_defaults[openshift_cloudprovider_kind]['type'] }}"
+openshift_storageclass_parameters: "{{ openshift_storageclass_defaults[openshift_cloudprovider_kind]['parameters'] }}"
diff --git a/roles/openshift_default_storage_class/tasks/main.yml b/roles/openshift_default_storage_class/tasks/main.yml
index 408fc17c7..fd5e4fabe 100644
--- a/roles/openshift_default_storage_class/tasks/main.yml
+++ b/roles/openshift_default_storage_class/tasks/main.yml
@@ -1,19 +1,12 @@
---
# Install default storage classes in GCE & AWS
- name: Ensure storageclass object
- oc_obj:
+ oc_storageclass:
kind: storageclass
name: "{{ openshift_storageclass_name }}"
- content:
- path: /tmp/openshift_storageclass
- data:
- kind: StorageClass
- apiVersion: storage.k8s.io/v1beta1
- metadata:
- name: "{{ openshift_storageclass_name }}"
- annotations:
- storageclass.beta.kubernetes.io/is-default-class: "true"
- provisioner: "{{ openshift_storageclass_provisioner }}"
- parameters:
- type: "{{ openshift_storageclass_type }}"
+ default_storage_class: "true"
+ parameters:
+ type: "{{ openshift_storageclass_parameters.type | default('gp2') }}"
+ encrypted: "{{ openshift_storageclass_parameters.encrypted | default('false') | string }}"
+ kmsKeyId: "{{ openshift_storageclass_parameters.kmsKeyId | default('') }}"
run_once: true
diff --git a/roles/openshift_examples/tasks/main.yml b/roles/openshift_examples/tasks/main.yml
index 551e21e72..1a4562776 100644
--- a/roles/openshift_examples/tasks/main.yml
+++ b/roles/openshift_examples/tasks/main.yml
@@ -53,7 +53,7 @@
# RHEL and Centos image streams are mutually exclusive
- name: Import RHEL streams
command: >
- {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} -n openshift -f {{ item }}
+ {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ item }}
when: openshift_examples_load_rhel | bool
with_items:
- "{{ rhel_image_streams }}"
@@ -63,7 +63,7 @@
- name: Import Centos Image streams
command: >
- {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} -n openshift -f {{ centos_image_streams }}
+ {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ centos_image_streams }}
when: openshift_examples_load_centos | bool
register: oex_import_centos_streams
failed_when: "'already exists' not in oex_import_centos_streams.stderr and oex_import_centos_streams.rc != 0"
@@ -71,7 +71,7 @@
- name: Import db templates
command: >
- {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} -n openshift -f {{ db_templates_base }}
+ {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ db_templates_base }}
when: openshift_examples_load_db_templates | bool
register: oex_import_db_templates
failed_when: "'already exists' not in oex_import_db_templates.stderr and oex_import_db_templates.rc != 0"
@@ -88,7 +88,7 @@
- "{{ quickstarts_base }}/django.json"
- name: Remove defunct quickstart templates from openshift namespace
- command: "{{ openshift.common.client_binary }} -n openshift delete templates/{{ item }}"
+ command: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift delete templates/{{ item }}"
with_items:
- nodejs-example
- cakephp-example
@@ -100,7 +100,7 @@
- name: Import quickstart-templates
command: >
- {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} -n openshift -f {{ quickstarts_base }}
+ {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ quickstarts_base }}
when: openshift_examples_load_quickstarts | bool
register: oex_import_quickstarts
failed_when: "'already exists' not in oex_import_quickstarts.stderr and oex_import_quickstarts.rc != 0"
@@ -114,7 +114,7 @@
- "{{ xpaas_templates_base }}/sso70-basic.json"
- name: Remove old xPaas templates from openshift namespace
- command: "{{ openshift.common.client_binary }} -n openshift delete templates/{{ item }}"
+ command: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift delete templates/{{ item }}"
with_items:
- sso70-basic
register: oex_delete_old_xpaas_templates
@@ -123,7 +123,7 @@
- name: Import xPaas image streams
command: >
- {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} -n openshift -f {{ xpaas_image_streams }}
+ {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ xpaas_image_streams }}
when: openshift_examples_load_xpaas | bool
register: oex_import_xpaas_streams
failed_when: "'already exists' not in oex_import_xpaas_streams.stderr and oex_import_xpaas_streams.rc != 0"
@@ -131,7 +131,7 @@
- name: Import xPaas templates
command: >
- {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} -n openshift -f {{ xpaas_templates_base }}
+ {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ xpaas_templates_base }}
when: openshift_examples_load_xpaas | bool
register: oex_import_xpaas_templates
failed_when: "'already exists' not in oex_import_xpaas_templates.stderr and oex_import_xpaas_templates.rc != 0"
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index 04b5dc86b..beef77896 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -1643,38 +1643,75 @@ def set_proxy_facts(facts):
if 'common' in facts:
common = facts['common']
- # No openshift_no_proxy settings detected, empty list for now
- if 'no_proxy' not in common:
- common['no_proxy'] = []
-
- # _no_proxy settings set. It is just a simple string, not a
- # list or anything
- elif 'no_proxy' in common and isinstance(common['no_proxy'], string_types):
- # no_proxy is now a list of all the comma-separated items
- # in the _no_proxy value
- common['no_proxy'] = common['no_proxy'].split(",")
-
- # at this point common['no_proxy'] is a LIST datastructure. It
- # may be empty, or it may contain some hostnames or ranges.
-
- # We always add local dns domain, the service domain, and
- # ourselves, no matter what (if you are setting any
- # NO_PROXY values)
- common['no_proxy'].append('.svc')
- common['no_proxy'].append('.' + common['dns_domain'])
- common['no_proxy'].append(common['hostname'])
-
- # You are also setting system proxy vars, openshift_http_proxy/openshift_https_proxy
- if 'http_proxy' in common or 'https_proxy' in common:
- # You want to generate no_proxy hosts and it's a boolean value
- if 'generate_no_proxy_hosts' in common and safe_get_bool(common['generate_no_proxy_hosts']):
- # And you want to set up no_proxy for internal hostnames
- if 'no_proxy_internal_hostnames' in common:
- # Split the internal_hostnames string by a comma
- # and add that list to the overall no_proxy list
- common['no_proxy'].extend(common['no_proxy_internal_hostnames'].split(','))
-
- common['no_proxy'] = ','.join(sort_unique(common['no_proxy']))
+ ######################################################################
+ # We can exit early now if we don't need to set any proxy facts
+ proxy_params = ['no_proxy', 'https_proxy', 'http_proxy']
+ # If any of the known Proxy Params (pp) are defined
+        proxy_settings_defined = any(pp in common for pp in proxy_params)
+
+ if not proxy_settings_defined:
+ common['no_proxy'] = ''
+ return facts
+
+ # As of 3.6 if ANY of the proxy parameters are defined in the
+ # inventory then we MUST add certain domains to the NO_PROXY
+ # environment variable.
+
+ ######################################################################
+
+ # Spot to build up some data we may insert later
+ raw_no_proxy_list = []
+
+ # Automatic 3.6 NO_PROXY additions if a proxy is in use
+ svc_cluster_name = ['.svc', '.' + common['dns_domain'], common['hostname']]
+
+        # auto_hosts: Added to NO_PROXY list if any proxy params are
+        # set in the inventory. This is a list of the FQDNs of all
+        # cluster hosts:
+        auto_hosts = common.get('no_proxy_internal_hostnames', '')
+        auto_hosts = auto_hosts.split(',') if auto_hosts else []
+
+ # custom_no_proxy_hosts: If you define openshift_no_proxy in
+ # inventory we automatically add those hosts to the list:
+ if 'no_proxy' in common:
+ custom_no_proxy_hosts = common['no_proxy'].split(',')
+ else:
+ custom_no_proxy_hosts = []
+
+        # This should exist no matter what; default to true if unset
+        # so the variable is always defined.
+        generate_no_proxy_hosts = True
+        if 'generate_no_proxy_hosts' in common:
+            generate_no_proxy_hosts = safe_get_bool(common['generate_no_proxy_hosts'])
+
+ ######################################################################
+
+ # You set a proxy var. Now we are obliged to add some things
+ raw_no_proxy_list = svc_cluster_name + custom_no_proxy_hosts
+
+ # You did not turn openshift_generate_no_proxy_hosts to False
+ if generate_no_proxy_hosts:
+ raw_no_proxy_list.extend(auto_hosts)
+
+ ######################################################################
+
+ # Was anything actually added? There should be something by now.
+ processed_no_proxy_list = sort_unique(raw_no_proxy_list)
+        if processed_no_proxy_list:
+ common['no_proxy'] = ','.join(processed_no_proxy_list)
+ else:
+ # Somehow we got an empty list. This should have been
+ # skipped by now in the 'return' earlier. If
+ # common['no_proxy'] is DEFINED it will cause unexpected
+ # behavior and bad templating. Ensure it does not
+ # exist. Even an empty list or string will have undesired
+ # side-effects.
+ del common['no_proxy']
+
+ ######################################################################
+        # 'common' is a reference to facts['common'], so the updates
+        # above are already visible there; no re-assignment is needed.
+
return facts
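A worked example of the rewritten logic, assuming a proxy is configured,
generate_no_proxy_hosts is left at its default of true, and openshift_no_proxy adds one
custom host (all values hypothetical):

    common = {
        'http_proxy': 'http://proxy.example.com:3128',
        'no_proxy': 'registry.example.com',
        'no_proxy_internal_hostnames': 'master1.example.com,node1.example.com',
        'dns_domain': 'cluster.local',
        'hostname': 'master1.example.com',
        'generate_no_proxy_hosts': 'true',
    }
    # After set_proxy_facts, common['no_proxy'] is the sorted, de-duplicated join:
    # '.cluster.local,.svc,master1.example.com,node1.example.com,registry.example.com'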
diff --git a/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py b/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py
index 64c29a8d9..443b76ea1 100644
--- a/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py
+++ b/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py
@@ -39,7 +39,8 @@ class CallbackModule(CallbackBase):
def v2_runner_on_failed(self, result, ignore_errors=False):
super(CallbackModule, self).v2_runner_on_failed(result, ignore_errors)
- self.__failures.append(dict(result=result, ignore_errors=ignore_errors))
+ if not ignore_errors:
+ self.__failures.append(dict(result=result, ignore_errors=ignore_errors))
def v2_playbook_on_stats(self, stats):
super(CallbackModule, self).v2_playbook_on_stats(stats)
diff --git a/roles/openshift_health_checker/library/aos_version.py b/roles/openshift_health_checker/library/aos_version.py
index 4c205e48c..4f43ee751 100755
--- a/roles/openshift_health_checker/library/aos_version.py
+++ b/roles/openshift_health_checker/library/aos_version.py
@@ -19,6 +19,10 @@ the inventory, the version comparison checks just pass.
'''
from ansible.module_utils.basic import AnsibleModule
+# NOTE: because of the dependency on yum (Python 2-only), this module does not
+# work under Python 3. But since we run unit tests against both Python 2 and
+# Python 3, we use six for cross compatibility in this module alone:
+from ansible.module_utils.six import string_types
IMPORT_EXCEPTION = None
try:
@@ -122,12 +126,15 @@ def _check_precise_version_found(pkgs, expected_pkgs_dict):
for pkg in pkgs:
if pkg.name not in expected_pkgs_dict:
continue
- # does the version match, to the precision requested?
- # and, is it strictly greater, at the precision requested?
- expected_pkg_version = expected_pkgs_dict[pkg.name]["version"]
- match_version = '.'.join(pkg.version.split('.')[:expected_pkg_version.count('.') + 1])
- if match_version == expected_pkg_version:
- pkgs_precise_version_found.add(pkg.name)
+ expected_pkg_versions = expected_pkgs_dict[pkg.name]["version"]
+ if isinstance(expected_pkg_versions, string_types):
+ expected_pkg_versions = [expected_pkg_versions]
+ for expected_pkg_version in expected_pkg_versions:
+ # does the version match, to the precision requested?
+ # and, is it strictly greater, at the precision requested?
+ match_version = '.'.join(pkg.version.split('.')[:expected_pkg_version.count('.') + 1])
+ if match_version == expected_pkg_version:
+ pkgs_precise_version_found.add(pkg.name)
not_found = []
for name, pkg in expected_pkgs_dict.items():
@@ -157,8 +164,13 @@ def _check_higher_version_found(pkgs, expected_pkgs_dict):
for pkg in pkgs:
if pkg.name not in expected_pkg_names:
continue
- expected_pkg_version = expected_pkgs_dict[pkg.name]["version"]
- req_release_arr = [int(segment) for segment in expected_pkg_version.split(".")]
+ expected_pkg_versions = expected_pkgs_dict[pkg.name]["version"]
+ if isinstance(expected_pkg_versions, string_types):
+ expected_pkg_versions = [expected_pkg_versions]
+ # NOTE: the list of versions is assumed to be sorted so that the highest
+ # desirable version is the last.
+ highest_desirable_version = expected_pkg_versions[-1]
+ req_release_arr = [int(segment) for segment in highest_desirable_version.split(".")]
version = [int(segment) for segment in pkg.version.split(".")]
too_high = version[:len(req_release_arr)] > req_release_arr
higher_than_seen = version > higher_version_for_pkg.get(pkg.name, [])
diff --git a/roles/openshift_health_checker/openshift_checks/docker_storage.py b/roles/openshift_health_checker/openshift_checks/docker_storage.py
index 8d0fbcc9c..e80691ef3 100644
--- a/roles/openshift_health_checker/openshift_checks/docker_storage.py
+++ b/roles/openshift_health_checker/openshift_checks/docker_storage.py
@@ -17,7 +17,7 @@ class DockerStorage(DockerHostMixin, OpenShiftCheck):
tags = ["pre-install", "health", "preflight"]
dependencies = ["python-docker-py"]
- storage_drivers = ["devicemapper", "overlay2"]
+ storage_drivers = ["devicemapper", "overlay", "overlay2"]
max_thinpool_data_usage_percent = 90.0
max_thinpool_meta_usage_percent = 90.0
diff --git a/roles/openshift_health_checker/openshift_checks/logging/kibana.py b/roles/openshift_health_checker/openshift_checks/logging/kibana.py
index 442f407b1..551e8dfa0 100644
--- a/roles/openshift_health_checker/openshift_checks/logging/kibana.py
+++ b/roles/openshift_health_checker/openshift_checks/logging/kibana.py
@@ -62,7 +62,7 @@ class Kibana(LoggingCheck):
# TODO(lmeyer): give users option to validate certs
status_code=302,
)
- result = self.execute_module('uri', args, task_vars)
+ result = self.execute_module('uri', args, None, task_vars)
if result.get('failed'):
return result['msg']
return None
diff --git a/roles/openshift_health_checker/openshift_checks/logging/logging.py b/roles/openshift_health_checker/openshift_checks/logging/logging.py
index 05b4d300c..6e951e82c 100644
--- a/roles/openshift_health_checker/openshift_checks/logging/logging.py
+++ b/roles/openshift_health_checker/openshift_checks/logging/logging.py
@@ -54,12 +54,12 @@ class LoggingCheck(OpenShiftCheck):
"""Returns: list of pods not in a ready and running state"""
return [
pod for pod in pods
- if any(
+ if not pod.get("status", {}).get("containerStatuses") or any(
container['ready'] is False
for container in pod['status']['containerStatuses']
) or not any(
condition['type'] == 'Ready' and condition['status'] == 'True'
- for condition in pod['status']['conditions']
+ for condition in pod['status'].get('conditions', [])
)
]
@@ -78,7 +78,7 @@ class LoggingCheck(OpenShiftCheck):
"extra_args": list(extra_args) if extra_args else [],
}
- result = execute_module("ocutil", args, task_vars)
+ result = execute_module("ocutil", args, None, task_vars)
if result.get("failed"):
msg = (
'Unexpected error using `oc` to validate the logging stack components.\n'
diff --git a/roles/openshift_health_checker/openshift_checks/package_version.py b/roles/openshift_health_checker/openshift_checks/package_version.py
index 6a76bb93d..204752bd0 100644
--- a/roles/openshift_health_checker/openshift_checks/package_version.py
+++ b/roles/openshift_health_checker/openshift_checks/package_version.py
@@ -10,8 +10,8 @@ class PackageVersion(NotContainerizedMixin, OpenShiftCheck):
tags = ["preflight"]
openshift_to_ovs_version = {
- "3.6": "2.6",
- "3.5": "2.6",
+ "3.6": ["2.6", "2.7"],
+ "3.5": ["2.6", "2.7"],
"3.4": "2.4",
}
diff --git a/roles/openshift_health_checker/test/aos_version_test.py b/roles/openshift_health_checker/test/aos_version_test.py
index 697805dd2..4100f6c70 100644
--- a/roles/openshift_health_checker/test/aos_version_test.py
+++ b/roles/openshift_health_checker/test/aos_version_test.py
@@ -18,7 +18,43 @@ expected_pkgs = {
}
-@pytest.mark.parametrize('pkgs, expect_not_found', [
+@pytest.mark.parametrize('pkgs,expected_pkgs_dict', [
+ (
+ # all found
+ [Package('spam', '3.2.1'), Package('eggs', '3.2.1')],
+ expected_pkgs,
+ ),
+ (
+ # found with more specific version
+ [Package('spam', '3.2.1'), Package('eggs', '3.2.1.5')],
+ expected_pkgs,
+ ),
+ (
+ [Package('ovs', '2.6'), Package('ovs', '2.4')],
+ {
+ "ovs": {
+ "name": "ovs",
+ "version": ["2.6", "2.7"],
+ "check_multi": False,
+ }
+ },
+ ),
+ (
+ [Package('ovs', '2.7')],
+ {
+ "ovs": {
+ "name": "ovs",
+ "version": ["2.6", "2.7"],
+ "check_multi": False,
+ }
+ },
+ ),
+])
+def test_check_precise_version_found(pkgs, expected_pkgs_dict):
+ aos_version._check_precise_version_found(pkgs, expected_pkgs_dict)
+
+
+@pytest.mark.parametrize('pkgs,expect_not_found', [
(
[],
{
@@ -55,14 +91,6 @@ expected_pkgs = {
}, # not the right version
),
(
- [Package('spam', '3.2.1'), Package('eggs', '3.2.1')],
- {}, # all found
- ),
- (
- [Package('spam', '3.2.1'), Package('eggs', '3.2.1.5')],
- {}, # found with more specific version
- ),
- (
[Package('eggs', '1.2.3'), Package('eggs', '3.2.1.5')],
{
"spam": {
@@ -73,64 +101,86 @@ expected_pkgs = {
}, # eggs found with multiple versions
),
])
-def test_check_pkgs_for_precise_version(pkgs, expect_not_found):
- if expect_not_found:
- with pytest.raises(aos_version.PreciseVersionNotFound) as e:
- aos_version._check_precise_version_found(pkgs, expected_pkgs)
-
- assert list(expect_not_found.values()) == e.value.problem_pkgs
- else:
+def test_check_precise_version_found_fail(pkgs, expect_not_found):
+ with pytest.raises(aos_version.PreciseVersionNotFound) as e:
aos_version._check_precise_version_found(pkgs, expected_pkgs)
+ assert list(expect_not_found.values()) == e.value.problem_pkgs
-@pytest.mark.parametrize('pkgs, expect_higher', [
+@pytest.mark.parametrize('pkgs,expected_pkgs_dict', [
(
[],
- [],
+ expected_pkgs,
),
(
+ # more precise but not strictly higher
[Package('spam', '3.2.1.9')],
- [], # more precise but not strictly higher
+ expected_pkgs,
),
(
+ [Package('ovs', '2.7')],
+ {
+ "ovs": {
+ "name": "ovs",
+ "version": ["2.6", "2.7"],
+ "check_multi": False,
+ }
+ },
+ ),
+])
+def test_check_higher_version_found(pkgs, expected_pkgs_dict):
+ aos_version._check_higher_version_found(pkgs, expected_pkgs_dict)
+
+
+@pytest.mark.parametrize('pkgs,expected_pkgs_dict,expect_higher', [
+ (
[Package('spam', '3.3')],
+ expected_pkgs,
['spam-3.3'], # lower precision, but higher
),
(
[Package('spam', '3.2.1'), Package('eggs', '3.3.2')],
+ expected_pkgs,
['eggs-3.3.2'], # one too high
),
(
[Package('eggs', '1.2.3'), Package('eggs', '3.2.1.5'), Package('eggs', '3.4')],
+ expected_pkgs,
['eggs-3.4'], # multiple versions, one is higher
),
(
[Package('eggs', '3.2.1'), Package('eggs', '3.4'), Package('eggs', '3.3')],
+ expected_pkgs,
['eggs-3.4'], # multiple versions, two are higher
),
+ (
+ [Package('ovs', '2.8')],
+ {
+ "ovs": {
+ "name": "ovs",
+ "version": ["2.6", "2.7"],
+ "check_multi": False,
+ }
+ },
+ ['ovs-2.8'],
+ ),
])
-def test_check_pkgs_for_greater_version(pkgs, expect_higher):
- if expect_higher:
- with pytest.raises(aos_version.FoundHigherVersion) as e:
- aos_version._check_higher_version_found(pkgs, expected_pkgs)
- assert set(expect_higher) == set(e.value.problem_pkgs)
- else:
- aos_version._check_higher_version_found(pkgs, expected_pkgs)
+def test_check_higher_version_found_fail(pkgs, expected_pkgs_dict, expect_higher):
+ with pytest.raises(aos_version.FoundHigherVersion) as e:
+ aos_version._check_higher_version_found(pkgs, expected_pkgs_dict)
+ assert set(expect_higher) == set(e.value.problem_pkgs)
-@pytest.mark.parametrize('pkgs, expect_to_flag_pkgs', [
- (
- [],
- [],
- ),
- (
- [Package('spam', '3.2.1')],
- [],
- ),
- (
- [Package('spam', '3.2.1'), Package('eggs', '3.2.2')],
- [],
- ),
+@pytest.mark.parametrize('pkgs', [
+ [],
+ [Package('spam', '3.2.1')],
+ [Package('spam', '3.2.1'), Package('eggs', '3.2.2')],
+])
+def test_check_multi_minor_release(pkgs):
+ aos_version._check_multi_minor_release(pkgs, expected_pkgs)
+
+
+@pytest.mark.parametrize('pkgs,expect_to_flag_pkgs', [
(
[Package('spam', '3.2.1'), Package('spam', '3.3.2')],
['spam'],
@@ -140,10 +190,7 @@ def test_check_pkgs_for_greater_version(pkgs, expect_higher):
['eggs'],
),
])
-def test_check_pkgs_for_multi_release(pkgs, expect_to_flag_pkgs):
- if expect_to_flag_pkgs:
- with pytest.raises(aos_version.FoundMultiRelease) as e:
- aos_version._check_multi_minor_release(pkgs, expected_pkgs)
- assert set(expect_to_flag_pkgs) == set(e.value.problem_pkgs)
- else:
+def test_check_multi_minor_release_fail(pkgs, expect_to_flag_pkgs):
+ with pytest.raises(aos_version.FoundMultiRelease) as e:
aos_version._check_multi_minor_release(pkgs, expected_pkgs)
+ assert set(expect_to_flag_pkgs) == set(e.value.problem_pkgs)
diff --git a/roles/openshift_health_checker/test/docker_storage_test.py b/roles/openshift_health_checker/test/docker_storage_test.py
index 876614b1d..bb25e3f66 100644
--- a/roles/openshift_health_checker/test/docker_storage_test.py
+++ b/roles/openshift_health_checker/test/docker_storage_test.py
@@ -65,8 +65,8 @@ non_atomic_task_vars = {"openshift": {"common": {"is_atomic": False}}}
dict(info={
"Driver": "overlay",
}),
- True,
- ["unsupported Docker storage driver"],
+ False,
+ [],
),
(
dict(info={
diff --git a/roles/openshift_health_checker/test/kibana_test.py b/roles/openshift_health_checker/test/kibana_test.py
index 19140a1b6..40a5d19d8 100644
--- a/roles/openshift_health_checker/test/kibana_test.py
+++ b/roles/openshift_health_checker/test/kibana_test.py
@@ -169,7 +169,7 @@ def test_get_kibana_url(route, expect_url, expect_error):
),
])
def test_verify_url_internal_failure(exec_result, expect):
- check = Kibana(execute_module=lambda module_name, args, task_vars: dict(failed=True, msg=exec_result))
+ check = Kibana(execute_module=lambda module_name, args, tmp, task_vars: dict(failed=True, msg=exec_result))
check._get_kibana_url = lambda task_vars: ('url', None)
error = check._check_kibana_route({})
diff --git a/roles/openshift_health_checker/test/logging_check_test.py b/roles/openshift_health_checker/test/logging_check_test.py
index b6db34fe3..128b76b12 100644
--- a/roles/openshift_health_checker/test/logging_check_test.py
+++ b/roles/openshift_health_checker/test/logging_check_test.py
@@ -50,6 +50,16 @@ plain_kibana_pod = {
}
}
+plain_kibana_pod_no_containerstatus = {
+ "metadata": {
+ "labels": {"component": "kibana", "deploymentconfig": "logging-kibana"},
+ "name": "logging-kibana-1",
+ },
+ "status": {
+ "conditions": [{"status": "True", "type": "Ready"}],
+ }
+}
+
fluentd_pod_node1 = {
"metadata": {
"labels": {"component": "fluentd", "deploymentconfig": "logging-fluentd"},
@@ -80,7 +90,7 @@ plain_curator_pod = {
("Permission denied", "Unexpected error using `oc`"),
])
def test_oc_failure(problem, expect):
- def execute_module(module_name, args, task_vars):
+ def execute_module(module_name, args, tmp, task_vars):
if module_name == "ocutil":
return dict(failed=True, result=problem)
return dict(changed=False)
@@ -135,3 +145,23 @@ def test_get_pods_for_component(pod_output, expect_pods, expect_error):
{}
)
assert_error(error, expect_error)
+
+
+@pytest.mark.parametrize('name, pods, expected_pods', [
+ (
+ 'test single pod found, scheduled, but no containerStatuses field',
+ [plain_kibana_pod_no_containerstatus],
+ [plain_kibana_pod_no_containerstatus],
+ ),
+ (
+        'set of pods where one pod is missing containerStatuses; only that pod should be flagged',
+ [plain_kibana_pod_no_containerstatus, plain_kibana_pod],
+ [plain_kibana_pod_no_containerstatus],
+ ),
+
+], ids=lambda argvals: argvals[0])
+def test_get_not_running_pods_no_container_status(name, pods, expected_pods):
+ check = canned_loggingcheck(lambda exec_module, namespace, cmd, args, task_vars: '')
+ result = check.not_running_pods(pods)
+
+ assert result == expected_pods
diff --git a/roles/openshift_health_checker/test/package_version_test.py b/roles/openshift_health_checker/test/package_version_test.py
index 91eace512..1bb6371ae 100644
--- a/roles/openshift_health_checker/test/package_version_test.py
+++ b/roles/openshift_health_checker/test/package_version_test.py
@@ -72,36 +72,6 @@ def test_package_version(openshift_release):
assert result is return_value
-@pytest.mark.parametrize('deployment_type,openshift_release,expected_ovs_version', [
- ("openshift-enterprise", "3.5", "2.6"),
- ("origin", "3.6", "2.6"),
- ("openshift-enterprise", "3.4", "2.4"),
- ("origin", "3.3", "2.4"),
-])
-def test_ovs_package_version(deployment_type, openshift_release, expected_ovs_version):
- task_vars = dict(
- openshift=dict(common=dict(service_type='origin')),
- openshift_release=openshift_release,
- openshift_image_tag='v' + openshift_release,
- openshift_deployment_type=deployment_type,
- )
- return_value = object()
-
- def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None):
- assert module_name == 'aos_version'
- assert "package_list" in module_args
-
- for pkg in module_args["package_list"]:
- if pkg["name"] == "openvswitch":
- assert pkg["version"] == expected_ovs_version
-
- return return_value
-
- check = PackageVersion(execute_module=execute_module)
- result = check.run(tmp=None, task_vars=task_vars)
- assert result is return_value
-
-
@pytest.mark.parametrize('deployment_type,openshift_release,expected_docker_version', [
("origin", "3.5", "1.12"),
("openshift-enterprise", "3.4", "1.12"),
diff --git a/roles/openshift_hosted/tasks/registry/storage/glusterfs.yml b/roles/openshift_hosted/tasks/registry/storage/glusterfs.yml
index c504bfb80..c2954fde1 100644
--- a/roles/openshift_hosted/tasks/registry/storage/glusterfs.yml
+++ b/roles/openshift_hosted/tasks/registry/storage/glusterfs.yml
@@ -35,7 +35,7 @@
mount:
state: mounted
fstype: glusterfs
- src: "{% if 'glusterfs_registry' in groups %}{{ groups.glusterfs_registry[0] }}{% else %}{{ groups.glusterfs[0] }}{% endif %}:/{{ openshift.hosted.registry.storage.glusterfs.path }}"
+ src: "{% if 'glusterfs_registry' in groups %}{% set node = groups.glusterfs_registry[0] %}{% else %}{% set node = groups.glusterfs[0] %}{% endif %}{% if 'glusterfs_hostname' in hostvars[node] %}{{ hostvars[node].glusterfs_hostname }}{% elif 'openshift' in hostvars[node] %}{{ hostvars[node].openshift.node.nodename }}{% else %}{{ node }}{% endif %}:/{{ openshift.hosted.registry.storage.glusterfs.path }}"
name: "{{ mktemp.stdout }}"
- name: Set registry volume permissions
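The new src expression packs a multi-level fallback into one line: pick a registry node if
one exists, then prefer its glusterfs_hostname fact, then its OpenShift nodename, and
finally the inventory name itself. The same precedence in plain Python (hypothetical fact
dicts):

    def glusterfs_mount_host(groups, hostvars):
        '''Mirror the template's host-selection precedence.'''
        if 'glusterfs_registry' in groups:
            node = groups['glusterfs_registry'][0]
        else:
            node = groups['glusterfs'][0]
        facts = hostvars[node]
        if 'glusterfs_hostname' in facts:
            return facts['glusterfs_hostname']
        if 'openshift' in facts:
            return facts['openshift']['node']['nodename']
        return node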
diff --git a/roles/openshift_hosted/templates/registry_config.j2 b/roles/openshift_hosted/templates/registry_config.j2
index dc8a9f089..fc9272679 100644
--- a/roles/openshift_hosted/templates/registry_config.j2
+++ b/roles/openshift_hosted/templates/registry_config.j2
@@ -21,7 +21,10 @@ storage:
regionendpoint: {{ openshift_hosted_registry_storage_s3_regionendpoint }}
{% endif %}
bucket: {{ openshift_hosted_registry_storage_s3_bucket }}
- encrypt: false
+ encrypt: {{ openshift_hosted_registry_storage_s3_encrypt | default(false) }}
+{% if openshift_hosted_registry_storage_s3_kmskeyid is defined %}
+ keyid: {{ openshift_hosted_registry_storage_s3_kmskeyid }}
+{% endif %}
secure: true
v4auth: true
rootdirectory: {{ openshift_hosted_registry_storage_s3_rootdirectory | default('/registry') }}
diff --git a/roles/openshift_node/handlers/main.yml b/roles/openshift_node/handlers/main.yml
index a6bd12d4e..6b38da7f8 100644
--- a/roles/openshift_node/handlers/main.yml
+++ b/roles/openshift_node/handlers/main.yml
@@ -4,9 +4,14 @@
name: openvswitch
state: restarted
when: (not skip_node_svc_handlers | default(False) | bool) and not (ovs_service_status_changed | default(false) | bool) and openshift.common.use_openshift_sdn | bool
+ register: l_openshift_node_stop_openvswitch_result
+ until: not l_openshift_node_stop_openvswitch_result | failed
+ retries: 3
+ delay: 30
notify:
- restart openvswitch pause
+
- name: restart openvswitch pause
pause: seconds=15
when: (not skip_node_svc_handlers | default(False) | bool) and openshift.common.is_containerized | bool
@@ -15,7 +20,13 @@
systemd:
name: "{{ openshift.common.service_type }}-node"
state: restarted
- when: (not skip_node_svc_handlers | default(False) | bool) and not (node_service_status_changed | default(false) | bool)
+ register: l_openshift_node_restart_node_result
+ until: not l_openshift_node_restart_node_result | failed
+ retries: 3
+ delay: 30
+ when:
+ - (not skip_node_svc_handlers | default(False) | bool)
+ - not (node_service_status_changed | default(false) | bool)
- name: reload sysctl.conf
command: /sbin/sysctl -p
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index 573051504..879f6c207 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -118,8 +118,12 @@
name: openvswitch.service
enabled: yes
state: started
+ daemon_reload: yes
when: openshift.common.is_containerized | bool and openshift.common.use_openshift_sdn | bool
register: ovs_start_result
+ until: not ovs_start_result | failed
+ retries: 3
+ delay: 30
- set_fact:
ovs_service_status_changed: "{{ ovs_start_result | changed }}"
@@ -212,15 +216,27 @@
state: started
when: openshift.common.is_containerized | bool
+
- name: Start and enable node
systemd:
name: "{{ openshift.common.service_type }}-node"
enabled: yes
state: started
+ daemon_reload: yes
register: node_start_result
until: not node_start_result | failed
retries: 1
delay: 30
+ ignore_errors: true
+
+- name: Dump logs from node service if it failed
+ command: journalctl --no-pager -n 100 {{ openshift.common.service_type }}-node
+ when: node_start_result | failed
+
+- name: Abort if node failed to start
+ fail:
+    msg: Node failed to start, please inspect the logs and try again
+ when: node_start_result | failed
- set_fact:
node_service_status_changed: "{{ node_start_result | changed }}"
diff --git a/roles/openshift_node/tasks/openvswitch_system_container.yml b/roles/openshift_node/tasks/openvswitch_system_container.yml
index 8cfa5a026..c8d653880 100644
--- a/roles/openshift_node/tasks/openvswitch_system_container.yml
+++ b/roles/openshift_node/tasks/openvswitch_system_container.yml
@@ -10,3 +10,5 @@
name: openvswitch
image: "{{ openshift.common.system_images_registry }}/{{ openshift.node.ovs_system_image }}:{{ openshift_image_tag }}"
state: latest
+ values:
+ - "DOCKER_SERVICE={{ openshift.docker.service_name }}.service"
diff --git a/roles/openshift_node/templates/node.service.j2 b/roles/openshift_node/templates/node.service.j2
index d4f0b7762..1dbe58439 100644
--- a/roles/openshift_node/templates/node.service.j2
+++ b/roles/openshift_node/templates/node.service.j2
@@ -25,6 +25,7 @@ SyslogIdentifier={{ openshift.common.service_type }}-node
Restart=always
RestartSec=5s
OOMScoreAdjust=-999
+KillMode=process
[Install]
WantedBy=multi-user.target
diff --git a/roles/openshift_node_certificates/handlers/main.yml b/roles/openshift_node_certificates/handlers/main.yml
index 502f80434..4abe8bcaf 100644
--- a/roles/openshift_node_certificates/handlers/main.yml
+++ b/roles/openshift_node_certificates/handlers/main.yml
@@ -9,3 +9,7 @@
name: "{{ openshift.docker.service_name }}"
state: restarted
when: not openshift_certificates_redeploy | default(false) | bool
+ register: l_docker_restart_docker_in_cert_result
+ until: not l_docker_restart_docker_in_cert_result | failed
+ retries: 3
+ delay: 30
diff --git a/roles/openshift_node_upgrade/README.md b/roles/openshift_node_upgrade/README.md
index 8b388cc6a..4e6229bfb 100644
--- a/roles/openshift_node_upgrade/README.md
+++ b/roles/openshift_node_upgrade/README.md
@@ -84,6 +84,11 @@ Including an example of how to use your role (for instance, with variables passe
command: >
{{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --force --delete-local-data --ignore-daemonsets
delegate_to: "{{ groups.oo_first_master.0 }}"
+ register: l_docker_upgrade_drain_result
+ until: not l_docker_upgrade_drain_result | failed
+ retries: 60
+ delay: 60
+
roles:
- openshift_facts
diff --git a/roles/openshift_node_upgrade/handlers/main.yml b/roles/openshift_node_upgrade/handlers/main.yml
index cb51416d4..110dfe5ce 100644
--- a/roles/openshift_node_upgrade/handlers/main.yml
+++ b/roles/openshift_node_upgrade/handlers/main.yml
@@ -1,7 +1,13 @@
---
- name: restart openvswitch
- systemd: name=openvswitch state=restarted
+ systemd:
+ name: openvswitch
+ state: restarted
when: (not skip_node_svc_handlers | default(False) | bool) and not (ovs_service_status_changed | default(false) | bool) and openshift.common.use_openshift_sdn | bool
+ register: l_openshift_node_upgrade_stop_openvswitch_result
+ until: not l_openshift_node_upgrade_stop_openvswitch_result | failed
+ retries: 3
+ delay: 30
notify:
- restart openvswitch pause
@@ -10,5 +16,13 @@
when: (not skip_node_svc_handlers | default(False) | bool) and openshift.common.is_containerized | bool
- name: restart node
- systemd: name={{ openshift.common.service_type }}-node state=restarted
- when: (not skip_node_svc_handlers | default(False) | bool) and not (node_service_status_changed | default(false) | bool)
+ systemd:
+ name: "{{ openshift.common.service_type }}-node"
+ state: restarted
+ register: l_openshift_node_upgrade_restart_node_result
+ until: not l_openshift_node_upgrade_restart_node_result | failed
+ retries: 3
+ delay: 30
+ when:
+ - (not skip_node_svc_handlers | default(False) | bool)
+ - not (node_service_status_changed | default(false) | bool)
diff --git a/roles/openshift_node_upgrade/tasks/docker/upgrade.yml b/roles/openshift_node_upgrade/tasks/docker/upgrade.yml
index 416cf605a..ebe87d6fd 100644
--- a/roles/openshift_node_upgrade/tasks/docker/upgrade.yml
+++ b/roles/openshift_node_upgrade/tasks/docker/upgrade.yml
@@ -26,7 +26,13 @@
- debug: var=docker_image_count.stdout
when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
-- service: name=docker state=stopped
+- service:
+ name: docker
+ state: stopped
+ register: l_openshift_node_upgrade_docker_stop_result
+ until: not l_openshift_node_upgrade_docker_stop_result | failed
+ retries: 3
+ delay: 30
- name: Upgrade Docker
package: name=docker{{ '-' + docker_version }} state=present
diff --git a/roles/openshift_node_upgrade/tasks/restart.yml b/roles/openshift_node_upgrade/tasks/restart.yml
index 6947223af..f228b6e08 100644
--- a/roles/openshift_node_upgrade/tasks/restart.yml
+++ b/roles/openshift_node_upgrade/tasks/restart.yml
@@ -19,7 +19,7 @@
state: started
register: docker_start_result
until: not docker_start_result | failed
- retries: 1
+ retries: 3
delay: 30
- name: Update docker facts
diff --git a/roles/openshift_node_upgrade/templates/node.service.j2 b/roles/openshift_node_upgrade/templates/node.service.j2
index d4f0b7762..1dbe58439 100644
--- a/roles/openshift_node_upgrade/templates/node.service.j2
+++ b/roles/openshift_node_upgrade/templates/node.service.j2
@@ -25,6 +25,7 @@ SyslogIdentifier={{ openshift.common.service_type }}-node
Restart=always
RestartSec=5s
OOMScoreAdjust=-999
+KillMode=process
[Install]
WantedBy=multi-user.target
diff --git a/roles/openshift_service_catalog/files/kubeservicecatalog_roles_bindings.yml b/roles/openshift_service_catalog/files/kubeservicecatalog_roles_bindings.yml
index 2e0dcfd97..bcc7fb590 100644
--- a/roles/openshift_service_catalog/files/kubeservicecatalog_roles_bindings.yml
+++ b/roles/openshift_service_catalog/files/kubeservicecatalog_roles_bindings.yml
@@ -137,6 +137,12 @@ objects:
- serviceclasses
verbs:
- create
+ - apiGroups:
+ - settings.k8s.io
+ resources:
+ - podpresets
+ verbs:
+ - create
- kind: ClusterRoleBinding
apiVersion: v1
diff --git a/roles/openshift_service_catalog/files/openshift-ansible-catalog-console.js b/roles/openshift_service_catalog/files/openshift-ansible-catalog-console.js
new file mode 100644
index 000000000..1f25cc39f
--- /dev/null
+++ b/roles/openshift_service_catalog/files/openshift-ansible-catalog-console.js
@@ -0,0 +1,2 @@
+window.OPENSHIFT_CONSTANTS.ENABLE_TECH_PREVIEW_FEATURE.service_catalog_landing_page = true;
+window.OPENSHIFT_CONSTANTS.ENABLE_TECH_PREVIEW_FEATURE.pod_presets = true;
diff --git a/roles/openshift_service_catalog/tasks/install.yml b/roles/openshift_service_catalog/tasks/install.yml
index 1342c3d30..686857d94 100644
--- a/roles/openshift_service_catalog/tasks/install.yml
+++ b/roles/openshift_service_catalog/tasks/install.yml
@@ -23,7 +23,12 @@
oc_project:
state: present
name: "kube-service-catalog"
-# node_selector: "{{ openshift_service_catalog_nodeselector | default(null) }}"
+ node_selector: ""
+
+- name: Make kube-service-catalog project network global
+ command: >
+ oc adm pod-network make-projects-global kube-service-catalog
+ when: os_sdn_network_plugin_name | default('') == 'redhat/openshift-ovs-multitenant'
- include: generate_certs.yml
@@ -61,6 +66,52 @@
template_name: kube-system-service-catalog
namespace: kube-system
+- oc_obj:
+ name: edit
+ kind: clusterrole
+ state: list
+ register: edit_yaml
+
+# only do this if we don't already have the updated role info
+- name: Generate apply template for clusterrole/edit
+ template:
+ src: sc_role_patching.j2
+ dest: "{{ mktemp.stdout }}/edit_sc_patch.yml"
+ vars:
+ original_content: "{{ edit_yaml.results.results[0] | to_yaml }}"
+ when:
+ - not edit_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['instances', 'bindings'], ['create', 'update', 'delete', 'get', 'list', 'watch']) or not edit_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])
+
+# only do this if we don't already have the updated role info
+- name: update edit role for service catalog and pod preset access
+ command: >
+ oc replace -f {{ mktemp.stdout }}/edit_sc_patch.yml
+ when:
+ - not edit_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['instances', 'bindings'], ['create', 'update', 'delete', 'get', 'list', 'watch']) or not edit_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])
+
+- oc_obj:
+ name: admin
+ kind: clusterrole
+ state: list
+ register: admin_yaml
+
+# only do this if we don't already have the updated role info
+- name: Generate apply template for clusterrole/admin
+ template:
+ src: sc_role_patching.j2
+ dest: "{{ mktemp.stdout }}/admin_sc_patch.yml"
+ vars:
+ original_content: "{{ admin_yaml.results.results[0] | to_yaml }}"
+ when:
+ - not admin_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['instances', 'bindings'], ['create', 'update', 'delete', 'get', 'list', 'watch']) or not admin_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])
+
+# only do this if we don't already have the updated role info
+- name: update admin role for service catalog and pod preset access
+ command: >
+ oc replace -f {{ mktemp.stdout }}/admin_sc_patch.yml
+ when:
+ - not admin_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['instances', 'bindings'], ['create', 'update', 'delete', 'get', 'list', 'watch']) or not admin_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])
+
- shell: >
oc get policybindings/kube-system:default -n kube-system || echo "not found"
register: get_kube_system
diff --git a/roles/openshift_service_catalog/tasks/wire_aggregator.yml b/roles/openshift_service_catalog/tasks/wire_aggregator.yml
index cbb29e623..d5291a99a 100644
--- a/roles/openshift_service_catalog/tasks/wire_aggregator.yml
+++ b/roles/openshift_service_catalog/tasks/wire_aggregator.yml
@@ -119,6 +119,11 @@
when:
- not front_proxy_kubeconfig.stat.exists
+- name: copy tech preview extension file for service console UI
+ copy:
+ src: openshift-ansible-catalog-console.js
+ dest: /etc/origin/master/openshift-ansible-catalog-console.js
+
- name: Update master config
yedit:
state: present
@@ -138,6 +143,16 @@
value: [X-Remote-Group]
- key: authConfig.requestHeader.extraHeaderPrefixes
value: [X-Remote-Extra-]
+ - key: assetConfig.extensionScripts
+ value: [/etc/origin/master/openshift-ansible-catalog-console.js]
+ - key: kubernetesMasterConfig.apiServerArguments.runtime-config
+ value: [apis/settings.k8s.io/v1alpha1=true]
+ - key: admissionConfig.pluginConfig.PodPreset.configuration.kind
+ value: DefaultAdmissionConfig
+ - key: admissionConfig.pluginConfig.PodPreset.configuration.apiVersion
+ value: v1
+ - key: admissionConfig.pluginConfig.PodPreset.configuration.disable
+ value: false
register: yedit_output
#restart master serially here
diff --git a/roles/openshift_service_catalog/templates/sc_role_patching.j2 b/roles/openshift_service_catalog/templates/sc_role_patching.j2
new file mode 100644
index 000000000..69b062b3f
--- /dev/null
+++ b/roles/openshift_service_catalog/templates/sc_role_patching.j2
@@ -0,0 +1,26 @@
+{{ original_content }}
+- apiGroups:
+ - "servicecatalog.k8s.io"
+ attributeRestrictions: null
+ resources:
+ - instances
+ - bindings
+ verbs:
+ - create
+ - update
+ - delete
+ - get
+ - list
+ - watch
+- apiGroups:
+ - "settings.k8s.io"
+ attributeRestrictions: null
+ resources:
+ - podpresets
+ verbs:
+ - create
+ - update
+ - delete
+ - get
+ - list
+ - watch
diff --git a/roles/openshift_storage_glusterfs/README.md b/roles/openshift_storage_glusterfs/README.md
index 4b9a5f42c..b367e7daf 100644
--- a/roles/openshift_storage_glusterfs/README.md
+++ b/roles/openshift_storage_glusterfs/README.md
@@ -64,7 +64,7 @@ their configuration as GlusterFS nodes:
|--------------------|---------------------------|-----------------------------------------|
| glusterfs_cluster | 1 | The ID of the cluster this node should belong to. This is useful when a single heketi service is expected to manage multiple distinct clusters. **NOTE:** For natively-hosted clusters, all pods will be in the same OpenShift namespace
| glusterfs_hostname | openshift.node.nodename | A hostname (or IP address) that will be used for internal GlusterFS communication
-| glusterfs_ip | openshift.common.ip | An IP address that will be used by pods to communicate with the GlusterFS node
+| glusterfs_ip | openshift.common.ip | An IP address that will be used by pods to communicate with the GlusterFS node. **NOTE:** Required for external GlusterFS nodes
| glusterfs_zone | 1 | A zone number for the node. Zones are used within the cluster for determining how to distribute the bricks of GlusterFS volumes. heketi will try to spread each volume's bricks as evenly as possible across all zones
Role Variables
@@ -76,7 +76,7 @@ GlusterFS cluster into a new or existing OpenShift cluster:
| Name | Default value | Description |
|--------------------------------------------------|-------------------------|-----------------------------------------|
| openshift_storage_glusterfs_timeout | 300 | Seconds to wait for pods to become ready
-| openshift_storage_glusterfs_namespace | 'default' | Namespace in which to create GlusterFS resources
+| openshift_storage_glusterfs_namespace | 'glusterfs' | Namespace in which to create GlusterFS resources
| openshift_storage_glusterfs_is_native | True | GlusterFS should be containerized
| openshift_storage_glusterfs_name | 'storage' | A name to identify the GlusterFS cluster, which will be used in resource names
| openshift_storage_glusterfs_nodeselector | 'glusterfs=storage-host'| Selector to determine which nodes will host GlusterFS pods in native mode. **NOTE:** The label value is taken from the cluster name
@@ -85,6 +85,7 @@ GlusterFS cluster into a new or existing OpenShift cluster:
| openshift_storage_glusterfs_version | 'latest' | Container image version to use for GlusterFS pods
| openshift_storage_glusterfs_wipe | False | Destroy any existing GlusterFS resources and wipe storage devices. **WARNING: THIS WILL DESTROY ANY DATA ON THOSE DEVICES.**
| openshift_storage_glusterfs_heketi_is_native | True | heketi should be containerized
+| openshift_storage_glusterfs_heketi_cli | 'heketi-cli' | Command/path used to invoke the heketi-cli tool. **NOTE:** Change this only for **non-native heketi**, if heketi-cli is not in the global `$PATH` of the machine running openshift-ansible
| openshift_storage_glusterfs_heketi_image | 'heketi/heketi' | Container image to use for heketi pods, enterprise default is 'rhgs3/rhgs-volmanager-rhel7'
| openshift_storage_glusterfs_heketi_version | 'latest' | Container image version to use for heketi pods
| openshift_storage_glusterfs_heketi_admin_key | auto-generated | String to use as secret key for performing heketi commands as admin
@@ -92,6 +93,11 @@ GlusterFS cluster into a new or existing OpenShift cluster:
| openshift_storage_glusterfs_heketi_topology_load | True | Load the GlusterFS topology information into heketi
| openshift_storage_glusterfs_heketi_url | Undefined | When heketi is native, this sets the hostname portion of the final heketi route URL. When heketi is external, this is the full URL to the heketi service.
| openshift_storage_glusterfs_heketi_port | 8080 | TCP port for external heketi service **NOTE:** This has no effect in native mode
+| openshift_storage_glusterfs_heketi_executor | 'kubernetes' | Selects how a native heketi service will manage GlusterFS nodes: 'kubernetes' for native nodes, 'ssh' for external nodes
+| openshift_storage_glusterfs_heketi_ssh_port | 22 | SSH port for external GlusterFS nodes via native heketi
+| openshift_storage_glusterfs_heketi_ssh_user | 'root' | SSH user for external GlusterFS nodes via native heketi
+| openshift_storage_glusterfs_heketi_ssh_sudo | False | Whether to use sudo (for a non-root user) when SSHing to external GlusterFS nodes via native heketi
+| openshift_storage_glusterfs_heketi_ssh_keyfile | '/dev/null' | Path to a private key file for use with SSH connections to external GlusterFS nodes via native heketi **NOTE:** This must be an absolute path
| openshift_storage_glusterfs_heketi_wipe | False | Destroy any existing heketi resources, defaults to the value of `openshift_storage_glusterfs_wipe`
Each role variable also has a corresponding variable to optionally configure a
@@ -103,7 +109,7 @@ are an exception:
| Name | Default value | Description |
|-------------------------------------------------------|-----------------------|-----------------------------------------|
-| openshift_storage_glusterfs_registry_namespace | registry namespace | Default is to use the hosted registry's namespace, otherwise 'default'
+| openshift_storage_glusterfs_registry_namespace | registry namespace | Default is to use the hosted registry's namespace, otherwise 'glusterfs'
| openshift_storage_glusterfs_registry_name | 'registry' | This allows for the logical separation of the registry GlusterFS cluster from other GlusterFS clusters
| openshift_storage_glusterfs_registry_storageclass | False | It is recommended to not create a StorageClass for GlusterFS clusters serving registry storage, so as to avoid performance penalties
| openshift_storage_glusterfs_registry_heketi_admin_key | auto-generated | Admin key for the registry cluster's heketi, kept separate from the non-registry key above
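To tie the new options together, a hypothetical inventory snippet for a native heketi managing external GlusterFS nodes over SSH (user, port, and key path are made up):

```yaml
openshift_storage_glusterfs_heketi_executor: 'ssh'
openshift_storage_glusterfs_heketi_ssh_port: 2222
openshift_storage_glusterfs_heketi_ssh_user: 'gluster'
openshift_storage_glusterfs_heketi_ssh_sudo: True
openshift_storage_glusterfs_heketi_ssh_keyfile: '/root/.ssh/gluster_id_rsa'
```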
diff --git a/roles/openshift_storage_glusterfs/defaults/main.yml b/roles/openshift_storage_glusterfs/defaults/main.yml
index 4ff56af9e..a846889ca 100644
--- a/roles/openshift_storage_glusterfs/defaults/main.yml
+++ b/roles/openshift_storage_glusterfs/defaults/main.yml
@@ -1,6 +1,6 @@
---
openshift_storage_glusterfs_timeout: 300
-openshift_storage_glusterfs_namespace: 'default'
+openshift_storage_glusterfs_namespace: 'glusterfs'
openshift_storage_glusterfs_is_native: True
openshift_storage_glusterfs_name: 'storage'
openshift_storage_glusterfs_nodeselector: "glusterfs={{ openshift_storage_glusterfs_name }}-host"
@@ -8,9 +8,10 @@ openshift_storage_glusterfs_storageclass: True
openshift_storage_glusterfs_image: "{{ 'rhgs3/rhgs-server-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'gluster/gluster-centos' | quote }}"
openshift_storage_glusterfs_version: 'latest'
openshift_storage_glusterfs_wipe: False
-openshift_storage_glusterfs_heketi_is_native: True
+openshift_storage_glusterfs_heketi_is_native: "{{ openshift_storage_glusterfs_is_native }}"
openshift_storage_glusterfs_heketi_is_missing: True
openshift_storage_glusterfs_heketi_deploy_is_missing: True
+openshift_storage_glusterfs_heketi_cli: 'heketi-cli'
openshift_storage_glusterfs_heketi_image: "{{ 'rhgs3/rhgs-volmanager-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'heketi/heketi' | quote }}"
openshift_storage_glusterfs_heketi_version: 'latest'
openshift_storage_glusterfs_heketi_admin_key: "{{ omit }}"
@@ -19,9 +20,14 @@ openshift_storage_glusterfs_heketi_topology_load: True
openshift_storage_glusterfs_heketi_wipe: "{{ openshift_storage_glusterfs_wipe }}"
openshift_storage_glusterfs_heketi_url: "{{ omit }}"
openshift_storage_glusterfs_heketi_port: 8080
+openshift_storage_glusterfs_heketi_executor: 'kubernetes'
+openshift_storage_glusterfs_heketi_ssh_port: 22
+openshift_storage_glusterfs_heketi_ssh_user: 'root'
+openshift_storage_glusterfs_heketi_ssh_sudo: False
+openshift_storage_glusterfs_heketi_ssh_keyfile: '/dev/null'
openshift_storage_glusterfs_registry_timeout: "{{ openshift_storage_glusterfs_timeout }}"
-openshift_storage_glusterfs_registry_namespace: "{{ openshift.hosted.registry.namespace | default('default') }}"
+openshift_storage_glusterfs_registry_namespace: "{{ openshift.hosted.registry.namespace | default(openshift_storage_glusterfs_namespace) }}"
openshift_storage_glusterfs_registry_is_native: "{{ openshift_storage_glusterfs_is_native }}"
openshift_storage_glusterfs_registry_name: 'registry'
openshift_storage_glusterfs_registry_nodeselector: "glusterfs={{ openshift_storage_glusterfs_registry_name }}-host"
@@ -29,9 +35,10 @@ openshift_storage_glusterfs_registry_storageclass: False
openshift_storage_glusterfs_registry_image: "{{ openshift_storage_glusterfs_image }}"
openshift_storage_glusterfs_registry_version: "{{ openshift_storage_glusterfs_version }}"
openshift_storage_glusterfs_registry_wipe: "{{ openshift_storage_glusterfs_wipe }}"
-openshift_storage_glusterfs_registry_heketi_is_native: "{{ openshift_storage_glusterfs_heketi_is_native }}"
+openshift_storage_glusterfs_registry_heketi_is_native: "{{ openshift_storage_glusterfs_registry_is_native }}"
openshift_storage_glusterfs_registry_heketi_is_missing: "{{ openshift_storage_glusterfs_heketi_is_missing }}"
openshift_storage_glusterfs_registry_heketi_deploy_is_missing: "{{ openshift_storage_glusterfs_heketi_deploy_is_missing }}"
+openshift_storage_glusterfs_registry_heketi_cli: "{{ openshift_storage_glusterfs_heketi_cli }}"
openshift_storage_glusterfs_registry_heketi_image: "{{ openshift_storage_glusterfs_heketi_image }}"
openshift_storage_glusterfs_registry_heketi_version: "{{ openshift_storage_glusterfs_heketi_version }}"
openshift_storage_glusterfs_registry_heketi_admin_key: "{{ omit }}"
@@ -39,4 +46,9 @@ openshift_storage_glusterfs_registry_heketi_user_key: "{{ omit }}"
openshift_storage_glusterfs_registry_heketi_topology_load: "{{ openshift_storage_glusterfs_heketi_topology_load }}"
openshift_storage_glusterfs_registry_heketi_wipe: "{{ openshift_storage_glusterfs_heketi_wipe }}"
openshift_storage_glusterfs_registry_heketi_url: "{{ openshift_storage_glusterfs_heketi_url | default(omit) }}"
-openshift_storage_glusterfs_registry_heketi_port: 8080
+openshift_storage_glusterfs_registry_heketi_port: "{{ openshift_storage_glusterfs_heketi_port }}"
+openshift_storage_glusterfs_registry_heketi_executor: "{{ openshift_storage_glusterfs_heketi_executor }}"
+openshift_storage_glusterfs_registry_heketi_ssh_port: "{{ openshift_storage_glusterfs_heketi_ssh_port }}"
+openshift_storage_glusterfs_registry_heketi_ssh_user: "{{ openshift_storage_glusterfs_heketi_ssh_user }}"
+openshift_storage_glusterfs_registry_heketi_ssh_sudo: "{{ openshift_storage_glusterfs_heketi_ssh_sudo }}"
+openshift_storage_glusterfs_registry_heketi_ssh_keyfile: "{{ openshift_storage_glusterfs_heketi_ssh_keyfile }}"
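Note the inheritance pattern introduced here: every registry-specific variable now defaults to its base counterpart, so a single override reaches both clusters unless it is deliberately split. A hypothetical example:

```yaml
# Applies to both the main and the registry clusters:
openshift_storage_glusterfs_heketi_ssh_user: 'gluster'
# Overrides the registry cluster only:
openshift_storage_glusterfs_registry_heketi_ssh_user: 'registry-gluster'
```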
diff --git a/roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml b/roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml
index 4434f750c..9ebb0d5ec 100644
--- a/roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml
+++ b/roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml
@@ -71,7 +71,7 @@ objects:
- name: HEKETI_ADMIN_KEY
value: ${HEKETI_ADMIN_KEY}
- name: HEKETI_EXECUTOR
- value: kubernetes
+ value: ${HEKETI_EXECUTOR}
- name: HEKETI_FSTAB
value: /var/lib/heketi/fstab
- name: HEKETI_SNAPSHOT_LIMIT
@@ -87,6 +87,8 @@ objects:
mountPath: /var/lib/heketi
- name: topology
mountPath: ${TOPOLOGY_PATH}
+ - name: config
+ mountPath: /etc/heketi
readinessProbe:
timeoutSeconds: 3
initialDelaySeconds: 3
@@ -104,6 +106,9 @@ objects:
- name: topology
secret:
secretName: heketi-${CLUSTER_NAME}-topology-secret
+ - name: config
+ secret:
+ secretName: heketi-${CLUSTER_NAME}-config-secret
parameters:
- name: HEKETI_USER_KEY
displayName: Heketi User Secret
@@ -111,6 +116,10 @@ parameters:
- name: HEKETI_ADMIN_KEY
displayName: Heketi Administrator Secret
description: Set secret for administration of the Heketi service as user _admin_
+- name: HEKETI_EXECUTOR
+ displayName: heketi executor type
+ description: Set the executor type, kubernetes or ssh
+ value: kubernetes
- name: HEKETI_KUBE_NAMESPACE
displayName: Namespace
description: Set the namespace where the GlusterFS pods reside
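The new parameter can also be exercised when processing the template by hand; a sketch, assuming the template object keeps the name deploy-heketi and is already loaded in the target namespace:

```yaml
- name: Process deploy-heketi with the ssh executor (illustrative)
  command: >
    oc process deploy-heketi
    -p HEKETI_EXECUTOR=ssh
    -p HEKETI_KUBE_NAMESPACE=glusterfs
    -p CLUSTER_NAME=storage
```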
diff --git a/roles/openshift_storage_glusterfs/files/v3.6/heketi-template.yml b/roles/openshift_storage_glusterfs/files/v3.6/heketi-template.yml
index e3fa0a9fb..61b6a8c13 100644
--- a/roles/openshift_storage_glusterfs/files/v3.6/heketi-template.yml
+++ b/roles/openshift_storage_glusterfs/files/v3.6/heketi-template.yml
@@ -67,7 +67,7 @@ objects:
- name: HEKETI_ADMIN_KEY
value: ${HEKETI_ADMIN_KEY}
- name: HEKETI_EXECUTOR
- value: kubernetes
+ value: ${HEKETI_EXECUTOR}
- name: HEKETI_FSTAB
value: /var/lib/heketi/fstab
- name: HEKETI_SNAPSHOT_LIMIT
@@ -81,6 +81,8 @@ objects:
volumeMounts:
- name: db
mountPath: /var/lib/heketi
+ - name: config
+ mountPath: /etc/heketi
readinessProbe:
timeoutSeconds: 3
initialDelaySeconds: 3
@@ -98,6 +100,9 @@ objects:
glusterfs:
endpoints: heketi-db-${CLUSTER_NAME}-endpoints
path: heketidbstorage
+ - name: config
+ secret:
+ secretName: heketi-${CLUSTER_NAME}-config-secret
parameters:
- name: HEKETI_USER_KEY
displayName: Heketi User Secret
@@ -105,6 +110,10 @@ parameters:
- name: HEKETI_ADMIN_KEY
displayName: Heketi Administrator Secret
description: Set secret for administration of the Heketi service as user _admin_
+- name: HEKETI_EXECUTOR
+ displayName: heketi executor type
+ description: Set the executor type, kubernetes or ssh
+ value: kubernetes
- name: HEKETI_KUBE_NAMESPACE
displayName: Namespace
description: Set the namespace where the GlusterFS pods reside
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
index af901103e..600d8f676 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
@@ -1,4 +1,16 @@
---
+- name: Make sure heketi-client is installed
+ package: name=heketi-client state=present
+ when:
+ - not openshift.common.is_atomic | bool
+ - not glusterfs_heketi_is_native | bool
+
+- name: Verify heketi-cli is installed
+ shell: "command -v {{ glusterfs_heketi_cli }} >/dev/null 2>&1 || { echo >&2 'ERROR: Make sure heketi-cli is available, then re-run the installer'; exit 1; }"
+ changed_when: False
+ when:
+ - not glusterfs_heketi_is_native | bool
+
- name: Verify target namespace exists
oc_project:
state: present
@@ -19,6 +31,8 @@
name: "heketi-storage-endpoints"
- kind: "secret"
name: "heketi-{{ glusterfs_name }}-topology-secret"
+ - kind: "secret"
+ name: "heketi-{{ glusterfs_name }}-config-secret"
- kind: "template,route,service,dc"
name: "heketi-{{ glusterfs_name }}"
- kind: "svc"
@@ -125,6 +139,13 @@
when:
- glusterfs_heketi_topology_load
+- name: Generate heketi config file
+ template:
+ src: "{{ openshift.common.examples_content_version }}/heketi.json.j2"
+ dest: "{{ mktemp.stdout }}/heketi.json"
+ when:
+ - glusterfs_heketi_is_native
+
- name: Generate heketi admin key
set_fact:
glusterfs_heketi_admin_key: "{{ 32 | oo_generate_secret }}"
@@ -142,6 +163,20 @@
- glusterfs_heketi_is_native
- glusterfs_heketi_user_key is undefined
+- name: Create heketi config secret
+ oc_secret:
+ namespace: "{{ glusterfs_namespace }}"
+ state: present
+ name: "heketi-{{ glusterfs_name }}-config-secret"
+ force: True
+ files:
+ - name: heketi.json
+ path: "{{ mktemp.stdout }}/heketi.json"
+ - name: private_key
+ path: "{{ glusterfs_heketi_ssh_keyfile }}"
+ when:
+ - glusterfs_heketi_is_native
+
- include: heketi_deploy_part1.yml
when:
- glusterfs_heketi_is_native
@@ -150,7 +185,7 @@
- name: Set heketi-cli command
set_fact:
- glusterfs_heketi_client: "{% if glusterfs_heketi_is_native %}{{ openshift.common.client_binary }} rsh --namespace={{ glusterfs_namespace }} {{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} {% endif %}heketi-cli -s http://{% if glusterfs_heketi_is_native %}localhost:8080{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %} --user admin --secret '{{ glusterfs_heketi_admin_key }}'"
+ glusterfs_heketi_client: "{% if glusterfs_heketi_is_native %}{{ openshift.common.client_binary }} rsh --namespace={{ glusterfs_namespace }} {{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} {% endif %}{{ glusterfs_heketi_cli }} -s http://{% if glusterfs_heketi_is_native %}localhost:8080{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %} --user admin {% if glusterfs_heketi_admin_key is defined %}--secret '{{ glusterfs_heketi_admin_key }}'{% endif %}"
- name: Verify heketi service
command: "{{ glusterfs_heketi_client }} cluster list"
@@ -180,6 +215,7 @@
data: "{{ glusterfs_heketi_admin_key }}"
when:
- glusterfs_storageclass
+ - glusterfs_heketi_admin_key is defined
- name: Get heketi route
oc_obj:
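To make the branching in `glusterfs_heketi_client` concrete, here is roughly what the fact renders to in each mode (pod name, URL, and CLI path are placeholders; the client binary is assumed to be `oc`):

```yaml
# Native heketi:
#   oc rsh --namespace=glusterfs heketi-storage-1-abcde heketi-cli -s http://localhost:8080 --user admin --secret '<admin key>'
# External heketi with a custom CLI path and no admin key defined:
#   /usr/local/bin/heketi-cli -s http://heketi.example.com:8080 --user admin
```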
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
index dbfe126a4..b54a8e36c 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
@@ -12,6 +12,7 @@
glusterfs_heketi_is_native: "{{ openshift_storage_glusterfs_heketi_is_native }}"
glusterfs_heketi_is_missing: "{{ openshift_storage_glusterfs_heketi_is_missing }}"
glusterfs_heketi_deploy_is_missing: "{{ openshift_storage_glusterfs_heketi_deploy_is_missing }}"
+ glusterfs_heketi_cli: "{{ openshift_storage_glusterfs_heketi_cli }}"
glusterfs_heketi_image: "{{ openshift_storage_glusterfs_heketi_image }}"
glusterfs_heketi_version: "{{ openshift_storage_glusterfs_heketi_version }}"
glusterfs_heketi_admin_key: "{{ openshift_storage_glusterfs_heketi_admin_key }}"
@@ -20,6 +21,11 @@
glusterfs_heketi_wipe: "{{ openshift_storage_glusterfs_heketi_wipe }}"
glusterfs_heketi_url: "{{ openshift_storage_glusterfs_heketi_url }}"
glusterfs_heketi_port: "{{ openshift_storage_glusterfs_heketi_port }}"
+ glusterfs_heketi_executor: "{{ openshift_storage_glusterfs_heketi_executor }}"
+ glusterfs_heketi_ssh_port: "{{ openshift_storage_glusterfs_heketi_ssh_port }}"
+ glusterfs_heketi_ssh_user: "{{ openshift_storage_glusterfs_heketi_ssh_user }}"
+ glusterfs_heketi_ssh_sudo: "{{ openshift_storage_glusterfs_heketi_ssh_sudo }}"
+ glusterfs_heketi_ssh_keyfile: "{{ openshift_storage_glusterfs_heketi_ssh_keyfile }}"
glusterfs_nodes: "{{ groups.glusterfs }}"
- include: glusterfs_common.yml
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
index 0849f2a2e..0b4d1c82b 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
@@ -12,6 +12,7 @@
glusterfs_heketi_is_native: "{{ openshift_storage_glusterfs_registry_heketi_is_native }}"
glusterfs_heketi_is_missing: "{{ openshift_storage_glusterfs_registry_heketi_is_missing }}"
glusterfs_heketi_deploy_is_missing: "{{ openshift_storage_glusterfs_registry_heketi_deploy_is_missing }}"
+ glusterfs_heketi_cli: "{{ openshift_storage_glusterfs_registry_heketi_cli }}"
glusterfs_heketi_image: "{{ openshift_storage_glusterfs_registry_heketi_image }}"
glusterfs_heketi_version: "{{ openshift_storage_glusterfs_registry_heketi_version }}"
glusterfs_heketi_admin_key: "{{ openshift_storage_glusterfs_registry_heketi_admin_key }}"
@@ -20,6 +21,11 @@
glusterfs_heketi_wipe: "{{ openshift_storage_glusterfs_registry_heketi_wipe }}"
glusterfs_heketi_url: "{{ openshift_storage_glusterfs_registry_heketi_url }}"
glusterfs_heketi_port: "{{ openshift_storage_glusterfs_registry_heketi_port }}"
+ glusterfs_heketi_executor: "{{ openshift_storage_glusterfs_registry_heketi_executor }}"
+ glusterfs_heketi_ssh_port: "{{ openshift_storage_glusterfs_registry_heketi_ssh_port }}"
+ glusterfs_heketi_ssh_user: "{{ openshift_storage_glusterfs_registry_heketi_ssh_user }}"
+ glusterfs_heketi_ssh_sudo: "{{ openshift_storage_glusterfs_registry_heketi_ssh_sudo }}"
+ glusterfs_heketi_ssh_keyfile: "{{ openshift_storage_glusterfs_registry_heketi_ssh_keyfile }}"
glusterfs_nodes: "{{ groups.glusterfs_registry | default(groups.glusterfs) }}"
- include: glusterfs_common.yml
diff --git a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml
index ea9b1fe1f..3ba1eb2d2 100644
--- a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml
+++ b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml
@@ -36,6 +36,7 @@
HEKETI_ROUTE: "{{ glusterfs_heketi_url | default(['heketi-',glusterfs_name]|join) }}"
HEKETI_USER_KEY: "{{ glusterfs_heketi_user_key }}"
HEKETI_ADMIN_KEY: "{{ glusterfs_heketi_admin_key }}"
+ HEKETI_EXECUTOR: "{{ glusterfs_heketi_executor }}"
HEKETI_KUBE_NAMESPACE: "{{ glusterfs_namespace }}"
CLUSTER_NAME: "{{ glusterfs_name }}"
TOPOLOGY_PATH: "{{ mktemp.stdout }}"
diff --git a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
index 63009c539..37d3e6ba2 100644
--- a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
+++ b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
@@ -106,6 +106,7 @@
HEKETI_ROUTE: "{{ glusterfs_heketi_url | default(['heketi-',glusterfs_name]|join) }}"
HEKETI_USER_KEY: "{{ glusterfs_heketi_user_key }}"
HEKETI_ADMIN_KEY: "{{ glusterfs_heketi_admin_key }}"
+ HEKETI_EXECUTOR: "{{ glusterfs_heketi_executor }}"
HEKETI_KUBE_NAMESPACE: "{{ glusterfs_namespace }}"
CLUSTER_NAME: "{{ glusterfs_name }}"
@@ -125,7 +126,7 @@
- name: Set heketi-cli command
set_fact:
- glusterfs_heketi_client: "{{ openshift.common.client_binary }} rsh --namespace={{ glusterfs_namespace }} {{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} heketi-cli -s http://localhost:8080 --user admin --secret '{{ glusterfs_heketi_admin_key }}'"
+ glusterfs_heketi_client: "{{ openshift.common.client_binary }} rsh --namespace={{ glusterfs_namespace }} {{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} {{ glusterfs_heketi_cli }} -s http://localhost:8080 --user admin --secret '{{ glusterfs_heketi_admin_key }}'"
- name: Verify heketi service
command: "{{ glusterfs_heketi_client }} cluster list"
diff --git a/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2
index 2ec9a9e9a..095fb780f 100644
--- a/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2
+++ b/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2
@@ -7,5 +7,7 @@ provisioner: kubernetes.io/glusterfs
parameters:
resturl: "http://{% if glusterfs_heketi_is_native %}{{ glusterfs_heketi_route }}{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %}"
restuser: "admin"
+{% if glusterfs_heketi_admin_key is defined %}
secretNamespace: "{{ glusterfs_namespace }}"
secretName: "heketi-{{ glusterfs_name }}-admin-secret"
+{%- endif -%}
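For illustration, the rendered StorageClass for an external heketi with no admin key defined (endpoint hypothetical) now omits the secret references entirely:

```yaml
provisioner: kubernetes.io/glusterfs
parameters:
  resturl: "http://heketi.example.com:8080"
  restuser: "admin"
```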
diff --git a/roles/openshift_storage_glusterfs/templates/v3.6/heketi.json.j2 b/roles/openshift_storage_glusterfs/templates/v3.6/heketi.json.j2
new file mode 100644
index 000000000..579b11bb7
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.6/heketi.json.j2
@@ -0,0 +1,36 @@
+{
+ "_port_comment": "Heketi Server Port Number",
+ "port" : "8080",
+
+ "_use_auth": "Enable JWT authorization. Please enable for deployment",
+ "use_auth" : false,
+
+ "_jwt" : "Private keys for access",
+ "jwt" : {
+ "_admin" : "Admin has access to all APIs",
+ "admin" : {
+ "key" : "My Secret"
+ },
+ "_user" : "User only has access to /volumes endpoint",
+ "user" : {
+ "key" : "My Secret"
+ }
+ },
+
+ "_glusterfs_comment": "GlusterFS Configuration",
+ "glusterfs" : {
+
+ "_executor_comment": "Execute plugin. Possible choices: mock, kubernetes, ssh",
+ "executor" : "{{ glusterfs_heketi_executor }}",
+
+ "_db_comment": "Database file name",
+ "db" : "/var/lib/heketi/heketi.db",
+
+ "sshexec" : {
+ "keyfile" : "/etc/heketi/private_key",
+ "port" : "{{ glusterfs_heketi_ssh_port }}",
+ "user" : "{{ glusterfs_heketi_ssh_user }}",
+ "sudo" : {{ glusterfs_heketi_ssh_sudo | lower }}
+ }
+ }
+}
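One detail worth noting: Jinja renders Ansible booleans as `True`/`False`, so the `| lower` filter on the `sudo` field is what keeps the output valid JSON. Illustrative input and result:

```yaml
# Inventory:            openshift_storage_glusterfs_heketi_ssh_sudo: True
# Rendered heketi.json: "sudo" : true
```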
diff --git a/roles/openshift_storage_glusterfs/templates/v3.6/topology.json.j2 b/roles/openshift_storage_glusterfs/templates/v3.6/topology.json.j2
index 3aac68e2f..d6c28f6dd 100644
--- a/roles/openshift_storage_glusterfs/templates/v3.6/topology.json.j2
+++ b/roles/openshift_storage_glusterfs/templates/v3.6/topology.json.j2
@@ -17,10 +17,20 @@
"node": {
"hostnames": {
"manage": [
- "{{ hostvars[node].glusterfs_hostname | default(hostvars[node].openshift.node.nodename) }}"
+{%- if 'glusterfs_hostname' in hostvars[node] -%}
+ "{{ hostvars[node].glusterfs_hostname }}"
+{%- elif 'openshift' in hostvars[node] -%}
+ "{{ hostvars[node].openshift.node.nodename }}"
+{%- else -%}
+ "{{ node }}"
+{%- endif -%}
],
"storage": [
- "{{ hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip) }}"
+{%- if 'glusterfs_ip' in hostvars[node] -%}
+ "{{ hostvars[node].glusterfs_ip }}"
+{%- else -%}
+ "{{ hostvars[node].openshift.common.ip }}"
+{%- endif -%}
]
},
"zone": {{ hostvars[node].glusterfs_zone | default(1) }}