-rw-r--r--  filter_plugins/openshift_master.py | 16
-rw-r--r--  inventory/byo/hosts.origin.example | 15
-rw-r--r--  inventory/byo/hosts.ose.example | 17
-rw-r--r--  playbooks/adhoc/openshift_hosted_logging_efk.yaml | 2
-rw-r--r--  playbooks/common/openshift-cluster/openshift_hosted.yml | 26
-rw-r--r--  playbooks/common/openshift-cluster/redeploy-certificates/ca.yml | 21
-rw-r--r--  roles/cockpit-ui/tasks/main.yml | 2
-rw-r--r--  roles/lib_openshift/library/oc_process.py | 1493
-rw-r--r--  roles/lib_openshift/src/ansible/oc_process.py | 32
-rw-r--r--  roles/lib_openshift/src/class/oc_process.py | 188
-rw-r--r--  roles/lib_openshift/src/doc/process | 84
-rw-r--r--  roles/lib_openshift/src/sources.yml | 10
-rwxr-xr-x  roles/lib_openshift/src/test/integration/oc_process.yml | 83
-rwxr-xr-x  roles/lib_openshift/src/test/unit/oc_process.py | 483
-rw-r--r--  roles/lib_openshift/tasks/main.yml | 9
-rw-r--r--  roles/openshift_logging/README.md | 4
-rw-r--r--  roles/openshift_logging/defaults/main.yml | 28
-rw-r--r--  roles/openshift_logging/files/fluent.conf | 1
-rw-r--r--  roles/openshift_logging/tasks/upgrade_logging.yaml | 2
-rw-r--r--  roles/openshift_logging/templates/curator.j2 | 2
-rw-r--r--  roles/openshift_logging/templates/fluentd.j2 | 2
-rw-r--r--  roles/openshift_logging/templates/kibana.j2 | 2
-rw-r--r--  roles/openshift_master/templates/master.yaml.v1.j2 | 12
-rw-r--r--  roles/openshift_metrics/defaults/main.yaml | 3
-rw-r--r--  roles/openshift_metrics/templates/hawkular_cassandra_rc.j2 | 3
25 files changed, 2488 insertions, 52 deletions
diff --git a/filter_plugins/openshift_master.py b/filter_plugins/openshift_master.py
index 4ccee91f9..6d009077a 100644
--- a/filter_plugins/openshift_master.py
+++ b/filter_plugins/openshift_master.py
@@ -162,7 +162,7 @@ class LDAPPasswordIdentityProvider(IdentityProviderBase):
AnsibleFilterError:
"""
def __init__(self, api_version, idp):
- super(self.__class__, self).__init__(api_version, idp)
+ super(LDAPPasswordIdentityProvider, self).__init__(api_version, idp)
self._allow_additional = False
self._required += [['attributes'], ['url'], ['insecure']]
self._optional += [['ca'],
@@ -206,7 +206,7 @@ class KeystonePasswordIdentityProvider(IdentityProviderBase):
AnsibleFilterError:
"""
def __init__(self, api_version, idp):
- super(self.__class__, self).__init__(api_version, idp)
+ super(KeystonePasswordIdentityProvider, self).__init__(api_version, idp)
self._allow_additional = False
self._required += [['url'], ['domainName', 'domain_name']]
self._optional += [['ca'], ['certFile', 'cert_file'], ['keyFile', 'key_file']]
@@ -225,7 +225,7 @@ class RequestHeaderIdentityProvider(IdentityProviderBase):
AnsibleFilterError:
"""
def __init__(self, api_version, idp):
- super(self.__class__, self).__init__(api_version, idp)
+ super(RequestHeaderIdentityProvider, self).__init__(api_version, idp)
self._allow_additional = False
self._required += [['headers']]
self._optional += [['challengeURL', 'challenge_url'],
@@ -256,7 +256,7 @@ class AllowAllPasswordIdentityProvider(IdentityProviderBase):
AnsibleFilterError:
"""
def __init__(self, api_version, idp):
- super(self.__class__, self).__init__(api_version, idp)
+ super(AllowAllPasswordIdentityProvider, self).__init__(api_version, idp)
self._allow_additional = False
@@ -273,7 +273,7 @@ class DenyAllPasswordIdentityProvider(IdentityProviderBase):
AnsibleFilterError:
"""
def __init__(self, api_version, idp):
- super(self.__class__, self).__init__(api_version, idp)
+ super(DenyAllPasswordIdentityProvider, self).__init__(api_version, idp)
self._allow_additional = False
@@ -290,7 +290,7 @@ class HTPasswdPasswordIdentityProvider(IdentityProviderBase):
AnsibleFilterError:
"""
def __init__(self, api_version, idp):
- super(self.__class__, self).__init__(api_version, idp)
+ super(HTPasswdPasswordIdentityProvider, self).__init__(api_version, idp)
self._allow_additional = False
self._required += [['file', 'filename', 'fileName', 'file_name']]
@@ -315,7 +315,7 @@ class BasicAuthPasswordIdentityProvider(IdentityProviderBase):
AnsibleFilterError:
"""
def __init__(self, api_version, idp):
- super(self.__class__, self).__init__(api_version, idp)
+ super(BasicAuthPasswordIdentityProvider, self).__init__(api_version, idp)
self._allow_additional = False
self._required += [['url']]
self._optional += [['ca'], ['certFile', 'cert_file'], ['keyFile', 'key_file']]
@@ -334,7 +334,7 @@ class IdentityProviderOauthBase(IdentityProviderBase):
AnsibleFilterError:
"""
def __init__(self, api_version, idp):
- super(self.__class__, self).__init__(api_version, idp)
+ super(IdentityProviderOauthBase, self).__init__(api_version, idp)
self._allow_additional = False
self._required += [['clientID', 'client_id'], ['clientSecret', 'client_secret']]
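
Why the change matters: super(self.__class__, ...) breaks as soon as one of these identity-provider classes is subclassed, because self.__class__ always names the most-derived class, so the parent's __init__ keeps re-dispatching to itself. A minimal sketch of the failure mode (illustrative only, not taken from this patch):

    class Base(object):
        def __init__(self):
            print('Base.__init__')

    class Parent(Base):
        def __init__(self):
            # BAD: when called via Child, self.__class__ is Child, so super()
            # resolves back to Parent and this __init__ recurses forever.
            super(self.__class__, self).__init__()

    class Child(Parent):
        def __init__(self):
            super(Child, self).__init__()

    # Child()  # RuntimeError: maximum recursion depth exceeded
    # Naming the class explicitly, as this commit does, keeps the MRO walk anchored.
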
diff --git a/inventory/byo/hosts.origin.example b/inventory/byo/hosts.origin.example
index c5f9db909..7741730ad 100644
--- a/inventory/byo/hosts.origin.example
+++ b/inventory/byo/hosts.origin.example
@@ -301,6 +301,13 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Disable management of the OpenShift Router
#openshift_hosted_manage_router=false
+# OpenShift Registry Console Options
+# Override the console image prefix for enterprise deployments, not used in origin
+# default is "registry.access.redhat.com/openshift3/" and the image appended is "registry-console"
+#openshift_cockpit_deployer_prefix=registry.example.com/myrepo/
+# Override image version, defaults to latest for origin, matches the product version for enterprise
+#openshift_cockpit_deployer_version=1.4.1
+
# Openshift Registry Options
#
# An OpenShift registry will be created during install if there are
@@ -481,11 +488,11 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_master_logging_public_url=https://kibana.example.com
# Configure the number of elastic search nodes, unless you're using dynamic provisioning
# this value must be 1
-#openshift_logging_es_cluster_size=1
-#openshift_logging_kibana_hostname=logging.apps.example.com
+#openshift_hosted_logging_elasticsearch_cluster_size=1
+#openshift_hosted_logging_hostname=logging.apps.example.com
# Configure the prefix and version for the deployer image
-#openshift_logging_image_prefix=registry.example.com:8888/openshift3/
-#openshift_logging_image_version=3.3.0
+#openshift_hosted_logging_deployer_prefix=registry.example.com:8888/openshift3/
+#openshift_hosted_logging_deployer_version=3.3.0
# Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet')
# os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant'
diff --git a/inventory/byo/hosts.ose.example b/inventory/byo/hosts.ose.example
index cacdbf115..3da9be081 100644
--- a/inventory/byo/hosts.ose.example
+++ b/inventory/byo/hosts.ose.example
@@ -301,6 +301,13 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Disable management of the OpenShift Router
#openshift_hosted_manage_router=false
+# OpenShift Registry Console Options
+# Override the console image prefix for enterprise deployments, not used in origin
+# default is "registry.access.redhat.com/openshift3/" and the image appended is "registry-console"
+#openshift_cockpit_deployer_prefix=registry.example.com/myrepo/
+# Override image version, defaults to latest for origin, matches the product version for enterprise
+#openshift_cockpit_deployer_version=1.4.1
+
# Openshift Registry Options
#
# An OpenShift registry will be created during install if there are
@@ -475,18 +482,18 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# pods are deleted
#
# Other Logging Options -- Common items you may wish to reconfigure, for the complete
-# list of options please see roles/openshift_hosted_logging/README.md
+# list of options please see roles/openshift_logging/README.md
#
# Configure loggingPublicURL in the master config for aggregate logging, defaults
# to https://kibana.{{ openshift_master_default_subdomain }}
#openshift_master_logging_public_url=https://kibana.example.com
# Configure the number of elastic search nodes, unless you're using dynamic provisioning
# this value must be 1
-#openshift_logging_es_cluster_size=1
-#openshift_logging_kibana_hostname=logging.apps.example.com
+#openshift_hosted_logging_elasticsearch_cluster_size=1
+#openshift_hosted_logging_hostname=logging.apps.example.com
# Configure the prefix and version for the deployer image
-#openshift_logging_image_prefix=registry.example.com:8888/openshift3/
-#openshift_logging_image_version=3.3.0
+#openshift_hosted_logging_deployer_prefix=registry.example.com:8888/openshift3/
+#openshift_hosted_logging_deployer_version=3.3.0
# Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet')
# os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant'
diff --git a/playbooks/adhoc/openshift_hosted_logging_efk.yaml b/playbooks/adhoc/openshift_hosted_logging_efk.yaml
index e83351272..44a2ef534 100644
--- a/playbooks/adhoc/openshift_hosted_logging_efk.yaml
+++ b/playbooks/adhoc/openshift_hosted_logging_efk.yaml
@@ -8,7 +8,7 @@
hosts: masters:!masters[0]
pre_tasks:
- set_fact:
- logging_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}"
+ openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}"
tasks:
- include_role:
name: openshift_logging
diff --git a/playbooks/common/openshift-cluster/openshift_hosted.yml b/playbooks/common/openshift-cluster/openshift_hosted.yml
index 7b58eebc3..ca4f5b8b2 100644
--- a/playbooks/common/openshift-cluster/openshift_hosted.yml
+++ b/playbooks/common/openshift-cluster/openshift_hosted.yml
@@ -31,17 +31,17 @@
when: openshift_hosted_metrics_deploy | default(false) | bool
- role: openshift_logging
when: openshift_hosted_logging_deploy | default(false) | bool
- openshift_logging_kibana_hostname: "{{ logging_hostname }}"
- openshift_logging_kibana_ops_hostname: "{{ logging_ops_hostname }}"
- openshift_logging_master_public_url: "{{ logging_master_public_url }}"
- openshift_logging_es_cluster_size: "{{ logging_elasticsearch_cluster_size }}"
- openshift_logging_es_pvc_dynamic: "{{ 'true' if openshift_hosted_logging_storage_kind | default(none) == 'dynamic' else '' }}"
- openshift_logging_es_pvc_size: "{{ openshift.hosted.logging.storage.volume.size if openshift_hosted_logging_storage_kind | default(none) in ['dynamic','nfs'] else '' }}"
- openshift_logging_es_pvc_prefix: "{{ 'logging-es' if openshift_hosted_logging_storage_kind | default(none) == 'dynamic' else '' }}"
- openshift_logging_es_ops_cluster_size: "{{ logging_elasticsearch_ops_cluster_size }}"
- openshift_logging_es_ops_pvc_dynamic: "{{ 'true' if openshift_hosted_logging_storage_kind | default(none) == 'dynamic' else '' }}"
- openshift_logging_es_ops_pvc_size: "{{ openshift.hosted.logging.storage.volume.size if openshift_hosted_logging_storage_kind | default(none) in ['dynamic','nfs' ] else '' }}"
- openshift_logging_es_ops_pvc_prefix: "{{ 'logging-es' if openshift_hosted_logging_storage_kind | default(none) =='dynamic' else '' }}"
+ openshift_hosted_logging_hostname: "{{ logging_hostname }}"
+ openshift_hosted_logging_ops_hostname: "{{ logging_ops_hostname }}"
+ openshift_hosted_logging_master_public_url: "{{ logging_master_public_url }}"
+ openshift_hosted_logging_elasticsearch_cluster_size: "{{ logging_elasticsearch_cluster_size }}"
+ openshift_hosted_logging_elasticsearch_pvc_dynamic: "{{ 'true' if openshift_hosted_logging_storage_kind | default(none) == 'dynamic' else '' }}"
+ openshift_hosted_logging_elasticsearch_pvc_size: "{{ openshift.hosted.logging.storage.volume.size if openshift_hosted_logging_storage_kind | default(none) in ['dynamic','nfs'] else '' }}"
+ openshift_hosted_logging_elasticsearch_pvc_prefix: "{{ 'logging-es' if openshift_hosted_logging_storage_kind | default(none) == 'dynamic' else '' }}"
+ openshift_hosted_logging_elasticsearch_ops_cluster_size: "{{ logging_elasticsearch_ops_cluster_size }}"
+ openshift_hosted_logging_elasticsearch_ops_pvc_dynamic: "{{ 'true' if openshift_hosted_logging_storage_kind | default(none) == 'dynamic' else '' }}"
+ openshift_hosted_logging_elasticsearch_ops_pvc_size: "{{ openshift.hosted.logging.storage.volume.size if openshift_hosted_logging_storage_kind | default(none) in ['dynamic','nfs' ] else '' }}"
+ openshift_hosted_logging_elasticsearch_ops_pvc_prefix: "{{ 'logging-es' if openshift_hosted_logging_storage_kind | default(none) =='dynamic' else '' }}"
- role: cockpit-ui
when: ( openshift.common.version_gte_3_3_or_1_3 | bool ) and ( openshift_hosted_manage_registry | default(true) | bool ) and not (openshift.docker.hosted_registry_insecure | default(false) | bool)
@@ -52,11 +52,11 @@
- hosted
pre_tasks:
- set_fact:
- logging_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}"
+ openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}"
tasks:
- block:
- include_role:
- name: openshift_hosted_logging
+ name: openshift_logging
tasks_from: update_master_config
when: openshift_hosted_logging_deploy | default(false) | bool
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/ca.yml b/playbooks/common/openshift-cluster/redeploy-certificates/ca.yml
index 0b1c39ba4..9d4d3ea26 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/ca.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/ca.yml
@@ -160,6 +160,27 @@
yaml_key: servingInfo.clientCA
yaml_value: ca-bundle.crt
when: (g_master_config_output.content|b64decode|from_yaml).servingInfo.clientCA != 'ca-bundle.crt'
+ - modify_yaml:
+ dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
+ yaml_key: etcdClientInfo.ca
+ yaml_value: ca-bundle.crt
+ when:
+ - groups.oo_etcd_to_config | default([]) | length == 0
+ - (g_master_config_output.content|b64decode|from_yaml).etcdClientInfo.ca != 'ca-bundle.crt'
+ - modify_yaml:
+ dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
+ yaml_key: etcdConfig.peerServingInfo.clientCA
+ yaml_value: ca-bundle.crt
+ when:
+ - groups.oo_etcd_to_config | default([]) | length == 0
+ - (g_master_config_output.content|b64decode|from_yaml).etcdConfig.peerServingInfo.clientCA != 'ca-bundle.crt'
+ - modify_yaml:
+ dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
+ yaml_key: etcdConfig.servingInfo.clientCA
+ yaml_value: ca-bundle.crt
+ when:
+ - groups.oo_etcd_to_config | default([]) | length == 0
+ - (g_master_config_output.content|b64decode|from_yaml).etcdConfig.servingInfo.clientCA != 'ca-bundle.crt'
- name: Copy current OpenShift CA to legacy directory
hosts: oo_masters_to_config
diff --git a/roles/cockpit-ui/tasks/main.yml b/roles/cockpit-ui/tasks/main.yml
index 87ed7fee7..f2ef4f161 100644
--- a/roles/cockpit-ui/tasks/main.yml
+++ b/roles/cockpit-ui/tasks/main.yml
@@ -63,10 +63,12 @@
changed_when: false
when: check_docker_registry_exists.rc == 0
+# TODO: Need to fix the origin and enterprise templates so that they both respect IMAGE_PREFIX
- name: Deploy registry-console
command: >
{{ openshift.common.client_binary }} new-app --template=registry-console
{% if openshift_cockpit_deployer_prefix is defined %}-p IMAGE_PREFIX="{{ openshift_cockpit_deployer_prefix }}"{% endif %}
+ {% if openshift_cockpit_deployer_version is defined %}-p IMAGE_VERSION="{{ openshift_cockpit_deployer_version }}"{% endif %}
-p OPENSHIFT_OAUTH_PROVIDER_URL="{{ openshift.master.public_api_url }}"
-p REGISTRY_HOST="{{ docker_registry_route.stdout }}"
-p COCKPIT_KUBE_URL="{{ registry_console_cockpit_kube_url.stdout }}"
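
The new -p IMAGE_VERSION flag is only emitted when openshift_cockpit_deployer_version is defined. A quick way to see how the Jinja2 conditional renders, using a trimmed stand-in for the task's command template (variable names and values here are made up):

    from jinja2 import Template

    cmd = Template(
        'oc new-app --template=registry-console'
        '{% if prefix is defined %} -p IMAGE_PREFIX="{{ prefix }}"{% endif %}'
        '{% if version is defined %} -p IMAGE_VERSION="{{ version }}"{% endif %}'
    )

    print(cmd.render(prefix='registry.example.com/myrepo/'))
    # -> oc new-app --template=registry-console -p IMAGE_PREFIX="registry.example.com/myrepo/"
    print(cmd.render(prefix='registry.example.com/myrepo/', version='3.3'))
    # -> ... -p IMAGE_PREFIX="registry.example.com/myrepo/" -p IMAGE_VERSION="3.3"
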
diff --git a/roles/lib_openshift/library/oc_process.py b/roles/lib_openshift/library/oc_process.py
new file mode 100644
index 000000000..702cb02d4
--- /dev/null
+++ b/roles/lib_openshift/library/oc_process.py
@@ -0,0 +1,1493 @@
+#!/usr/bin/env python
+# pylint: disable=missing-docstring
+# flake8: noqa: T001
+# ___ ___ _ _ ___ ___ _ _____ ___ ___
+# / __| __| \| | __| _ \ /_\_ _| __| \
+# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
+# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
+# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
+# | |) | (_) | | .` | (_) || | | _|| |) | | | |
+# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
+#
+# Copyright 2016 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# -*- -*- -*- Begin included fragment: lib/import.py -*- -*- -*-
+'''
+ OpenShiftCLI class that wraps the oc commands in a subprocess
+'''
+# pylint: disable=too-many-lines
+
+from __future__ import print_function
+import atexit
+import json
+import os
+import re
+import shutil
+import subprocess
+import tempfile
+# pylint: disable=import-error
+import ruamel.yaml as yaml
+from ansible.module_utils.basic import AnsibleModule
+
+# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: doc/process -*- -*- -*-
+
+DOCUMENTATION = '''
+---
+module: oc_process
+short_description: Module to process openshift templates
+description:
+ - Process openshift templates programmatically.
+options:
+ state:
+ description:
+ - State has a few different meanings when it comes to process.
+ - state: present - This state runs an `oc process <template>`. When used in
+ - conjunction with 'create: True' the process will be piped to | oc create -f
+ - state: absent - will remove a template
+ - state: list - will perform an `oc get template <template_name>`
+ default: present
+ choices: ["present", "absent", "list"]
+ aliases: []
+ kubeconfig:
+ description:
+ - The path for the kubeconfig file to use for authentication
+ required: false
+ default: /etc/origin/master/admin.kubeconfig
+ aliases: []
+ debug:
+ description:
+ - Turn on debug output.
+ required: false
+ default: False
+ aliases: []
+ template_name:
+ description:
+ - Name of the openshift template that is being processed.
+ required: false
+ default: None
+ aliases: []
+ namespace:
+ description:
+ - The namespace where the template lives.
+ required: false
+ default: default
+ aliases: []
+ content:
+ description:
+ - Template content that will be processed.
+ required: false
+ default: None
+ aliases: []
+ params:
+ description:
+ - A list of parameters that will be inserted into the template.
+ required: false
+ default: None
+ aliases: []
+ create:
+ description:
+ - Whether or not to create the template after being processed. e.g. oc process | oc create -f -
+ required: False
+ default: False
+ aliases: []
+ reconcile:
+ description:
+ - Whether or not to attempt to determine if there are updates or changes in the incoming template.
+ default: true
+ aliases: []
+author:
+- "Kenny Woodson <kwoodson@redhat.com>"
+extends_documentation_fragment: []
+'''
+
+EXAMPLES = '''
+- name: process the cloud volume provisioner template with variables
+ oc_process:
+ namespace: openshift-infra
+ template_name: online-volume-provisioner
+ create: True
+ params:
+ PLAT: rhel7
+ register: processout
+ run_once: true
+- debug: var=processout
+'''
+
+# -*- -*- -*- End included fragment: doc/process -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
+# noqa: E301,E302
+
+
+class YeditException(Exception):
+ ''' Exception class for Yedit '''
+ pass
+
+
+# pylint: disable=too-many-public-methods
+class Yedit(object):
+ ''' Class to modify yaml files '''
+ re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ com_sep = set(['.', '#', '|', ':'])
+
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ filename=None,
+ content=None,
+ content_type='yaml',
+ separator='.',
+ backup=False):
+ self.content = content
+ self._separator = separator
+ self.filename = filename
+ self.__yaml_dict = content
+ self.content_type = content_type
+ self.backup = backup
+ self.load(content_type=self.content_type)
+ if self.__yaml_dict is None:
+ self.__yaml_dict = {}
+
+ @property
+ def separator(self):
+ ''' getter method for separator '''
+ return self._separator
+
+ @separator.setter
+ def separator(self, value):
+ ''' setter method for separator '''
+ self._separator = value
+
+ @property
+ def yaml_dict(self):
+ ''' getter method for yaml_dict '''
+ return self.__yaml_dict
+
+ @yaml_dict.setter
+ def yaml_dict(self, value):
+ ''' setter method for yaml_dict '''
+ self.__yaml_dict = value
+
+ @staticmethod
+ def parse_key(key, sep='.'):
+ '''parse the key allowing the appropriate separator'''
+ common_separators = list(Yedit.com_sep - set([sep]))
+ return re.findall(Yedit.re_key % ''.join(common_separators), key)
+
+ @staticmethod
+ def valid_key(key, sep='.'):
+ '''validate the incoming key'''
+ common_separators = list(Yedit.com_sep - set([sep]))
+ if not re.match(Yedit.re_valid_key % ''.join(common_separators), key):
+ return False
+
+ return True
+
+ @staticmethod
+ def remove_entry(data, key, sep='.'):
+ ''' remove data at location key '''
+ if key == '' and isinstance(data, dict):
+ data.clear()
+ return True
+ elif key == '' and isinstance(data, list):
+ del data[:]
+ return True
+
+ if not (key and Yedit.valid_key(key, sep)) and \
+ isinstance(data, (list, dict)):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes[:-1]:
+ if dict_key and isinstance(data, dict):
+ data = data.get(dict_key, None)
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ # process last index for remove
+ # expected list entry
+ if key_indexes[-1][0]:
+ if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
+ del data[int(key_indexes[-1][0])]
+ return True
+
+ # expected dict entry
+ elif key_indexes[-1][1]:
+ if isinstance(data, dict):
+ del data[key_indexes[-1][1]]
+ return True
+
+ @staticmethod
+ def add_entry(data, key, item=None, sep='.'):
+ ''' Add an item to a dictionary with key notation a.b.c
+ d = {'a': {'b': 'c'}}
+ key = a#b
+ return c
+ '''
+ if key == '':
+ pass
+ elif (not (key and Yedit.valid_key(key, sep)) and
+ isinstance(data, (list, dict))):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes[:-1]:
+ if dict_key:
+ if isinstance(data, dict) and dict_key in data and data[dict_key]: # noqa: E501
+ data = data[dict_key]
+ continue
+
+ elif data and not isinstance(data, dict):
+ return None
+
+ data[dict_key] = {}
+ data = data[dict_key]
+
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ if key == '':
+ data = item
+
+ # process last index for add
+ # expected list entry
+ elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
+ data[int(key_indexes[-1][0])] = item
+
+ # expected dict entry
+ elif key_indexes[-1][1] and isinstance(data, dict):
+ data[key_indexes[-1][1]] = item
+
+ return data
+
+ @staticmethod
+ def get_entry(data, key, sep='.'):
+ ''' Get an item from a dictionary with key notation a.b.c
+ d = {'a': {'b': 'c'}}
+ key = a.b
+ return c
+ '''
+ if key == '':
+ pass
+ elif (not (key and Yedit.valid_key(key, sep)) and
+ isinstance(data, (list, dict))):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes:
+ if dict_key and isinstance(data, dict):
+ data = data.get(dict_key, None)
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ return data
+
+ @staticmethod
+ def _write(filename, contents):
+ ''' Actually write the file contents to disk. This helps with mocking. '''
+
+ tmp_filename = filename + '.yedit'
+
+ with open(tmp_filename, 'w') as yfd:
+ yfd.write(contents)
+
+ os.rename(tmp_filename, filename)
+
+ def write(self):
+ ''' write to file '''
+ if not self.filename:
+ raise YeditException('Please specify a filename.')
+
+ if self.backup and self.file_exists():
+ shutil.copy(self.filename, self.filename + '.orig')
+
+ # pylint: disable=no-member
+ if hasattr(self.yaml_dict, 'fa'):
+ self.yaml_dict.fa.set_block_style()
+
+ Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
+
+ return (True, self.yaml_dict)
+
+ def read(self):
+ ''' read from file '''
+ # check if it exists
+ if self.filename is None or not self.file_exists():
+ return None
+
+ contents = None
+ with open(self.filename) as yfd:
+ contents = yfd.read()
+
+ return contents
+
+ def file_exists(self):
+ ''' return whether file exists '''
+ if os.path.exists(self.filename):
+ return True
+
+ return False
+
+ def load(self, content_type='yaml'):
+ ''' return yaml file '''
+ contents = self.read()
+
+ if not contents and not self.content:
+ return None
+
+ if self.content:
+ if isinstance(self.content, dict):
+ self.yaml_dict = self.content
+ return self.yaml_dict
+ elif isinstance(self.content, str):
+ contents = self.content
+
+ # check if it is yaml
+ try:
+ if content_type == 'yaml' and contents:
+ self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
+ # pylint: disable=no-member
+ if hasattr(self.yaml_dict, 'fa'):
+ self.yaml_dict.fa.set_block_style()
+ elif content_type == 'json' and contents:
+ self.yaml_dict = json.loads(contents)
+ except yaml.YAMLError as err:
+ # Error loading yaml or json
+ raise YeditException('Problem with loading yaml file. %s' % err)
+
+ return self.yaml_dict
+
+ def get(self, key):
+ ''' get a specified key'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
+ except KeyError:
+ entry = None
+
+ return entry
+
+ def pop(self, path, key_or_item):
+ ''' remove a key, value pair from a dict or an item for a list'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ return (False, self.yaml_dict)
+
+ if isinstance(entry, dict):
+ # pylint: disable=no-member,maybe-no-member
+ if key_or_item in entry:
+ entry.pop(key_or_item)
+ return (True, self.yaml_dict)
+ return (False, self.yaml_dict)
+
+ elif isinstance(entry, list):
+ # pylint: disable=no-member,maybe-no-member
+ ind = None
+ try:
+ ind = entry.index(key_or_item)
+ except ValueError:
+ return (False, self.yaml_dict)
+
+ entry.pop(ind)
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ def delete(self, path):
+ ''' remove path from a dict'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ return (False, self.yaml_dict)
+
+ result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
+ if not result:
+ return (False, self.yaml_dict)
+
+ return (True, self.yaml_dict)
+
+ def exists(self, path, value):
+ ''' check if value exists at path'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if isinstance(entry, list):
+ if value in entry:
+ return True
+ return False
+
+ elif isinstance(entry, dict):
+ if isinstance(value, dict):
+ rval = False
+ for key, val in value.items():
+ if entry[key] != val:
+ rval = False
+ break
+ else:
+ rval = True
+ return rval
+
+ return value in entry
+
+ return entry == value
+
+ def append(self, path, value):
+ '''append value to a list'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ self.put(path, [])
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ if not isinstance(entry, list):
+ return (False, self.yaml_dict)
+
+ # pylint: disable=no-member,maybe-no-member
+ entry.append(value)
+ return (True, self.yaml_dict)
+
+ # pylint: disable=too-many-arguments
+ def update(self, path, value, index=None, curr_value=None):
+ ''' put path, value into a dict '''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if isinstance(entry, dict):
+ # pylint: disable=no-member,maybe-no-member
+ if not isinstance(value, dict):
+ raise YeditException('Cannot replace key, value entry in ' +
+ 'dict with non-dict type. value=[%s] [%s]' % (value, type(value))) # noqa: E501
+
+ entry.update(value)
+ return (True, self.yaml_dict)
+
+ elif isinstance(entry, list):
+ # pylint: disable=no-member,maybe-no-member
+ ind = None
+ if curr_value:
+ try:
+ ind = entry.index(curr_value)
+ except ValueError:
+ return (False, self.yaml_dict)
+
+ elif index is not None:
+ ind = index
+
+ if ind is not None and entry[ind] != value:
+ entry[ind] = value
+ return (True, self.yaml_dict)
+
+ # see if it exists in the list
+ try:
+ ind = entry.index(value)
+ except ValueError:
+ # doesn't exist, append it
+ entry.append(value)
+ return (True, self.yaml_dict)
+
+ # already exists, return
+ if ind is not None:
+ return (False, self.yaml_dict)
+ return (False, self.yaml_dict)
+
+ def put(self, path, value):
+ ''' put path, value into a dict '''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry == value:
+ return (False, self.yaml_dict)
+
+ # deepcopy didn't work
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ # pylint: disable=no-member
+ if hasattr(self.yaml_dict, 'fa'):
+ tmp_copy.fa.set_block_style()
+ result = Yedit.add_entry(tmp_copy, path, value, self.separator)
+ if not result:
+ return (False, self.yaml_dict)
+
+ self.yaml_dict = tmp_copy
+
+ return (True, self.yaml_dict)
+
+ def create(self, path, value):
+ ''' create a yaml file '''
+ if not self.file_exists():
+ # deepcopy didn't work
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False), # noqa: E501
+ yaml.RoundTripLoader)
+ # pylint: disable=no-member
+ if hasattr(self.yaml_dict, 'fa'):
+ tmp_copy.fa.set_block_style()
+ result = Yedit.add_entry(tmp_copy, path, value, self.separator)
+ if result:
+ self.yaml_dict = tmp_copy
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ @staticmethod
+ def get_curr_value(invalue, val_type):
+ '''return the current value'''
+ if invalue is None:
+ return None
+
+ curr_value = invalue
+ if val_type == 'yaml':
+ curr_value = yaml.load(invalue)
+ elif val_type == 'json':
+ curr_value = json.loads(invalue)
+
+ return curr_value
+
+ @staticmethod
+ def parse_value(inc_value, vtype=''):
+ '''determine value type passed'''
+ true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
+ 'on', 'On', 'ON', ]
+ false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
+ 'off', 'Off', 'OFF']
+
+ # It came in as a string but you didn't specify value_type as string
+ # we will convert to bool if it matches any of the above cases
+ if isinstance(inc_value, str) and 'bool' in vtype:
+ if inc_value not in true_bools and inc_value not in false_bools:
+ raise YeditException('Not a boolean type. str=[%s] vtype=[%s]'
+ % (inc_value, vtype))
+ elif isinstance(inc_value, bool) and 'str' in vtype:
+ inc_value = str(inc_value)
+
+ # If vtype is not str then go ahead and attempt to yaml load it.
+ if isinstance(inc_value, str) and 'str' not in vtype:
+ try:
+ inc_value = yaml.load(inc_value)
+ except Exception:
+ raise YeditException('Could not determine type of incoming ' +
+ 'value. value=[%s] vtype=[%s]'
+ % (type(inc_value), vtype))
+
+ return inc_value
+
+ # pylint: disable=too-many-return-statements,too-many-branches
+ @staticmethod
+ def run_ansible(module):
+ '''perform the idempotent crud operations'''
+ yamlfile = Yedit(filename=module.params['src'],
+ backup=module.params['backup'],
+ separator=module.params['separator'])
+
+ if module.params['src']:
+ rval = yamlfile.load()
+
+ if yamlfile.yaml_dict is None and \
+ module.params['state'] != 'present':
+ return {'failed': True,
+ 'msg': 'Error opening file [%s]. Verify that the ' +
+ 'file exists, that it is has correct' +
+ ' permissions, and is valid yaml.'}
+
+ if module.params['state'] == 'list':
+ if module.params['content']:
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
+ yamlfile.yaml_dict = content
+
+ if module.params['key']:
+ rval = yamlfile.get(module.params['key']) or {}
+
+ return {'changed': False, 'result': rval, 'state': "list"}
+
+ elif module.params['state'] == 'absent':
+ if module.params['content']:
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
+ yamlfile.yaml_dict = content
+
+ if module.params['update']:
+ rval = yamlfile.pop(module.params['key'],
+ module.params['value'])
+ else:
+ rval = yamlfile.delete(module.params['key'])
+
+ if rval[0] and module.params['src']:
+ yamlfile.write()
+
+ return {'changed': rval[0], 'result': rval[1], 'state': "absent"}
+
+ elif module.params['state'] == 'present':
+ # check if content is different than what is in the file
+ if module.params['content']:
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
+
+ # We had no edits to make and the contents are the same
+ if yamlfile.yaml_dict == content and \
+ module.params['value'] is None:
+ return {'changed': False,
+ 'result': yamlfile.yaml_dict,
+ 'state': "present"}
+
+ yamlfile.yaml_dict = content
+
+ # we were passed a value; parse it
+ if module.params['value']:
+ value = Yedit.parse_value(module.params['value'],
+ module.params['value_type'])
+ key = module.params['key']
+ if module.params['update']:
+ # pylint: disable=line-too-long
+ curr_value = Yedit.get_curr_value(Yedit.parse_value(module.params['curr_value']), # noqa: E501
+ module.params['curr_value_format']) # noqa: E501
+
+ rval = yamlfile.update(key, value, module.params['index'], curr_value) # noqa: E501
+
+ elif module.params['append']:
+ rval = yamlfile.append(key, value)
+ else:
+ rval = yamlfile.put(key, value)
+
+ if rval[0] and module.params['src']:
+ yamlfile.write()
+
+ return {'changed': rval[0],
+ 'result': rval[1], 'state': "present"}
+
+ # no edits to make
+ if module.params['src']:
+ # pylint: disable=redefined-variable-type
+ rval = yamlfile.write()
+ return {'changed': rval[0],
+ 'result': rval[1],
+ 'state': "present"}
+
+ return {'failed': True, 'msg': 'Unknown state passed'}
+
+# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
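
Everything in Yedit is driven by dotted-key paths (separator configurable at construction). A short usage sketch against the class defined above; the file path and keys are illustrative, echoing the master-config edits made elsewhere in this commit:

    yed = Yedit(filename='/tmp/master-config.yaml', separator='.', backup=True)
    yed.put('etcdClientInfo.ca', 'ca-bundle.crt')           # creates the nested keys as needed
    print(yed.get('etcdClientInfo.ca'))                     # -> 'ca-bundle.crt'
    yed.put('corsAllowedOrigins', ['example.com'])          # lists are plain values too
    yed.append('corsAllowedOrigins', 'other.example.com')   # append to an existing list
    changed, data = yed.delete('etcdClientInfo.ca')         # (True, <updated dict>)
    yed.write()                                             # backs up to <file>.orig first when the file already exists
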
+
+# -*- -*- -*- Begin included fragment: lib/base.py -*- -*- -*-
+# pylint: disable=too-many-lines
+# noqa: E301,E302,E303,T001
+
+
+class OpenShiftCLIError(Exception):
+ '''Exception class for openshiftcli'''
+ pass
+
+
+# pylint: disable=too-few-public-methods
+class OpenShiftCLI(object):
+ ''' Class to wrap the command line tools '''
+ def __init__(self,
+ namespace,
+ kubeconfig='/etc/origin/master/admin.kubeconfig',
+ verbose=False,
+ all_namespaces=False):
+ ''' Constructor for OpenshiftCLI '''
+ self.namespace = namespace
+ self.verbose = verbose
+ self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig)
+ self.all_namespaces = all_namespaces
+
+ # Pylint allows only 5 arguments to be passed.
+ # pylint: disable=too-many-arguments
+ def _replace_content(self, resource, rname, content, force=False, sep='.'):
+ ''' replace the current object with the content '''
+ res = self._get(resource, rname)
+ if not res['results']:
+ return res
+
+ fname = Utils.create_tmpfile(rname + '-')
+
+ yed = Yedit(fname, res['results'][0], separator=sep)
+ changes = []
+ for key, value in content.items():
+ changes.append(yed.put(key, value))
+
+ if any([change[0] for change in changes]):
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self._replace(fname, force)
+
+ return {'returncode': 0, 'updated': False}
+
+ def _replace(self, fname, force=False):
+ '''replace the current object with oc replace'''
+ cmd = ['replace', '-f', fname]
+ if force:
+ cmd.append('--force')
+ return self.openshift_cmd(cmd)
+
+ def _create_from_content(self, rname, content):
+ '''create a temporary file and then call oc create on it'''
+ fname = Utils.create_tmpfile(rname + '-')
+ yed = Yedit(fname, content=content)
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self._create(fname)
+
+ def _create(self, fname):
+ '''call oc create on a filename'''
+ return self.openshift_cmd(['create', '-f', fname])
+
+ def _delete(self, resource, rname, selector=None):
+ '''call oc delete on a resource'''
+ cmd = ['delete', resource, rname]
+ if selector:
+ cmd.append('--selector=%s' % selector)
+
+ return self.openshift_cmd(cmd)
+
+ def _process(self, template_name, create=False, params=None, template_data=None): # noqa: E501
+ '''process a template
+
+ template_name: the name of the template to process
+ create: whether to send to oc create after processing
+ params: the parameters for the template
+ template_data: the incoming template's data; instead of a file
+ '''
+ cmd = ['process']
+ if template_data:
+ cmd.extend(['-f', '-'])
+ else:
+ cmd.append(template_name)
+ if params:
+ param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ cmd.append('-v')
+ cmd.extend(param_str)
+
+ results = self.openshift_cmd(cmd, output=True, input_data=template_data)
+
+ if results['returncode'] != 0 or not create:
+ return results
+
+ fname = Utils.create_tmpfile(template_name + '-')
+ yed = Yedit(fname, results['results'])
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self.openshift_cmd(['create', '-f', fname])
+
+ def _get(self, resource, rname=None, selector=None):
+ '''return a resource by name '''
+ cmd = ['get', resource]
+ if selector:
+ cmd.append('--selector=%s' % selector)
+ elif rname:
+ cmd.append(rname)
+
+ cmd.extend(['-o', 'json'])
+
+ rval = self.openshift_cmd(cmd, output=True)
+
+ # Ensure results are returned in an array
+ if 'items' in rval:
+ rval['results'] = rval['items']
+ elif not isinstance(rval['results'], list):
+ rval['results'] = [rval['results']]
+
+ return rval
+
+ def _schedulable(self, node=None, selector=None, schedulable=True):
+ ''' perform oadm manage-node schedulable '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector=%s' % selector)
+
+ cmd.append('--schedulable=%s' % schedulable)
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
+
+ def _list_pods(self, node=None, selector=None, pod_selector=None):
+ ''' perform oadm list pods
+
+ node: the node in which to list pods
+ selector: the label selector filter if provided
+ pod_selector: the pod selector filter if provided
+ '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector=%s' % selector)
+
+ if pod_selector:
+ cmd.append('--pod-selector=%s' % pod_selector)
+
+ cmd.extend(['--list-pods', '-o', 'json'])
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
+
+ # pylint: disable=too-many-arguments
+ def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
+ ''' perform oadm manage-node evacuate '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector=%s' % selector)
+
+ if dry_run:
+ cmd.append('--dry-run')
+
+ if pod_selector:
+ cmd.append('--pod-selector=%s' % pod_selector)
+
+ if grace_period:
+ cmd.append('--grace-period=%s' % int(grace_period))
+
+ if force:
+ cmd.append('--force')
+
+ cmd.append('--evacuate')
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
+
+ def _version(self):
+ ''' return the openshift version'''
+ return self.openshift_cmd(['version'], output=True, output_type='raw')
+
+ def _import_image(self, url=None, name=None, tag=None):
+ ''' perform image import '''
+ cmd = ['import-image']
+
+ image = '{0}'.format(name)
+ if tag:
+ image += ':{0}'.format(tag)
+
+ cmd.append(image)
+
+ if url:
+ cmd.append('--from={0}/{1}'.format(url, image))
+
+ cmd.append('-n{0}'.format(self.namespace))
+
+ cmd.append('--confirm')
+ return self.openshift_cmd(cmd)
+
+ def _run(self, cmds, input_data):
+ ''' Actually executes the command. This makes mocking easier. '''
+ curr_env = os.environ.copy()
+ curr_env.update({'KUBECONFIG': self.kubeconfig})
+ proc = subprocess.Popen(cmds,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ env=curr_env)
+
+ stdout, stderr = proc.communicate(input_data)
+
+ return proc.returncode, stdout, stderr
+
+ # pylint: disable=too-many-arguments,too-many-branches
+ def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
+ '''Base command for oc '''
+ cmds = []
+ if oadm:
+ cmds = ['oadm']
+ else:
+ cmds = ['oc']
+
+ if self.all_namespaces:
+ cmds.extend(['--all-namespaces'])
+ elif self.namespace is not None and self.namespace.lower() not in ['none', 'empty']: # E501
+ cmds.extend(['-n', self.namespace])
+
+ cmds.extend(cmd)
+
+ rval = {}
+ results = ''
+ err = None
+
+ if self.verbose:
+ print(' '.join(cmds))
+
+ returncode, stdout, stderr = self._run(cmds, input_data)
+
+ rval = {"returncode": returncode,
+ "results": results,
+ "cmd": ' '.join(cmds)}
+
+ if returncode == 0:
+ if output:
+ if output_type == 'json':
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as err:
+ if "No JSON object could be decoded" in err.args:
+ err = err.args
+ elif output_type == 'raw':
+ rval['results'] = stdout
+
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if err:
+ rval.update({"err": err,
+ "stderr": stderr,
+ "stdout": stdout,
+ "cmd": cmds})
+
+ else:
+ rval.update({"stderr": stderr,
+ "stdout": stdout,
+ "results": {}})
+
+ return rval
+
+
+class Utils(object):
+ ''' utilities for openshiftcli modules '''
+
+ @staticmethod
+ def _write(filename, contents):
+ ''' Actually write the file contents to disk. This helps with mocking. '''
+
+ with open(filename, 'w') as sfd:
+ sfd.write(contents)
+
+ @staticmethod
+ def create_tmp_file_from_contents(rname, data, ftype='yaml'):
+ ''' create a file in tmp with name and contents'''
+
+ tmp = Utils.create_tmpfile(prefix=rname)
+
+ if ftype == 'yaml':
+ Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
+ elif ftype == 'json':
+ Utils._write(tmp, json.dumps(data))
+ else:
+ Utils._write(tmp, data)
+
+ # Register cleanup when module is done
+ atexit.register(Utils.cleanup, [tmp])
+ return tmp
+
+ @staticmethod
+ def create_tmpfile_copy(inc_file):
+ '''create a temporary copy of a file'''
+ tmpfile = Utils.create_tmpfile('lib_openshift-')
+ Utils._write(tmpfile, open(inc_file).read())
+
+ # Cleanup the tmpfile
+ atexit.register(Utils.cleanup, [tmpfile])
+
+ return tmpfile
+
+ @staticmethod
+ def create_tmpfile(prefix='tmp'):
+ ''' Generates and returns a temporary file name '''
+
+ with tempfile.NamedTemporaryFile(prefix=prefix, delete=False) as tmp:
+ return tmp.name
+
+ @staticmethod
+ def create_tmp_files_from_contents(content, content_type=None):
+ '''Turn an array of dict: filename, content into a files array'''
+ if not isinstance(content, list):
+ content = [content]
+ files = []
+ for item in content:
+ path = Utils.create_tmp_file_from_contents(item['path'] + '-',
+ item['data'],
+ ftype=content_type)
+ files.append({'name': os.path.basename(item['path']),
+ 'path': path})
+ return files
+
+ @staticmethod
+ def cleanup(files):
+ '''Clean up on exit '''
+ for sfile in files:
+ if os.path.exists(sfile):
+ if os.path.isdir(sfile):
+ shutil.rmtree(sfile)
+ elif os.path.isfile(sfile):
+ os.remove(sfile)
+
+ @staticmethod
+ def exists(results, _name):
+ ''' Check to see if the results include the name '''
+ if not results:
+ return False
+
+ if Utils.find_result(results, _name):
+ return True
+
+ return False
+
+ @staticmethod
+ def find_result(results, _name):
+ ''' Find the specified result by name'''
+ rval = None
+ for result in results:
+ if 'metadata' in result and result['metadata']['name'] == _name:
+ rval = result
+ break
+
+ return rval
+
+ @staticmethod
+ def get_resource_file(sfile, sfile_type='yaml'):
+ ''' return the service file '''
+ contents = None
+ with open(sfile) as sfd:
+ contents = sfd.read()
+
+ if sfile_type == 'yaml':
+ contents = yaml.load(contents, yaml.RoundTripLoader)
+ elif sfile_type == 'json':
+ contents = json.loads(contents)
+
+ return contents
+
+ @staticmethod
+ def filter_versions(stdout):
+ ''' filter the oc version output '''
+
+ version_dict = {}
+ version_search = ['oc', 'openshift', 'kubernetes']
+
+ for line in stdout.strip().split('\n'):
+ for term in version_search:
+ if not line:
+ continue
+ if line.startswith(term):
+ version_dict[term] = line.split()[-1]
+
+ # horrible hack to get openshift version in Openshift 3.2
+ # By default "oc version in 3.2 does not return an "openshift" version
+ if "openshift" not in version_dict:
+ version_dict["openshift"] = version_dict["oc"]
+
+ return version_dict
+
+ @staticmethod
+ def add_custom_versions(versions):
+ ''' create custom versions strings '''
+
+ versions_dict = {}
+
+ for tech, version in versions.items():
+ # clean up "-" from version
+ if "-" in version:
+ version = version.split("-")[0]
+
+ if version.startswith('v'):
+ versions_dict[tech + '_numeric'] = version[1:].split('+')[0]
+ # "v3.3.0.33" is what we have, we want "3.3"
+ versions_dict[tech + '_short'] = version[1:4]
+
+ return versions_dict
+
+ @staticmethod
+ def openshift_installed():
+ ''' check if openshift is installed '''
+ import yum
+
+ yum_base = yum.YumBase()
+ if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
+ return True
+
+ return False
+
+ # Disabling too-many-branches. This is a yaml dictionary comparison function
+ # pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
+ @staticmethod
+ def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
+ ''' Given a user defined definition, compare it with the results given back by our query. '''
+
+ # Currently these values are autogenerated and we do not need to check them
+ skip = ['metadata', 'status']
+ if skip_keys:
+ skip.extend(skip_keys)
+
+ for key, value in result_def.items():
+ if key in skip:
+ continue
+
+ # Both are lists
+ if isinstance(value, list):
+ if key not in user_def:
+ if debug:
+ print('User data does not have key [%s]' % key)
+ print('User data: %s' % user_def)
+ return False
+
+ if not isinstance(user_def[key], list):
+ if debug:
+ print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key]))
+ return False
+
+ if len(user_def[key]) != len(value):
+ if debug:
+ print("List lengths are not equal.")
+ print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value)))
+ print("user_def: %s" % user_def[key])
+ print("value: %s" % value)
+ return False
+
+ for values in zip(user_def[key], value):
+ if isinstance(values[0], dict) and isinstance(values[1], dict):
+ if debug:
+ print('sending list - list')
+ print(type(values[0]))
+ print(type(values[1]))
+ result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
+ if not result:
+ print('list compare returned false')
+ return False
+
+ elif value != user_def[key]:
+ if debug:
+ print('value should be identical')
+ print(value)
+ print(user_def[key])
+ return False
+
+ # recurse on a dictionary
+ elif isinstance(value, dict):
+ if key not in user_def:
+ if debug:
+ print("user_def does not have key [%s]" % key)
+ return False
+ if not isinstance(user_def[key], dict):
+ if debug:
+ print("dict returned false: not instance of dict")
+ return False
+
+ # before passing ensure keys match
+ api_values = set(value.keys()) - set(skip)
+ user_values = set(user_def[key].keys()) - set(skip)
+ if api_values != user_values:
+ if debug:
+ print("keys are not equal in dict")
+ print(api_values)
+ print(user_values)
+ return False
+
+ result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
+ if not result:
+ if debug:
+ print("dict returned false")
+ print(result)
+ return False
+
+ # Verify each key, value pair is the same
+ else:
+ if key not in user_def or value != user_def[key]:
+ if debug:
+ print("value not equal; user_def does not have key")
+ print(key)
+ print(value)
+ if key in user_def:
+ print(user_def[key])
+ return False
+
+ if debug:
+ print('returning true')
+ return True
+
+
+class OpenShiftCLIConfig(object):
+ '''Generic Config'''
+ def __init__(self, rname, namespace, kubeconfig, options):
+ self.kubeconfig = kubeconfig
+ self.name = rname
+ self.namespace = namespace
+ self._options = options
+
+ @property
+ def config_options(self):
+ ''' return config options '''
+ return self._options
+
+ def to_option_list(self):
+ '''return all options as a string'''
+ return self.stringify()
+
+ def stringify(self):
+ ''' return the options hash as cli params in a string '''
+ rval = []
+ for key, data in self.config_options.items():
+ if data['include'] \
+ and (data['value'] or isinstance(data['value'], int)):
+ rval.append('--%s=%s' % (key.replace('_', '-'), data['value']))
+
+ return rval
+
+
+# -*- -*- -*- End included fragment: lib/base.py -*- -*- -*-
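
Utils.check_def_equal is the heart of the idempotency check: it walks the live object returned by the API against the desired definition, skipping autogenerated sections ('metadata', 'status') plus any caller-supplied skip_keys. A sketch with made-up objects:

    desired = {
        'kind': 'Service',
        'spec': {'ports': [{'port': 5000, 'targetPort': 5000}]},
    }
    live = {
        'kind': 'Service',
        'metadata': {'resourceVersion': '12345'},   # ignored
        'status': {'loadBalancer': {}},             # ignored
        'spec': {'ports': [{'port': 5000, 'targetPort': 5000}]},
    }
    print(Utils.check_def_equal(desired, live))     # True: only generated fields differ

    live['spec']['ports'][0]['port'] = 5001
    print(Utils.check_def_equal(desired, live))     # False: the spec itself drifted
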
+
+# -*- -*- -*- Begin included fragment: class/oc_process.py -*- -*- -*-
+
+
+# pylint: disable=too-many-instance-attributes
+class OCProcess(OpenShiftCLI):
+ ''' Class to wrap the oc command line tools '''
+
+ # pylint allows 5. we need 6
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ namespace,
+ tname=None,
+ params=None,
+ create=False,
+ kubeconfig='/etc/origin/master/admin.kubeconfig',
+ tdata=None,
+ verbose=False):
+ ''' Constructor for OpenshiftOC '''
+ super(OCProcess, self).__init__(namespace, kubeconfig)
+ self.namespace = namespace
+ self.name = tname
+ self.data = tdata
+ self.params = params
+ self.create = create
+ self.kubeconfig = kubeconfig
+ self.verbose = verbose
+ self._template = None
+
+ @property
+ def template(self):
+ '''template property'''
+ if self._template is None:
+ results = self._process(self.name, False, self.params, self.data)
+ if results['returncode'] != 0:
+ raise OpenShiftCLIError('Error processing template [%s].' % self.name)
+ self._template = results['results']['items']
+
+ return self._template
+
+ def get(self):
+ '''get the template'''
+ results = self._get('template', self.name)
+ if results['returncode'] != 0:
+ # Does the template exist??
+ if 'not found' in results['stderr']:
+ results['returncode'] = 0
+ results['exists'] = False
+ results['results'] = []
+
+ return results
+
+ def delete(self, obj):
+ '''delete a resource'''
+ return self._delete(obj['kind'], obj['metadata']['name'])
+
+ def create_obj(self, obj):
+ '''create a resource'''
+ return self._create_from_content(obj['metadata']['name'], obj)
+
+ def process(self, create=None):
+ '''process a template'''
+ do_create = False
+ if create != None:
+ do_create = create
+ else:
+ do_create = self.create
+
+ return self._process(self.name, do_create, self.params, self.data)
+
+ def exists(self):
+ '''return whether the template exists'''
+ # Always return true if we're being passed template data
+ if self.data:
+ return True
+ t_results = self._get('template', self.name)
+
+ if t_results['returncode'] != 0:
+ # Does the template exist??
+ if 'not found' in t_results['stderr']:
+ return False
+ else:
+ raise OpenShiftCLIError('Something went wrong. %s' % t_results)
+
+ return True
+
+ def needs_update(self):
+ '''attempt to process the template and return it for comparison with oc objects'''
+ obj_results = []
+ for obj in self.template:
+
+ # build a list of types to skip
+ skip = []
+
+ if obj['kind'] == 'ServiceAccount':
+ skip.extend(['secrets', 'imagePullSecrets'])
+ if obj['kind'] == 'BuildConfig':
+ skip.extend(['lastTriggeredImageID'])
+ if obj['kind'] == 'ImageStream':
+ skip.extend(['generation'])
+ if obj['kind'] == 'DeploymentConfig':
+ skip.extend(['lastTriggeredImage'])
+
+ # fetch the current object
+ curr_obj_results = self._get(obj['kind'], obj['metadata']['name'])
+ if curr_obj_results['returncode'] != 0:
+ # Does the template exist??
+ if 'not found' in curr_obj_results['stderr']:
+ obj_results.append((obj, True))
+ continue
+
+ # check the generated object against the existing object
+ if not Utils.check_def_equal(obj, curr_obj_results['results'][0], skip_keys=skip):
+ obj_results.append((obj, True))
+ continue
+
+ obj_results.append((obj, False))
+
+ return obj_results
+
+ # pylint: disable=too-many-return-statements
+ @staticmethod
+ def run_ansible(params, check_mode):
+ '''run the ansible idempotent code'''
+
+ ocprocess = OCProcess(params['namespace'],
+ params['template_name'],
+ params['params'],
+ params['create'],
+ kubeconfig=params['kubeconfig'],
+ tdata=params['content'],
+ verbose=params['debug'])
+
+ state = params['state']
+
+ api_rval = ocprocess.get()
+
+ if state == 'list':
+ if api_rval['returncode'] != 0:
+ return {"failed": True, "msg" : api_rval}
+
+ return {"changed" : False, "results": api_rval, "state": "list"}
+
+ elif state == 'present':
+ if check_mode and params['create']:
+ return {"changed": True, 'msg': "CHECK_MODE: Would have processed template."}
+
+ if not ocprocess.exists() or not params['reconcile']:
+ #FIXME: this code will never get run in a way that succeeds when
+ # module.params['reconcile'] is true. Because oc_process doesn't
+ # create the actual template, the check of ocprocess.exists()
+ # is meaningless. Either it's already here and this code
+ # won't be run, or this code will fail because there is no
+ # template available for oc process to use. Have we conflated
+ # the template's existence with the existence of the objects
+ # it describes?
+
+ # Create it here
+ api_rval = ocprocess.process()
+ if api_rval['returncode'] != 0:
+ return {"failed": True, "msg": api_rval}
+
+ if params['create']:
+ return {"changed": True, "results": api_rval, "state": "present"}
+
+ return {"changed": False, "results": api_rval, "state": "present"}
+
+ # verify results
+ update = False
+ rval = []
+ all_results = ocprocess.needs_update()
+ for obj, status in all_results:
+ if status:
+ ocprocess.delete(obj)
+ results = ocprocess.create_obj(obj)
+ results['kind'] = obj['kind']
+ rval.append(results)
+ update = True
+
+ if not update:
+ return {"changed": update, "results": api_rval, "state": "present"}
+
+ for cmd in rval:
+ if cmd['returncode'] != 0:
+ return {"failed": True, "changed": update, "results": rval, "state": "present"}
+
+ return {"changed": update, "results": rval, "state": "present"}
+
+
+# -*- -*- -*- End included fragment: class/oc_process.py -*- -*- -*-
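
OCProcess is normally driven through run_ansible(params, check_mode) with the same keys the module's argument_spec exposes (see main() below). A direct-call sketch, useful for ad-hoc testing; the values are illustrative and borrowed from the EXAMPLES block above:

    params = {
        'state': 'present',
        'namespace': 'openshift-infra',
        'template_name': 'online-volume-provisioner',
        'content': None,
        'params': {'PLAT': 'rhel7'},
        'create': True,
        'reconcile': True,
        'kubeconfig': '/etc/origin/master/admin.kubeconfig',
        'debug': False,
    }
    result = OCProcess.run_ansible(params, check_mode=False)
    # e.g. {'changed': True, 'results': {...}, 'state': 'present'} once the objects exist
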
+
+# -*- -*- -*- Begin included fragment: ansible/oc_process.py -*- -*- -*-
+
+
+def main():
+ '''
+ ansible oc module for processing templates
+ '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
+ state=dict(default='present', type='str', choices=['present', 'list']),
+ debug=dict(default=False, type='bool'),
+ namespace=dict(default='default', type='str'),
+ template_name=dict(default=None, type='str'),
+ content=dict(default=None, type='str'),
+ params=dict(default=None, type='dict'),
+ create=dict(default=False, type='bool'),
+ reconcile=dict(default=True, type='bool'),
+ ),
+ supports_check_mode=True,
+ )
+
+ rval = OCProcess.run_ansible(module.params, module.check_mode)
+ if 'failed' in rval:
+ module.fail_json(**rval)
+
+ module.exit_json(**rval)
+
+if __name__ == '__main__':
+ main()
+
+# -*- -*- -*- End included fragment: ansible/oc_process.py -*- -*- -*-
diff --git a/roles/lib_openshift/src/ansible/oc_process.py b/roles/lib_openshift/src/ansible/oc_process.py
new file mode 100644
index 000000000..17cf865b7
--- /dev/null
+++ b/roles/lib_openshift/src/ansible/oc_process.py
@@ -0,0 +1,32 @@
+# pylint: skip-file
+# flake8: noqa
+
+
+def main():
+ '''
+ ansible oc module for processing templates
+ '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
+ state=dict(default='present', type='str', choices=['present', 'list']),
+ debug=dict(default=False, type='bool'),
+ namespace=dict(default='default', type='str'),
+ template_name=dict(default=None, type='str'),
+ content=dict(default=None, type='str'),
+ params=dict(default=None, type='dict'),
+ create=dict(default=False, type='bool'),
+ reconcile=dict(default=True, type='bool'),
+ ),
+ supports_check_mode=True,
+ )
+
+ rval = OCProcess.run_ansible(module.params, module.check_mode)
+ if 'failed' in rval:
+ module.fail_json(**rval)
+
+ module.exit_json(**rval)
+
+if __name__ == '__main__':
+ main()
diff --git a/roles/lib_openshift/src/class/oc_process.py b/roles/lib_openshift/src/class/oc_process.py
new file mode 100644
index 000000000..80d81448d
--- /dev/null
+++ b/roles/lib_openshift/src/class/oc_process.py
@@ -0,0 +1,188 @@
+# pylint: skip-file
+# flake8: noqa
+
+
+# pylint: disable=too-many-instance-attributes
+class OCProcess(OpenShiftCLI):
+ ''' Class to wrap the oc command line tools '''
+
+    # pylint's default allows 5 arguments; this constructor needs more
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ namespace,
+ tname=None,
+ params=None,
+ create=False,
+ kubeconfig='/etc/origin/master/admin.kubeconfig',
+ tdata=None,
+ verbose=False):
+ ''' Constructor for OpenshiftOC '''
+ super(OCProcess, self).__init__(namespace, kubeconfig)
+ self.namespace = namespace
+ self.name = tname
+ self.data = tdata
+ self.params = params
+ self.create = create
+ self.kubeconfig = kubeconfig
+ self.verbose = verbose
+ self._template = None
+
+ @property
+ def template(self):
+ '''template property'''
+ if self._template is None:
+ results = self._process(self.name, False, self.params, self.data)
+ if results['returncode'] != 0:
+ raise OpenShiftCLIError('Error processing template [%s].' % self.name)
+ self._template = results['results']['items']
+
+ return self._template
+
+ def get(self):
+ '''get the template'''
+ results = self._get('template', self.name)
+ if results['returncode'] != 0:
+ # Does the template exist??
+ if 'not found' in results['stderr']:
+ results['returncode'] = 0
+ results['exists'] = False
+ results['results'] = []
+
+ return results
+
+ def delete(self, obj):
+ '''delete a resource'''
+ return self._delete(obj['kind'], obj['metadata']['name'])
+
+ def create_obj(self, obj):
+ '''create a resource'''
+ return self._create_from_content(obj['metadata']['name'], obj)
+
+ def process(self, create=None):
+ '''process a template'''
+ do_create = False
+        if create is not None:
+ do_create = create
+ else:
+ do_create = self.create
+
+ return self._process(self.name, do_create, self.params, self.data)
+
+ def exists(self):
+ '''return whether the template exists'''
+ # Always return true if we're being passed template data
+ if self.data:
+ return True
+ t_results = self._get('template', self.name)
+
+ if t_results['returncode'] != 0:
+ # Does the template exist??
+ if 'not found' in t_results['stderr']:
+ return False
+ else:
+                raise OpenShiftCLIError('Error getting template [%s]: %s' % (self.name, t_results))
+
+ return True
+
+ def needs_update(self):
+ '''attempt to process the template and return it for comparison with oc objects'''
+ obj_results = []
+ for obj in self.template:
+
+ # build a list of types to skip
+ skip = []
+
+ if obj['kind'] == 'ServiceAccount':
+ skip.extend(['secrets', 'imagePullSecrets'])
+ if obj['kind'] == 'BuildConfig':
+ skip.extend(['lastTriggeredImageID'])
+ if obj['kind'] == 'ImageStream':
+ skip.extend(['generation'])
+ if obj['kind'] == 'DeploymentConfig':
+ skip.extend(['lastTriggeredImage'])
+
+ # fetch the current object
+ curr_obj_results = self._get(obj['kind'], obj['metadata']['name'])
+ if curr_obj_results['returncode'] != 0:
+            # Does the object exist?
+ if 'not found' in curr_obj_results['stderr']:
+ obj_results.append((obj, True))
+ continue
+
+ # check the generated object against the existing object
+ if not Utils.check_def_equal(obj, curr_obj_results['results'][0], skip_keys=skip):
+ obj_results.append((obj, True))
+ continue
+
+ obj_results.append((obj, False))
+
+ return obj_results
+
+ # pylint: disable=too-many-return-statements
+ @staticmethod
+ def run_ansible(params, check_mode):
+ '''run the ansible idempotent code'''
+
+ ocprocess = OCProcess(params['namespace'],
+ params['template_name'],
+ params['params'],
+ params['create'],
+ kubeconfig=params['kubeconfig'],
+ tdata=params['content'],
+ verbose=params['debug'])
+
+ state = params['state']
+
+ api_rval = ocprocess.get()
+
+ if state == 'list':
+ if api_rval['returncode'] != 0:
+ return {"failed": True, "msg" : api_rval}
+
+ return {"changed" : False, "results": api_rval, "state": "list"}
+
+ elif state == 'present':
+ if check_mode and params['create']:
+ return {"changed": True, 'msg': "CHECK_MODE: Would have processed template."}
+
+ if not ocprocess.exists() or not params['reconcile']:
+ #FIXME: this code will never get run in a way that succeeds when
+ # module.params['reconcile'] is true. Because oc_process doesn't
+ # create the actual template, the check of ocprocess.exists()
+ # is meaningless. Either it's already here and this code
+ # won't be run, or this code will fail because there is no
+ # template available for oc process to use. Have we conflated
+ # the template's existence with the existence of the objects
+ # it describes?
+
+ # Create it here
+ api_rval = ocprocess.process()
+ if api_rval['returncode'] != 0:
+ return {"failed": True, "msg": api_rval}
+
+ if params['create']:
+ return {"changed": True, "results": api_rval, "state": "present"}
+
+ return {"changed": False, "results": api_rval, "state": "present"}
+
+ # verify results
+ update = False
+ rval = []
+ all_results = ocprocess.needs_update()
+ for obj, status in all_results:
+ if status:
+ ocprocess.delete(obj)
+ results = ocprocess.create_obj(obj)
+ results['kind'] = obj['kind']
+ rval.append(results)
+ update = True
+
+ if not update:
+ return {"changed": update, "results": api_rval, "state": "present"}
+
+ for cmd in rval:
+ if cmd['returncode'] != 0:
+ return {"failed": True, "changed": update, "results": rval, "state": "present"}
+
+ return {"changed": update, "results": rval, "state": "present"}
+
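needs_update() above leans on Utils.check_def_equal to compare each processed object with the live object while ignoring server-populated fields such as a ServiceAccount's secrets. As a rough, self-contained sketch of that idea (not the helper the role actually ships, which also descends into nested structures):

    def defs_equal(user_def, live_def, skip_keys=None):
        '''shallow-compare two object definitions, ignoring any keys listed in skip_keys'''
        skip_keys = skip_keys or []
        for key in set(user_def) | set(live_def):
            if key in skip_keys:
                continue
            if user_def.get(key) != live_def.get(key):
                return False
        return True

    # a ServiceAccount whose 'secrets' the server filled in still compares equal:
    # defs_equal(processed_sa, live_sa, skip_keys=['secrets', 'imagePullSecrets'])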
diff --git a/roles/lib_openshift/src/doc/process b/roles/lib_openshift/src/doc/process
new file mode 100644
index 000000000..86a854c07
--- /dev/null
+++ b/roles/lib_openshift/src/doc/process
@@ -0,0 +1,84 @@
+# flake8: noqa
+# pylint: skip-file
+
+DOCUMENTATION = '''
+---
+module: oc_process
+short_description: Module to process openshift templates
+description:
+ - Process openshift templates programmatically.
+options:
+ state:
+ description:
+ - State has a few different meanings when it comes to process.
+    - state: present - Runs an `oc process <template>`. When used in
+    - conjunction with 'create: True', the processed output is piped to
+    - `oc create -f -` so the described objects are created.
+    - state: list - Performs an `oc get template <template_name>`.
+ default: present
+ choices: ["present", "absent", "list"]
+ aliases: []
+ kubeconfig:
+ description:
+ - The path for the kubeconfig file to use for authentication
+ required: false
+ default: /etc/origin/master/admin.kubeconfig
+ aliases: []
+ debug:
+ description:
+ - Turn on debug output.
+ required: false
+ default: False
+ aliases: []
+ template_name:
+ description:
+ - Name of the openshift template that is being processed.
+ required: false
+ default: None
+ aliases: []
+ namespace:
+ description:
+ - The namespace where the template lives.
+ required: false
+ default: default
+ aliases: []
+ content:
+ description:
+ - Template content that will be processed.
+ required: false
+ default: None
+ aliases: []
+ params:
+ description:
+    - A dict of parameters that will be inserted into the template.
+ required: false
+ default: None
+ aliases: []
+ create:
+ description:
+    - Whether or not to create the processed objects, e.g. oc process | oc create -f -
+ required: False
+ default: False
+ aliases: []
+ reconcile:
+ description:
+    - Whether to compare the processed objects against the existing objects and replace any that differ.
+ default: true
+ aliases: []
+author:
+- "Kenny Woodson <kwoodson@redhat.com>"
+extends_documentation_fragment: []
+'''
+
+EXAMPLES = '''
+- name: process the cloud volume provisioner template with variables
+ oc_process:
+ namespace: openshift-infra
+ template_name: online-volume-provisioner
+ create: True
+ params:
+ PLAT: rhel7
+ register: processout
+ run_once: true
+- debug: var=processout
+'''
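For completeness, a hypothetical way to drive the same code path directly from Python (the namespace and template name below are made-up values; the keys mirror the argument_spec shown earlier):

    params = {
        'kubeconfig': '/etc/origin/master/admin.kubeconfig',
        'state': 'list',                     # only 'present' and 'list' are accepted
        'debug': False,
        'namespace': 'test',                 # assumed namespace
        'template_name': 'mysql-ephemeral',  # assumed template name
        'content': None,
        'params': None,
        'create': False,
        'reconcile': True,
    }
    rval = OCProcess.run_ansible(params, check_mode=False)
    # rval carries 'changed', 'results' and 'state', or 'failed' plus 'msg' on error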
diff --git a/roles/lib_openshift/src/sources.yml b/roles/lib_openshift/src/sources.yml
index aa02ce120..e9056655d 100644
--- a/roles/lib_openshift/src/sources.yml
+++ b/roles/lib_openshift/src/sources.yml
@@ -39,6 +39,16 @@ oc_obj.py:
- class/oc_obj.py
- ansible/oc_obj.py
+oc_process.py:
+- doc/generated
+- doc/license
+- lib/import.py
+- doc/process
+- ../../lib_utils/src/class/yedit.py
+- lib/base.py
+- class/oc_process.py
+- ansible/oc_process.py
+
oc_route.py:
- doc/generated
- doc/license
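sources.yml drives how the single-file library modules are assembled: each entry lists the fragments that are concatenated, in order, into the generated module under roles/lib_openshift/library/. A minimal sketch of that assembly step (assuming PyYAML is available; the role's actual generator script may differ):

    import yaml

    def assemble(sources_path, module_name, out_path):
        '''concatenate the fragments listed for module_name in sources.yml'''
        with open(sources_path) as stream:
            sources = yaml.safe_load(stream)
        with open(out_path, 'w') as out:
            for fragment in sources[module_name]:
                with open(fragment) as frag:
                    out.write(frag.read())

    # assemble('sources.yml', 'oc_process.py', '../library/oc_process.py')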
diff --git a/roles/lib_openshift/src/test/integration/oc_process.yml b/roles/lib_openshift/src/test/integration/oc_process.yml
new file mode 100755
index 000000000..7ea4c6b99
--- /dev/null
+++ b/roles/lib_openshift/src/test/integration/oc_process.yml
@@ -0,0 +1,83 @@
+#!/usr/bin/ansible-playbook --module-path=../../../library/:../../../../lib_utils/library
+
+---
+- hosts: "{{ cli_master_test }}"
+ gather_facts: no
+ user: root
+ vars:
+ template_name: mysql-ephemeral
+ ns_name: test
+
+ post_tasks:
+ - name: get the mysql-ephemeral template
+ oc_obj:
+ name: mysql-ephemeral
+ state: list
+ namespace: openshift
+ kind: template
+ register: mysqltempl
+
+ - name: fix namespace
+ yedit:
+ src: /tmp/mysql-template
+ key: metadata.namespace
+ value: test
+ backup: false
+ content: "{{ mysqltempl.results.results[0] | to_yaml }}"
+
+ - name: create the test namespace
+ oc_obj:
+ name: test
+ state: present
+ namespace: test
+ kind: namespace
+ content:
+ path: /tmp/ns_test
+ data:
+ apiVersion: v1
+ kind: Namespace
+ metadata:
+ name: test
+ spec:
+ finalizers:
+ - openshift.io/origin
+ - kubernetes
+ register: mysqltempl
+
+ - name: create the mysql-ephemeral template
+ oc_obj:
+ name: mysql-ephemeral
+ state: present
+ namespace: test
+ kind: template
+ files:
+ - /tmp/mysql-template
+ delete_after: True
+ register: mysqltempl
+
+ - name: process mysql-ephemeral
+ oc_process:
+ template_name: mysql-ephemeral
+ namespace: test
+ params:
+ NAMESPACE: test
+ DATABASE_SERVICE_NAME: testdb
+ create: False
+ reconcile: false
+ register: procout
+
+ - assert:
+ that:
+ - not procout.changed
+ - procout.results.results['items'][0]['metadata']['name'] == 'testdb'
+ - procout.results.results['items'][0]['kind'] == 'Service'
+ - procout.results.results['items'][1]['metadata']['name'] == 'testdb'
+ - procout.results.results['items'][1]['kind'] == 'DeploymentConfig'
+ msg: process failed on template
+
+ - name: remove namespace test
+ oc_obj:
+ kind: namespace
+ name: test
+ namespace: test
+ state: absent
diff --git a/roles/lib_openshift/src/test/unit/oc_process.py b/roles/lib_openshift/src/test/unit/oc_process.py
new file mode 100755
index 000000000..450ff7071
--- /dev/null
+++ b/roles/lib_openshift/src/test/unit/oc_process.py
@@ -0,0 +1,483 @@
+#!/usr/bin/env python2
+'''
+ Unit tests for oc process
+'''
+# To run
+# python -m unittest oc_process
+#
+# .
+# Ran 1 test in 0.597s
+#
+# OK
+
+import os
+import sys
+import unittest
+import mock
+
+# Removing invalid variable names for tests so that I can
+# keep them brief
+# pylint: disable=invalid-name,no-name-in-module
+# Disable import-error b/c our libraries aren't loaded in jenkins
+# pylint: disable=import-error
+# place class in our python path
+module_path = os.path.join('/'.join(os.path.realpath(__file__).split('/')[:-4]), 'library') # noqa: E501
+sys.path.insert(0, module_path)
+from oc_process import OCProcess # noqa: E402
+
+
+# pylint: disable=too-many-public-methods
+class OCProcessTest(unittest.TestCase):
+ '''
+ Test class for OCProcess
+ '''
+ mysql = '''{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "mysql-ephemeral",
+ "namespace": "openshift",
+ "selfLink": "/oapi/v1/namespaces/openshift/templates/mysql-ephemeral",
+ "uid": "fb8b5f04-e3d3-11e6-a982-0e84250fc302",
+ "resourceVersion": "480",
+ "creationTimestamp": "2017-01-26T14:30:27Z",
+ "annotations": {
+ "iconClass": "icon-mysql-database",
+ "openshift.io/display-name": "MySQL (Ephemeral)",
+ "tags": "database,mysql"
+ }
+ },
+ "objects": [
+ {
+ "apiVersion": "v1",
+ "kind": "Service",
+ "metadata": {
+ "creationTimestamp": null,
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "mysql",
+ "nodePort": 0,
+ "port": 3306,
+ "protocol": "TCP",
+ "targetPort": 3306
+ }
+ ],
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "sessionAffinity": "None",
+ "type": "ClusterIP"
+ },
+ "status": {
+ "loadBalancer": {}
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "DeploymentConfig",
+ "metadata": {
+ "creationTimestamp": null,
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "spec": {
+ "replicas": 1,
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "strategy": {
+ "type": "Recreate"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "capabilities": {},
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "value": "${MYSQL_USER}"
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "value": "${MYSQL_PASSWORD}"
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${MYSQL_DATABASE}"
+ }
+ ],
+ "image": " ",
+ "imagePullPolicy": "IfNotPresent",
+ "livenessProbe": {
+ "initialDelaySeconds": 30,
+ "tcpSocket": {
+ "port": 3306
+ },
+ "timeoutSeconds": 1
+ },
+ "name": "mysql",
+ "ports": [
+ {
+ "containerPort": 3306,
+ "protocol": "TCP"
+ }
+ ],
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/sh",
+ "-i",
+ "-c",
+ "MYSQL_PWD=$MYSQL_PASSWORD mysql -h 127.0.0.1 -u $MYSQL_USER -D $MYSQL_DATABASE -e 'SELECT 1'"
+ ]
+ },
+ "initialDelaySeconds": 5,
+ "timeoutSeconds": 1
+ },
+ "resources": {
+ "limits": {
+ "memory": "${MEMORY_LIMIT}"
+ }
+ },
+ "securityContext": {
+ "capabilities": {},
+ "privileged": false
+ },
+ "terminationMessagePath": "/dev/termination-log",
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/mysql/data",
+ "name": "${DATABASE_SERVICE_NAME}-data"
+ }
+ ]
+ }
+ ],
+ "dnsPolicy": "ClusterFirst",
+ "restartPolicy": "Always",
+ "volumes": [
+ {
+ "emptyDir": {
+ "medium": ""
+ },
+ "name": "${DATABASE_SERVICE_NAME}-data"
+ }
+ ]
+ }
+ },
+ "triggers": [
+ {
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "mysql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "mysql:${MYSQL_VERSION}",
+ "namespace": "${NAMESPACE}"
+ },
+ "lastTriggeredImage": ""
+ },
+ "type": "ImageChange"
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ },
+ "status": {}
+ }
+ ],
+ "parameters": [
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory Limit",
+ "description": "Maximum amount of memory the container can use.",
+ "value": "512Mi"
+ },
+ {
+ "name": "NAMESPACE",
+ "displayName": "Namespace",
+ "description": "The OpenShift Namespace where the ImageStream resides.",
+ "value": "openshift"
+ },
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "displayName": "Database Service Name",
+ "description": "The name of the OpenShift Service exposed for the database.",
+ "value": "mysql",
+ "required": true
+ },
+ {
+ "name": "MYSQL_USER",
+ "displayName": "MySQL Connection Username",
+ "description": "Username for MySQL user that will be used for accessing the database.",
+ "generate": "expression",
+ "from": "user[A-Z0-9]{3}",
+ "required": true
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "displayName": "MySQL Connection Password",
+ "description": "Password for the MySQL connection user.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{16}",
+ "required": true
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "displayName": "MySQL Database Name",
+ "description": "Name of the MySQL database accessed.",
+ "value": "sampledb",
+ "required": true
+ },
+ {
+ "name": "MYSQL_VERSION",
+ "displayName": "Version of MySQL Image",
+ "description": "Version of MySQL image to be used (5.5, 5.6 or latest).",
+ "value": "5.6",
+ "required": true
+ }
+ ],
+ "labels": {
+ "template": "mysql-ephemeral-template"
+ }
+}'''
+
+ def setUp(self):
+ ''' setup method will set to known configuration '''
+ pass
+
+ @mock.patch('oc_process.Utils.create_tmpfile_copy')
+ @mock.patch('oc_process.OCProcess._run')
+ def test_state_list(self, mock_cmd, mock_tmpfile_copy):
+ ''' Testing a get '''
+        params = {'template_name': 'mysql-ephemeral',
+ 'namespace': 'test',
+ 'content': None,
+ 'state': 'list',
+ 'reconcile': False,
+ 'create': False,
+ 'params': {'NAMESPACE': 'test', 'DATABASE_SERVICE_NAME': 'testdb'},
+ 'kubeconfig': '/etc/origin/master/admin.kubeconfig',
+ 'debug': False}
+
+ mock_cmd.side_effect = [
+ (0, OCProcessTest.mysql, '')
+ ]
+
+ mock_tmpfile_copy.side_effect = [
+ '/tmp/mock_kubeconfig',
+ ]
+
+ results = OCProcess.run_ansible(params, False)
+
+ self.assertFalse(results['changed'])
+ self.assertEqual(results['results']['results'][0]['metadata']['name'], 'mysql-ephemeral')
+
+ @mock.patch('oc_process.Utils.create_tmpfile_copy')
+ @mock.patch('oc_process.OCProcess._run')
+ def test_process_no_create(self, mock_cmd, mock_tmpfile_copy):
+ ''' Testing a process with no create '''
+        params = {'template_name': 'mysql-ephemeral',
+ 'namespace': 'test',
+ 'content': None,
+ 'state': 'present',
+ 'reconcile': False,
+ 'create': False,
+ 'params': {'NAMESPACE': 'test', 'DATABASE_SERVICE_NAME': 'testdb'},
+ 'kubeconfig': '/etc/origin/master/admin.kubeconfig',
+ 'debug': False}
+
+ mysqlproc = '''{
+ "kind": "List",
+ "apiVersion": "v1",
+ "metadata": {},
+ "items": [
+ {
+ "apiVersion": "v1",
+ "kind": "Service",
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "template": "mysql-ephemeral-template"
+ },
+ "name": "testdb"
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "mysql",
+ "nodePort": 0,
+ "port": 3306,
+ "protocol": "TCP",
+ "targetPort": 3306
+ }
+ ],
+ "selector": {
+ "name": "testdb"
+ },
+ "sessionAffinity": "None",
+ "type": "ClusterIP"
+ },
+ "status": {
+ "loadBalancer": {}
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "DeploymentConfig",
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "template": "mysql-ephemeral-template"
+ },
+ "name": "testdb"
+ },
+ "spec": {
+ "replicas": 1,
+ "selector": {
+ "name": "testdb"
+ },
+ "strategy": {
+ "type": "Recreate"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "name": "testdb"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "capabilities": {},
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "value": "userHJJ"
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "value": "GITOAduAMaV6k688"
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "sampledb"
+ }
+ ],
+ "image": " ",
+ "imagePullPolicy": "IfNotPresent",
+ "livenessProbe": {
+ "initialDelaySeconds": 30,
+ "tcpSocket": {
+ "port": 3306
+ },
+ "timeoutSeconds": 1
+ },
+ "name": "mysql",
+ "ports": [
+ {
+ "containerPort": 3306,
+ "protocol": "TCP"
+ }
+ ],
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/sh",
+ "-i",
+ "-c",
+ "MYSQL_PWD=$MYSQL_PASSWORD mysql -h 127.0.0.1 -u $MYSQL_USER -D $MYSQL_DATABASE -e 'SELECT 1'"
+ ]
+ },
+ "initialDelaySeconds": 5,
+ "timeoutSeconds": 1
+ },
+ "resources": {
+ "limits": {
+ "memory": "512Mi"
+ }
+ },
+ "securityContext": {
+ "capabilities": {},
+ "privileged": false
+ },
+ "terminationMessagePath": "/dev/termination-log",
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/mysql/data",
+ "name": "testdb-data"
+ }
+ ]
+ }
+ ],
+ "dnsPolicy": "ClusterFirst",
+ "restartPolicy": "Always",
+ "volumes": [
+ {
+ "emptyDir": {
+ "medium": ""
+ },
+ "name": "testdb-data"
+ }
+ ]
+ }
+ },
+ "triggers": [
+ {
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "mysql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "mysql:5.6",
+ "namespace": "test"
+ },
+ "lastTriggeredImage": ""
+ },
+ "type": "ImageChange"
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ }
+ ]
+}'''
+
+ mock_cmd.side_effect = [
+ (0, OCProcessTest.mysql, ''),
+ (0, OCProcessTest.mysql, ''),
+ (0, mysqlproc, ''),
+ ]
+
+ mock_tmpfile_copy.side_effect = [
+ '/tmp/mock_kubeconfig',
+ ]
+
+ results = OCProcess.run_ansible(params, False)
+
+ self.assertFalse(results['changed'])
+ self.assertEqual(results['results']['results']['items'][0]['metadata']['name'], 'testdb')
+
+ def tearDown(self):
+ '''TearDown method'''
+ pass
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/roles/lib_openshift/tasks/main.yml b/roles/lib_openshift/tasks/main.yml
index 2980c8a8d..157cf8f7f 100644
--- a/roles/lib_openshift/tasks/main.yml
+++ b/roles/lib_openshift/tasks/main.yml
@@ -1,5 +1,12 @@
---
- name: lib_openshift ensure python-ruamel-yaml package is on target
package:
- name: python-ruamel-yaml
+ name: "{{ item }}"
state: present
+ with_items:
+ - ruamel.yaml
+ - ruamel.yaml
+ - ruamel.yaml
+ - ruamel.yaml
+ - ruamel.yaml
+ - ruamel.yaml
diff --git a/roles/openshift_logging/README.md b/roles/openshift_logging/README.md
index 9394977c0..f4ec58668 100644
--- a/roles/openshift_logging/README.md
+++ b/roles/openshift_logging/README.md
@@ -24,8 +24,8 @@ When both `openshift_logging_install_logging` and `openshift_logging_upgrade_log
- `openshift_logging_image_prefix`: The prefix for the logging images to use. Defaults to 'docker.io/openshift/origin-'.
- `openshift_logging_image_version`: The image version for the logging images to use. Defaults to 'latest'.
- `openshift_logging_use_ops`: If 'True', set up a second ES and Kibana cluster for infrastructure logs. Defaults to 'False'.
-- `master_url`: The URL for the Kubernetes master, this does not need to be public facing but should be accessible from within the cluster. Defaults to 'https://kubernetes.default.svc.cluster.local'.
-- `openshift_logging_master_public_url`: The public facing URL for the Kubernetes master, this is used for Authentication redirection. Defaults to 'https://localhost:8443'.
+- `openshift_logging_master_url`: The URL for the Kubernetes master; it does not need to be public facing, but it should be accessible from within the cluster. Defaults to 'https://kubernetes.default.svc.{{openshift.common.dns_domain}}'.
+- `openshift_logging_master_public_url`: The public-facing URL for the Kubernetes master; it is used for authentication redirection. Defaults to 'https://{{openshift.common.public_hostname}}:8443'.
- `openshift_logging_namespace`: The namespace that Aggregated Logging will be installed in. Defaults to 'logging'.
- `openshift_logging_curator_default_days`: The default minimum age (in days) Curator uses for deleting log records. Defaults to '30'.
- `openshift_logging_curator_run_hour`: The hour of the day that Curator will run at. Defaults to '0'.
diff --git a/roles/openshift_logging/defaults/main.yml b/roles/openshift_logging/defaults/main.yml
index ead59c029..5a229ee73 100644
--- a/roles/openshift_logging/defaults/main.yml
+++ b/roles/openshift_logging/defaults/main.yml
@@ -1,9 +1,9 @@
---
-openshift_logging_image_prefix: docker.io/openshift/origin-
-openshift_logging_image_version: latest
+openshift_logging_image_prefix: "{{ openshift_hosted_logging_deployer_prefix | default('docker.io/openshift/origin-') }}"
+openshift_logging_image_version: "{{ openshift_hosted_logging_deployer_version | default('latest') }}"
openshift_logging_use_ops: False
-master_url: "https://kubernetes.default.svc.{{ openshift.common.dns_domain }}"
-openshift_logging_master_public_url: "https://{{openshift.common.public_hostname}}:8443"
+openshift_logging_master_url: "https://kubernetes.default.svc.{{ openshift.common.dns_domain }}"
+openshift_logging_master_public_url: "{{ openshift_hosted_logging_master_public_url | default('https://' ~ openshift.common.public_hostname ~ ':8443') }}"
openshift_logging_namespace: logging
openshift_logging_install_logging: True
@@ -19,7 +19,7 @@ openshift_logging_curator_memory_limit: null
openshift_logging_curator_ops_cpu_limit: 100m
openshift_logging_curator_ops_memory_limit: null
-openshift_logging_kibana_hostname: "kibana.{{openshift.common.dns_domain}}"
+openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ openshift.common.dns_domain) }}"
openshift_logging_kibana_cpu_limit: null
openshift_logging_kibana_memory_limit: null
openshift_logging_kibana_proxy_debug: false
@@ -27,7 +27,7 @@ openshift_logging_kibana_proxy_cpu_limit: null
openshift_logging_kibana_proxy_memory_limit: null
openshift_logging_kibana_replica_count: 1
-openshift_logging_kibana_ops_hostname: "kibana-ops.{{openshift.common.dns_domain}}"
+openshift_logging_kibana_ops_hostname: "{{ openshift_hosted_logging_ops_hostname | default('kibana-ops.' ~ openshift.common.dns_domain) }}"
openshift_logging_kibana_ops_cpu_limit: null
openshift_logging_kibana_ops_memory_limit: null
openshift_logging_kibana_ops_proxy_debug: false
@@ -48,13 +48,13 @@ openshift_logging_es_port: 9200
openshift_logging_es_ca: /etc/fluent/keys/ca
openshift_logging_es_client_cert: /etc/fluent/keys/cert
openshift_logging_es_client_key: /etc/fluent/keys/key
-openshift_logging_es_cluster_size: 1
+openshift_logging_es_cluster_size: "{{ openshift_hosted_logging_elasticsearch_cluster_size | default(1) }}"
openshift_logging_es_cpu_limit: null
openshift_logging_es_memory_limit: 1024Mi
openshift_logging_es_pv_selector: null
-openshift_logging_es_pvc_dynamic: False
-openshift_logging_es_pvc_size: ""
-openshift_logging_es_pvc_prefix: logging-es
+openshift_logging_es_pvc_dynamic: "{{ openshift_hosted_logging_elasticsearch_pvc_dynamic | default(False) }}"
+openshift_logging_es_pvc_size: "{{ openshift_hosted_logging_elasticsearch_pvc_size | default('') }}"
+openshift_logging_es_pvc_prefix: "{{ openshift_hosted_logging_elasticsearch_pvc_prefix | default('logging-es') }}"
openshift_logging_es_recover_after_time: 5m
openshift_logging_es_storage_group: 65534
@@ -66,13 +66,13 @@ openshift_logging_es_ops_port: 9200
openshift_logging_es_ops_ca: /etc/fluent/keys/ca
openshift_logging_es_ops_client_cert: /etc/fluent/keys/cert
openshift_logging_es_ops_client_key: /etc/fluent/keys/key
-openshift_logging_es_ops_cluster_size: 1
+openshift_logging_es_ops_cluster_size: "{{ openshift_hosted_logging_elasticsearch_ops_cluster_size | default(1) }}"
openshift_logging_es_ops_cpu_limit: null
openshift_logging_es_ops_memory_limit: 1024Mi
openshift_logging_es_ops_pv_selector: None
-openshift_logging_es_ops_pvc_dynamic: False
-openshift_logging_es_ops_pvc_size: ""
-openshift_logging_es_ops_pvc_prefix: logging-es-ops
+openshift_logging_es_ops_pvc_dynamic: "{{ openshift_hosted_logging_elasticsearch_ops_pvc_dynamic | default(False) }}"
+openshift_logging_es_ops_pvc_size: "{{ openshift_hosted_logging_elasticsearch_ops_pvc_size | default('') }}"
+openshift_logging_es_ops_pvc_prefix: "{{ openshift_hosted_logging_elasticsearch_ops_pvc_prefix | default('logging-es-ops') }}"
openshift_logging_es_ops_recover_after_time: 5m
openshift_logging_es_ops_storage_group: 65534
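One thing to watch with the default() filters introduced above: the fallback must be a quoted string literal (or an expression built with ~), otherwise Jinja treats the bare token as a variable lookup. A small standalone check with jinja2, outside this commit, shows the difference:

    from jinja2 import Template

    # unquoted: 'latest' is resolved as an (undefined) variable, so the fallback renders empty
    print(Template("{{ openshift_logging_image_version | default(latest) }}").render())    # ''

    # quoted: the literal string is used as the fallback
    print(Template("{{ openshift_logging_image_version | default('latest') }}").render())  # 'latest'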
diff --git a/roles/openshift_logging/files/fluent.conf b/roles/openshift_logging/files/fluent.conf
index aa843e983..c0c1c8a44 100644
--- a/roles/openshift_logging/files/fluent.conf
+++ b/roles/openshift_logging/files/fluent.conf
@@ -22,6 +22,7 @@
@include configs.d/openshift/filter-k8s-flatten-hash.conf
@include configs.d/openshift/filter-k8s-record-transform.conf
@include configs.d/openshift/filter-syslog-record-transform.conf
+ @include configs.d/openshift/filter-common-data-model.conf
@include configs.d/openshift/filter-post-*.conf
##
diff --git a/roles/openshift_logging/tasks/upgrade_logging.yaml b/roles/openshift_logging/tasks/upgrade_logging.yaml
index cceacd538..0dc31932c 100644
--- a/roles/openshift_logging/tasks/upgrade_logging.yaml
+++ b/roles/openshift_logging/tasks/upgrade_logging.yaml
@@ -17,7 +17,7 @@
oc_scale:
kind: dc
name: "{{object.split('/')[1]}}"
- namespace: "{{mktemp.stdout}}/admin.kubeconfig"
+ namespace: "{{openshift_logging_namespace}}"
replicas: 1
with_items: "{{es_dc.stdout_lines}}"
loop_control:
diff --git a/roles/openshift_logging/templates/curator.j2 b/roles/openshift_logging/templates/curator.j2
index de6258eaa..b7bc15b62 100644
--- a/roles/openshift_logging/templates/curator.j2
+++ b/roles/openshift_logging/templates/curator.j2
@@ -48,7 +48,7 @@ spec:
env:
-
name: "K8S_HOST_URL"
- value: "{{master_url}}"
+ value: "{{openshift_logging_master_url}}"
-
name: "ES_HOST"
value: "{{es_host}}"
diff --git a/roles/openshift_logging/templates/fluentd.j2 b/roles/openshift_logging/templates/fluentd.j2
index b6c91f8ed..223d342b9 100644
--- a/roles/openshift_logging/templates/fluentd.j2
+++ b/roles/openshift_logging/templates/fluentd.j2
@@ -61,7 +61,7 @@ spec:
readOnly: true
env:
- name: "K8S_HOST_URL"
- value: "{{master_url}}"
+ value: "{{openshift_logging_master_url}}"
- name: "ES_HOST"
value: "{{openshift_logging_es_host}}"
- name: "ES_PORT"
diff --git a/roles/openshift_logging/templates/kibana.j2 b/roles/openshift_logging/templates/kibana.j2
index 3a9e03768..be9b45ab4 100644
--- a/roles/openshift_logging/templates/kibana.j2
+++ b/roles/openshift_logging/templates/kibana.j2
@@ -90,7 +90,7 @@ spec:
value: kibana-proxy
-
name: "OAP_MASTER_URL"
- value: {{master_url}}
+ value: {{openshift_logging_master_url}}
-
name: "OAP_PUBLIC_MASTER_URL"
value: {{openshift_logging_master_public_url}}
diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2
index 9ae54dac1..cf2d2e103 100644
--- a/roles/openshift_master/templates/master.yaml.v1.j2
+++ b/roles/openshift_master/templates/master.yaml.v1.j2
@@ -65,7 +65,11 @@ dnsConfig:
bindNetwork: tcp4
{% endif %}
etcdClientInfo:
+{% if openshift.common.version_gte_3_2_or_1_2 | bool %}
+ ca: {{ "ca-bundle.crt" if (openshift.master.embedded_etcd | bool) else "master.etcd-ca.crt" }}
+{% else %}
ca: {{ "ca.crt" if (openshift.master.embedded_etcd | bool) else "master.etcd-ca.crt" }}
+{% endif %}
certFile: master.etcd-client.crt
keyFile: master.etcd-client.key
urls:
@@ -79,12 +83,20 @@ etcdConfig:
peerServingInfo:
bindAddress: {{ openshift.master.bind_addr }}:7001
certFile: etcd.server.crt
+{% if openshift.common.version_gte_3_2_or_1_2 | bool %}
+ clientCA: ca-bundle.crt
+{% else %}
clientCA: ca.crt
+{% endif %}
keyFile: etcd.server.key
servingInfo:
bindAddress: {{ openshift.master.bind_addr }}:{{ openshift.master.etcd_port }}
certFile: etcd.server.crt
+{% if openshift.common.version_gte_3_2_or_1_2 | bool %}
+ clientCA: ca-bundle.crt
+{% else %}
clientCA: ca.crt
+{% endif %}
keyFile: etcd.server.key
storageDirectory: {{ openshift.common.data_dir }}/openshift.local.etcd
{% endif %}
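The net effect of the version gate above: on 3.2/1.2 or newer masters the embedded etcd sections point at ca-bundle.crt instead of ca.crt, while older releases keep the previous file names. A simplified jinja2 rendering of that gate (the real template wraps whole lines in {% if %} blocks):

    from jinja2 import Template

    line = Template("clientCA: {{ 'ca-bundle.crt' if version_gte_3_2_or_1_2 else 'ca.crt' }}")
    print(line.render(version_gte_3_2_or_1_2=True))   # clientCA: ca-bundle.crt
    print(line.render(version_gte_3_2_or_1_2=False))  # clientCA: ca.crt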
diff --git a/roles/openshift_metrics/defaults/main.yaml b/roles/openshift_metrics/defaults/main.yaml
index b2ff08e05..0cfbac8a9 100644
--- a/roles/openshift_metrics/defaults/main.yaml
+++ b/roles/openshift_metrics/defaults/main.yaml
@@ -23,6 +23,7 @@ openshift_metrics_cassandra_limits_cpu: null
openshift_metrics_cassandra_requests_memory: 1G
openshift_metrics_cassandra_requests_cpu: null
openshift_metrics_cassandra_nodeselector: ""
+openshift_metrics_cassandra_storage_group: 65534
openshift_metrics_heapster_standalone: False
openshift_metrics_heapster_limits_memory: 3.75G
@@ -31,6 +32,8 @@ openshift_metrics_heapster_requests_memory: 0.9375G
openshift_metrics_heapster_requests_cpu: null
openshift_metrics_heapster_nodeselector: ""
+openshift_metrics_hostname: "hawkular-metrics.{{openshift_master_default_subdomain}}"
+
openshift_metrics_duration: 7
openshift_metrics_resolution: 15s
diff --git a/roles/openshift_metrics/templates/hawkular_cassandra_rc.j2 b/roles/openshift_metrics/templates/hawkular_cassandra_rc.j2
index 6f6efc469..504476dc4 100644
--- a/roles/openshift_metrics/templates/hawkular_cassandra_rc.j2
+++ b/roles/openshift_metrics/templates/hawkular_cassandra_rc.j2
@@ -19,6 +19,9 @@ spec:
type: hawkular-cassandra
spec:
serviceAccount: cassandra
+ securityContext:
+ supplementalGroups:
+ - {{openshift_metrics_cassandra_storage_group}}
{% if node_selector is iterable and node_selector | length > 0 %}
nodeSelector:
{% for key, value in node_selector.iteritems() %}