-rw-r--r--  .tito/packages/openshift-ansible                                |   2
-rw-r--r--  filter_plugins/openshift_master.py                              |  13
-rw-r--r--  openshift-ansible.spec                                          |  16
-rw-r--r--  playbooks/adhoc/openshift_hosted_logging_efk.yaml               |   6
-rw-r--r--  playbooks/common/openshift-master/config.yml                    |   3
-rw-r--r--  roles/lib_openshift_api/library/oc_deploymentconfig.py          | 377
-rw-r--r--  roles/lib_openshift_api/library/oc_secret.py (renamed from roles/lib_openshift_api/library/oc_secrets.py) | 419
-rw-r--r--  roles/lib_openshift_api/library/oc_service.py                   | 378
-rw-r--r--  roles/lib_yaml_editor/library/yedit.py                          |   2
-rw-r--r--  roles/openshift_hosted_logging/README.md                        |  10
-rw-r--r--  roles/openshift_hosted_logging/files/logging-deployer-sa.yaml   |   6
-rw-r--r--  roles/openshift_hosted_logging/meta/main.yaml                   |   3
-rw-r--r--  roles/openshift_hosted_logging/tasks/cleanup_logging.yaml       |  59
-rw-r--r--  roles/openshift_hosted_logging/tasks/deploy_logging.yaml        | 105
-rw-r--r--  roles/openshift_hosted_logging/tasks/main.yaml                  |   8
-rw-r--r--  roles/openshift_hosted_logging/vars/main.yaml                   |   6
-rw-r--r--  roles/openshift_manageiq/tasks/main.yaml                        |  13
-rw-r--r--  roles/openshift_manageiq/vars/main.yml                          |   8
-rw-r--r--  roles/openshift_serviceaccounts/tasks/main.yml                  |   3
-rw-r--r--  roles/os_zabbix/vars/template_openshift_master.yml              |  35
20 files changed, 1265 insertions, 207 deletions
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index fad006e05..087da58c4 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.0.63-1 ./
+3.0.65-1 ./
diff --git a/filter_plugins/openshift_master.py b/filter_plugins/openshift_master.py
index 3a1d77f53..d0fb98ec3 100644
--- a/filter_plugins/openshift_master.py
+++ b/filter_plugins/openshift_master.py
@@ -372,13 +372,12 @@ class OpenIDIdentityProvider(IdentityProviderOauthBase):
raise errors.AnsibleFilterError("|failed claims for provider {0} "
"must be a dictionary".format(self.__class__.__name__))
- if 'extraScopes' not in self.provider['extraScopes'] and not isinstance(self.provider['extraScopes'], list):
- raise errors.AnsibleFilterError("|failed extraScopes for provider "
- "{0} must be a list".format(self.__class__.__name__))
- if ('extraAuthorizeParameters' not in self.provider['extraAuthorizeParameters']
- and not isinstance(self.provider['extraAuthorizeParameters'], dict)):
- raise errors.AnsibleFilterError("|failed extraAuthorizeParameters "
- "for provider {0} must be a dictionary".format(self.__class__.__name__))
+ for var, var_type in (('extraScopes', list), ('extraAuthorizeParameters', dict)):
+ if var in self.provider and not isinstance(self.provider[var], var_type):
+ raise errors.AnsibleFilterError("|failed {1} for provider "
+ "{0} must be a {2}".format(self.__class__.__name__,
+ var,
+                                                                           var_type.__name__))
required_claims = ['id']
optional_claims = ['email', 'name', 'preferredUsername']
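For context, the fields validated above come from the identity provider entries in the Ansible inventory. A minimal sketch of an OpenID provider definition exercising these checks; the client credentials, claim, and scope values are illustrative rather than taken from this commit, and only the validated fields are shown:

    openshift_master_identity_providers:
      - name: example_oidc
        kind: OpenIDIdentityProvider
        login: true
        challenge: false
        clientID: my-client
        clientSecret: my-secret
        claims:
          id: [sub]                   # 'id' is the one required claim
        extraScopes:                  # must be a list, per the check above
          - profile
          - email
        extraAuthorizeParameters:     # must be a dictionary, per the check above
          include_granted_scopes: "true"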
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index 9f2d035a1..fa9940225 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -5,7 +5,7 @@
}
Name: openshift-ansible
-Version: 3.0.63
+Version: 3.0.65
Release: 1%{?dist}
Summary: Openshift and Atomic Enterprise Ansible
License: ASL 2.0
@@ -279,6 +279,20 @@ Atomic OpenShift Utilities includes
%changelog
+* Thu Mar 24 2016 Troy Dawson <tdawson@redhat.com> 3.0.65-1
+- Adding deployment config and refactored. (kwoodson@redhat.com)
+- ManageIQ SA: Adding image-puller role (efreiber@redhat.com)
+
+* Wed Mar 23 2016 Troy Dawson <tdawson@redhat.com> 3.0.64-1
+- Latest cli updates from generated files (kwoodson@redhat.com)
+- Add /dev to node containers (sdodson@redhat.com)
+- Fix indention (whearn@redhat.com)
+- Support setting local storage perFSGroup quota in node config.
+ (dgoodwin@redhat.com)
+- Fix line break (whearn@redhat.com)
+- Lock down permissions on named certificates (elyscape@gmail.com)
+- Add namespace flag to oc create (whearn@redhat.com)
+
* Mon Mar 21 2016 Kenny Woodson <kwoodson@redhat.com> 3.0.63-1
- Modified group selectors for muliple clusters per account
(kwoodson@redhat.com)
diff --git a/playbooks/adhoc/openshift_hosted_logging_efk.yaml b/playbooks/adhoc/openshift_hosted_logging_efk.yaml
new file mode 100644
index 000000000..a3121d046
--- /dev/null
+++ b/playbooks/adhoc/openshift_hosted_logging_efk.yaml
@@ -0,0 +1,6 @@
+---
+- hosts: masters[0]
+ roles:
+ - role: openshift_hosted_logging
+ openshift_hosted_logging_cleanup: no
+
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
index 972427c53..f1eaf8e16 100644
--- a/playbooks/common/openshift-master/config.yml
+++ b/playbooks/common/openshift-master/config.yml
@@ -311,13 +311,14 @@
file:
path: "{{ named_certs_dir }}"
state: directory
+ mode: 0700
when: named_certs_specified | bool
- name: Land named certificates
copy: src="{{ item.certfile }}" dest="{{ named_certs_dir }}"
with_items: openshift_master_named_certificates
when: named_certs_specified | bool
- name: Land named certificate keys
- copy: src="{{ item.keyfile }}" dest="{{ named_certs_dir }}"
+ copy: src="{{ item.keyfile }}" dest="{{ named_certs_dir }}" mode=0600
with_items: openshift_master_named_certificates
when: named_certs_specified | bool
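The tasks above iterate over openshift_master_named_certificates; a hedged sketch of that inventory variable, assuming only the certfile/keyfile keys the copy tasks reference (paths are illustrative):

    openshift_master_named_certificates:
      - certfile: /root/custom-certs/master.example.com.crt
        keyfile: /root/custom-certs/master.example.com.key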
diff --git a/roles/lib_openshift_api/library/oc_deploymentconfig.py b/roles/lib_openshift_api/library/oc_deploymentconfig.py
new file mode 100644
index 000000000..fbdaa8e9c
--- /dev/null
+++ b/roles/lib_openshift_api/library/oc_deploymentconfig.py
@@ -0,0 +1,377 @@
+#!/usr/bin/env python
+'''
+ OpenShiftCLI class that wraps the oc commands in a subprocess
+'''
+import atexit
+import json
+import os
+import shutil
+import subprocess
+import yaml
+
+class OpenShiftCLI(object):
+ ''' Class to wrap the oc command line tools '''
+ def __init__(self,
+ namespace,
+ kubeconfig='/etc/origin/master/admin.kubeconfig',
+ verbose=False):
+ ''' Constructor for OpenshiftOC '''
+ self.namespace = namespace
+ self.verbose = verbose
+ self.kubeconfig = kubeconfig
+
+ def replace(self, fname, force=False):
+        '''replace an object from a file'''
+ cmd = ['replace', '-f', fname]
+ if force:
+ cmd = ['replace', '--force', '-f', fname]
+ return self.oc_cmd(cmd)
+
+ def create(self, fname):
+        '''create an object from a file'''
+ return self.oc_cmd(['create', '-f', fname, '-n', self.namespace])
+
+ def delete(self, resource, rname):
+        '''delete a resource by name'''
+ return self.oc_cmd(['delete', resource, rname, '-n', self.namespace])
+
+ def get(self, resource, rname=None):
+        '''return a resource by name'''
+ cmd = ['get', resource, '-o', 'json', '-n', self.namespace]
+ if rname:
+ cmd.append(rname)
+
+ rval = self.oc_cmd(cmd, output=True)
+
+        # Ensure results are returned in an array
+ if rval.has_key('items'):
+ rval['results'] = rval['items']
+ elif not isinstance(rval['results'], list):
+ rval['results'] = [rval['results']]
+
+ return rval
+
+ def oc_cmd(self, cmd, output=False):
+ '''Base command for oc '''
+ #cmds = ['/usr/bin/oc', '--config', self.kubeconfig]
+ cmds = ['/usr/bin/oc']
+ cmds.extend(cmd)
+
+ results = ''
+
+ if self.verbose:
+ print ' '.join(cmds)
+
+ proc = subprocess.Popen(cmds,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ env={'KUBECONFIG': self.kubeconfig})
+ proc.wait()
+ if proc.returncode == 0:
+ if output:
+ try:
+ results = json.loads(proc.stdout.read())
+ except ValueError as err:
+ if "No JSON object could be decoded" in err.message:
+ results = err.message
+
+ if self.verbose:
+ print proc.stderr.read()
+ print results
+ print
+
+ return {"returncode": proc.returncode, "results": results}
+
+ return {"returncode": proc.returncode,
+ "stderr": proc.stderr.read(),
+ "stdout": proc.stdout.read(),
+ "results": {}
+ }
+
+class Utils(object):
+ ''' utilities for openshiftcli modules '''
+ @staticmethod
+ def create_file(rname, data, ftype=None):
+ ''' create a file in tmp with name and contents'''
+ path = os.path.join('/tmp', rname)
+ with open(path, 'w') as fds:
+ if ftype == 'yaml':
+ fds.write(yaml.dump(data, default_flow_style=False))
+
+ elif ftype == 'json':
+ fds.write(json.dumps(data))
+ else:
+ fds.write(data)
+
+ # Register cleanup when module is done
+ atexit.register(Utils.cleanup, [path])
+ return path
+
+ @staticmethod
+ def create_files_from_contents(data):
+ '''Turn an array of dict: filename, content into a files array'''
+ files = []
+
+ for sfile in data:
+ path = Utils.create_file(sfile['path'], sfile['content'])
+ files.append(path)
+
+ return files
+
+ @staticmethod
+ def cleanup(files):
+ '''Clean up on exit '''
+ for sfile in files:
+ if os.path.exists(sfile):
+ if os.path.isdir(sfile):
+ shutil.rmtree(sfile)
+ elif os.path.isfile(sfile):
+ os.remove(sfile)
+
+
+ @staticmethod
+ def exists(results, _name):
+ ''' Check to see if the results include the name '''
+ if not results:
+ return False
+
+
+ if Utils.find_result(results, _name):
+ return True
+
+ return False
+
+ @staticmethod
+ def find_result(results, _name):
+ ''' Find the specified result by name'''
+ rval = None
+ for result in results:
+ if result.has_key('metadata') and result['metadata']['name'] == _name:
+ rval = result
+ break
+
+ return rval
+
+ @staticmethod
+ def get_resource_file(sfile, sfile_type='yaml'):
+        ''' return the parsed resource file '''
+ contents = None
+ with open(sfile) as sfd:
+ contents = sfd.read()
+
+ if sfile_type == 'yaml':
+ contents = yaml.load(contents)
+ elif sfile_type == 'json':
+ contents = json.loads(contents)
+
+ return contents
+
+ # Disabling too-many-branches. This is a yaml dictionary comparison function
+ # pylint: disable=too-many-branches,too-many-return-statements
+ @staticmethod
+ def check_def_equal(user_def, result_def, debug=False):
+ ''' Given a user defined definition, compare it with the results given back by our query. '''
+
+ # Currently these values are autogenerated and we do not need to check them
+ skip = ['creationTimestamp', 'selfLink', 'resourceVersion', 'uid', 'namespace']
+
+ for key, value in result_def.items():
+ if key in skip:
+ continue
+
+ # Both are lists
+ if isinstance(value, list):
+ if not isinstance(user_def[key], list):
+ return False
+
+ # lists should be identical
+ if value != user_def[key]:
+ return False
+
+ # recurse on a dictionary
+ elif isinstance(value, dict):
+ if not isinstance(user_def[key], dict):
+ if debug:
+ print "dict returned false not instance of dict"
+ return False
+
+ # before passing ensure keys match
+ api_values = set(value.keys()) - set(skip)
+ user_values = set(user_def[key].keys()) - set(skip)
+ if api_values != user_values:
+ if debug:
+ print api_values
+ print user_values
+ print "keys are not equal in dict"
+ return False
+
+ result = Utils.check_def_equal(user_def[key], value, debug=debug)
+ if not result:
+ if debug:
+ print "dict returned false"
+ return False
+
+ # Verify each key, value pair is the same
+ else:
+ if not user_def.has_key(key) or value != user_def[key]:
+ if debug:
+ print "value not equal; user_def does not have key"
+ print value
+ print user_def[key]
+ return False
+
+ return True
+
+class DeploymentConfig(OpenShiftCLI):
+ ''' Class to wrap the oc command line tools
+ '''
+ def __init__(self,
+ namespace,
+ dname=None,
+ kubeconfig='/etc/origin/master/admin.kubeconfig',
+ verbose=False):
+ ''' Constructor for OpenshiftOC '''
+ super(DeploymentConfig, self).__init__(namespace, kubeconfig)
+ self.namespace = namespace
+ self.name = dname
+ self.kubeconfig = kubeconfig
+ self.verbose = verbose
+
+ def get_dc(self):
+ '''return a deploymentconfig by name '''
+ return self.get('dc', self.name)
+
+ def delete_dc(self):
+        '''delete the deploymentconfig'''
+ return self.delete('dc', self.name)
+
+ def new_dc(self, dfile):
+ '''Create a deploymentconfig '''
+ return self.create(dfile)
+
+ def update_dc(self, dfile, force=False):
+        '''run update dc
+
+        This receives a file name and replaces the deploymentconfig via `oc replace`.
+        '''
+ return self.replace(dfile, force)
+
+
+# pylint: disable=too-many-branches
+def main():
+ '''
+ ansible oc module for deploymentconfig
+ '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
+ state=dict(default='present', type='str',
+ choices=['present', 'absent', 'list']),
+ debug=dict(default=False, type='bool'),
+ namespace=dict(default='default', type='str'),
+ name=dict(default=None, type='str'),
+ deploymentconfig_file=dict(default=None, type='str'),
+ input_type=dict(default='yaml', choices=['yaml', 'json'], type='str'),
+ delete_after=dict(default=False, type='bool'),
+ content=dict(default=None, type='dict'),
+ force=dict(default=False, type='bool'),
+ ),
+        mutually_exclusive=[["content", "deploymentconfig_file"]],
+
+ supports_check_mode=True,
+ )
+ occmd = DeploymentConfig(module.params['namespace'],
+ dname=module.params['name'],
+ kubeconfig=module.params['kubeconfig'],
+ verbose=module.params['debug'])
+
+ state = module.params['state']
+
+ api_rval = occmd.get_dc()
+
+ #####
+ # Get
+ #####
+ if state == 'list':
+ module.exit_json(changed=False, results=api_rval['results'], state="list")
+
+ if not module.params['name']:
+ module.fail_json(msg='Please specify a name when state is absent|present.')
+ ########
+ # Delete
+ ########
+ if state == 'absent':
+ if not Utils.exists(api_rval['results'], module.params['name']):
+ module.exit_json(changed=False, state="absent")
+
+ if module.check_mode:
+ module.exit_json(change=False, msg='Would have performed a delete.')
+
+ api_rval = occmd.delete_dc()
+ module.exit_json(changed=True, results=api_rval, state="absent")
+
+
+ if state == 'present':
+ if module.params['deploymentconfig_file']:
+ dfile = module.params['deploymentconfig_file']
+ elif module.params['content']:
+            dfile = Utils.create_file('dc', module.params['content'], ftype=module.params['input_type'])
+ else:
+ module.fail_json(msg="Please specify content or deploymentconfig file.")
+
+ ########
+ # Create
+ ########
+ if not Utils.exists(api_rval['results'], module.params['name']):
+
+ if module.check_mode:
+ module.exit_json(change=False, msg='Would have performed a create.')
+
+ api_rval = occmd.new_dc(dfile)
+
+ # Remove files
+ if module.params['deploymentconfig_file'] and module.params['delete_after']:
+ Utils.cleanup([dfile])
+
+ if api_rval['returncode'] != 0:
+ module.fail_json(msg=api_rval)
+
+ module.exit_json(changed=True, results=api_rval, state="present")
+
+ ########
+ # Update
+ ########
+    if Utils.check_def_equal(Utils.get_resource_file(dfile, module.params['input_type']), api_rval['results'][0]):
+
+ # Remove files
+ if module.params['deploymentconfig_file'] and module.params['delete_after']:
+ Utils.cleanup([dfile])
+
+ module.exit_json(changed=False, results=api_rval['results'], state="present")
+
+ if module.check_mode:
+ module.exit_json(change=False, msg='Would have performed an update.')
+
+ api_rval = occmd.update_dc(dfile, force=module.params['force'])
+
+ # Remove files
+ if module.params['deploymentconfig_file'] and module.params['delete_after']:
+ Utils.cleanup([dfile])
+
+ if api_rval['returncode'] != 0:
+ module.fail_json(msg=api_rval)
+
+
+ module.exit_json(changed=True, results=api_rval, state="present")
+
+ module.exit_json(failed=True,
+ changed=False,
+ results='Unknown state passed. %s' % state,
+ state="unknown")
+
+# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
+# import module snippets. These are required
+from ansible.module_utils.basic import *
+
+main()
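Going by the argument spec above, a minimal usage sketch for the new module (namespace, name, and file path are illustrative):

    - name: ensure a deploymentconfig is present
      oc_deploymentconfig:
        state: present
        namespace: default
        name: router
        deploymentconfig_file: /tmp/router-dc.yml
        delete_after: true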
diff --git a/roles/lib_openshift_api/library/oc_secrets.py b/roles/lib_openshift_api/library/oc_secret.py
index 841c14692..96a0f1db1 100644
--- a/roles/lib_openshift_api/library/oc_secrets.py
+++ b/roles/lib_openshift_api/library/oc_secret.py
@@ -1,72 +1,45 @@
#!/usr/bin/env python
'''
-module for openshift cloud secrets
+ OpenShiftCLI class that wraps the oc commands in a subprocess
'''
-# Examples:
-#
-# # to initiate and use /etc/origin/master/admin.kubeconfig file for auth
-# - name: list secrets
-# oc_secrets:
-# state: list
-# namespace: default
-#
-# # To get a specific secret named 'mysecret'
-# - name: list secrets
-# oc_secrets:
-# state: list
-# namespace: default
-# name: mysecret
-#
-# # To create a secret:
-# # This module expects the user to place the files on the remote server and pass them in.
-# - name: create a secret from file
-# oc_secrets:
-# state: present
-# namespace: default
-# name: mysecret
-# files:
-# - /tmp/config.yml
-# - /tmp/passwords.yml
-# delete_after: False
-
-# # To create a secret:
-# # This module expects the user to place the files on the remote server and pass them in.
-# - name: create a secret from content
-# oc_secrets:
-# state: present
-# namespace: default
-# name: mysecret
-# contents:
-# - path: /tmp/config.yml
-# content: "value=True\n"
-# - path: /tmp/passwords.yml
-# content: "test1\ntest2\ntest3\ntest4\n"
-#
-
+import atexit
+import json
import os
import shutil
-import json
-import atexit
+import subprocess
+import yaml
-class OpenShiftOC(object):
- ''' Class to wrap the oc command line tools
- '''
+class OpenShiftCLI(object):
+ ''' Class to wrap the oc command line tools '''
def __init__(self,
namespace,
- secret_name=None,
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False):
''' Constructor for OpenshiftOC '''
self.namespace = namespace
- self.name = secret_name
self.verbose = verbose
self.kubeconfig = kubeconfig
- def get_secrets(self):
+ def replace(self, fname, force=False):
+        '''replace an object from a file'''
+ cmd = ['replace', '-f', fname]
+ if force:
+ cmd = ['replace', '--force', '-f', fname]
+ return self.oc_cmd(cmd)
+
+ def create(self, fname):
+        '''create an object from a file'''
+ return self.oc_cmd(['create', '-f', fname, '-n', self.namespace])
+
+ def delete(self, resource, rname):
+        '''delete a resource by name'''
+ return self.oc_cmd(['delete', resource, rname, '-n', self.namespace])
+
+ def get(self, resource, rname=None):
'''return a secret by name '''
- cmd = ['get', 'secrets', '-o', 'json', '-n', self.namespace]
- if self.name:
- cmd.append(self.name)
+ cmd = ['get', resource, '-o', 'json', '-n', self.namespace]
+ if rname:
+ cmd.append(rname)
rval = self.oc_cmd(cmd, output=True)
@@ -78,65 +51,9 @@ class OpenShiftOC(object):
return rval
- def delete_secret(self):
- '''return all pods '''
- return self.oc_cmd(['delete', 'secrets', self.name, '-n', self.namespace])
-
- def secret_new(self, files):
- '''Create a secret with all pods '''
- secrets = ["%s=%s" % (os.path.basename(sfile), sfile) for sfile in files]
- cmd = ['-n%s' % self.namespace, 'secrets', 'new', self.name]
- cmd.extend(secrets)
-
- return self.oc_cmd(cmd)
-
- @staticmethod
- def create_files_from_contents(data):
- '''Turn an array of dict: filename, content into a files array'''
- files = []
- for sfile in data:
- with open(sfile['path'], 'w') as fds:
- fds.write(sfile['content'])
- files.append(sfile['path'])
-
- # Register cleanup when module is done
- atexit.register(OpenShiftOC.cleanup, files)
- return files
-
- def update_secret(self, files, force=False):
- '''run update secret
-
- This receives a list of file names and converts it into a secret.
- The secret is then written to disk and passed into the `oc replace` command.
- '''
- secret = self.prep_secret(files)
- if secret['returncode'] != 0:
- return secret
-
- sfile_path = '/tmp/%s' % secret['results']['metadata']['name']
- with open(sfile_path, 'w') as sfd:
- sfd.write(json.dumps(secret['results']))
-
- cmd = ['replace', '-f', sfile_path]
- if force:
- cmd = ['replace', '--force', '-f', sfile_path]
-
- atexit.register(OpenShiftOC.cleanup, [sfile_path])
-
- return self.oc_cmd(cmd)
-
- def prep_secret(self, files):
- ''' return what the secret would look like if created
- This is accomplished by passing -ojson. This will most likely change in the future
- '''
- secrets = ["%s=%s" % (os.path.basename(sfile), sfile) for sfile in files]
- cmd = ['-ojson', '-n%s' % self.namespace, 'secrets', 'new', self.name]
- cmd.extend(secrets)
-
- return self.oc_cmd(cmd, output=True)
-
def oc_cmd(self, cmd, output=False):
'''Base command for oc '''
+ #cmds = ['/usr/bin/oc', '--config', self.kubeconfig]
cmds = ['/usr/bin/oc']
cmds.extend(cmd)
@@ -171,6 +88,36 @@ class OpenShiftOC(object):
"results": {}
}
+class Utils(object):
+ ''' utilities for openshiftcli modules '''
+ @staticmethod
+ def create_file(rname, data, ftype=None):
+ ''' create a file in tmp with name and contents'''
+ path = os.path.join('/tmp', rname)
+ with open(path, 'w') as fds:
+ if ftype == 'yaml':
+ fds.write(yaml.dump(data, default_flow_style=False))
+
+ elif ftype == 'json':
+ fds.write(json.dumps(data))
+ else:
+ fds.write(data)
+
+ # Register cleanup when module is done
+ atexit.register(Utils.cleanup, [path])
+ return path
+
+ @staticmethod
+ def create_files_from_contents(data):
+ '''Turn an array of dict: filename, content into a files array'''
+ files = []
+
+ for sfile in data:
+ path = Utils.create_file(sfile['path'], sfile['content'])
+ files.append(path)
+
+ return files
+
@staticmethod
def cleanup(files):
'''Clean up on exit '''
@@ -182,83 +129,167 @@ class OpenShiftOC(object):
os.remove(sfile)
-def exists(results, _name):
- ''' Check to see if the results include the name '''
- if not results:
+ @staticmethod
+ def exists(results, _name):
+ ''' Check to see if the results include the name '''
+ if not results:
+ return False
+
+
+ if Utils.find_result(results, _name):
+ return True
+
return False
- if find_result(results, _name):
+ @staticmethod
+ def find_result(results, _name):
+ ''' Find the specified result by name'''
+ rval = None
+ for result in results:
+ if result.has_key('metadata') and result['metadata']['name'] == _name:
+ rval = result
+ break
+
+ return rval
+
+ @staticmethod
+ def get_resource_file(sfile, sfile_type='yaml'):
+        ''' return the parsed resource file '''
+ contents = None
+ with open(sfile) as sfd:
+ contents = sfd.read()
+
+ if sfile_type == 'yaml':
+ contents = yaml.load(contents)
+ elif sfile_type == 'json':
+ contents = json.loads(contents)
+
+ return contents
+
+ # Disabling too-many-branches. This is a yaml dictionary comparison function
+ # pylint: disable=too-many-branches,too-many-return-statements
+ @staticmethod
+ def check_def_equal(user_def, result_def, debug=False):
+ ''' Given a user defined definition, compare it with the results given back by our query. '''
+
+ # Currently these values are autogenerated and we do not need to check them
+ skip = ['creationTimestamp', 'selfLink', 'resourceVersion', 'uid', 'namespace']
+
+ for key, value in result_def.items():
+ if key in skip:
+ continue
+
+ # Both are lists
+ if isinstance(value, list):
+ if not isinstance(user_def[key], list):
+ return False
+
+ # lists should be identical
+ if value != user_def[key]:
+ return False
+
+ # recurse on a dictionary
+ elif isinstance(value, dict):
+ if not isinstance(user_def[key], dict):
+ if debug:
+ print "dict returned false not instance of dict"
+ return False
+
+ # before passing ensure keys match
+ api_values = set(value.keys()) - set(skip)
+ user_values = set(user_def[key].keys()) - set(skip)
+ if api_values != user_values:
+ if debug:
+ print api_values
+ print user_values
+ print "keys are not equal in dict"
+ return False
+
+ result = Utils.check_def_equal(user_def[key], value, debug=debug)
+ if not result:
+ if debug:
+ print "dict returned false"
+ return False
+
+ # Verify each key, value pair is the same
+ else:
+ if not user_def.has_key(key) or value != user_def[key]:
+ if debug:
+ print "value not equal; user_def does not have key"
+ print value
+ print user_def[key]
+ return False
+
return True
- return False
-
-def find_result(results, _name):
- ''' Find the specified result by name'''
- rval = None
- for result in results:
- #print "%s == %s" % (result['metadata']['name'], name)
- if result.has_key('metadata') and result['metadata']['name'] == _name:
- rval = result
- break
-
- return rval
-
-# Disabling too-many-branches. This is a yaml dictionary comparison function
-# pylint: disable=too-many-branches,too-many-return-statements
-def check_def_equal(user_def, result_def, debug=False):
- ''' Given a user defined definition, compare it with the results given back by our query. '''
-
- # Currently these values are autogenerated and we do not need to check them
- skip = ['creationTimestamp', 'selfLink', 'resourceVersion', 'uid', 'namespace']
-
- for key, value in result_def.items():
- if key in skip:
- continue
-
- # Both are lists
- if isinstance(value, list):
- if not isinstance(user_def[key], list):
- return False
-
- # lists should be identical
- if value != user_def[key]:
- return False
-
- # recurse on a dictionary
- elif isinstance(value, dict):
- if not isinstance(user_def[key], dict):
- if debug:
- print "dict returned false not instance of dict"
- return False
-
- # before passing ensure keys match
- api_values = set(value.keys()) - set(skip)
- user_values = set(user_def[key].keys()) - set(skip)
- if api_values != user_values:
- if debug:
- print api_values
- print user_values
- print "keys are not equal in dict"
- return False
-
- result = check_def_equal(user_def[key], value)
- if not result:
- if debug:
- print "dict returned false"
- return False
-
- # Verify each key, value pair is the same
- else:
- if not user_def.has_key(key) or value != user_def[key]:
- if debug:
- print "value not equal; user_def does not have key"
- print value
- print user_def[key]
- return False
+class Secret(OpenShiftCLI):
+ ''' Class to wrap the oc command line tools
+ '''
+ def __init__(self,
+ namespace,
+ secret_name=None,
+ kubeconfig='/etc/origin/master/admin.kubeconfig',
+ verbose=False):
+ ''' Constructor for OpenshiftOC '''
+ super(Secret, self).__init__(namespace, kubeconfig)
+ self.namespace = namespace
+ self.name = secret_name
+ self.kubeconfig = kubeconfig
+ self.verbose = verbose
+
+ def get_secrets(self):
+ '''return a secret by name '''
+ return self.get('secrets', self.name)
+
+ def delete_secret(self):
+        '''delete the secret by name'''
+ return self.delete('secrets', self.name)
+
+ def secret_new(self, files=None, contents=None):
+        '''create a secret from the given files or contents'''
+ if not files:
+ files = Utils.create_files_from_contents(contents)
+
+ secrets = ["%s=%s" % (os.path.basename(sfile), sfile) for sfile in files]
+ cmd = ['-n%s' % self.namespace, 'secrets', 'new', self.name]
+ cmd.extend(secrets)
+
+ return self.oc_cmd(cmd)
+
+ def update_secret(self, files, force=False):
+ '''run update secret
+
+ This receives a list of file names and converts it into a secret.
+ The secret is then written to disk and passed into the `oc replace` command.
+ '''
+ secret = self.prep_secret(files)
+ if secret['returncode'] != 0:
+ return secret
+
+ sfile_path = '/tmp/%s' % self.name
+ with open(sfile_path, 'w') as sfd:
+ sfd.write(json.dumps(secret['results']))
+
+ atexit.register(Utils.cleanup, [sfile_path])
+
+ return self.replace(sfile_path, force=force)
+
+ def prep_secret(self, files=None, contents=None):
+ ''' return what the secret would look like if created
+ This is accomplished by passing -ojson. This will most likely change in the future
+ '''
+ if not files:
+ files = Utils.create_files_from_contents(contents)
+
+ secrets = ["%s=%s" % (os.path.basename(sfile), sfile) for sfile in files]
+ cmd = ['-ojson', '-n%s' % self.namespace, 'secrets', 'new', self.name]
+ cmd.extend(secrets)
+
+ return self.oc_cmd(cmd, output=True)
- return True
+# pylint: disable=too-many-branches
def main():
'''
ansible oc module for secrets
@@ -281,10 +312,10 @@ def main():
supports_check_mode=True,
)
- occmd = OpenShiftOC(module.params['namespace'],
- module.params['name'],
- kubeconfig=module.params['kubeconfig'],
- verbose=module.params['debug'])
+ occmd = Secret(module.params['namespace'],
+ module.params['name'],
+ kubeconfig=module.params['kubeconfig'],
+ verbose=module.params['debug'])
state = module.params['state']
@@ -302,7 +333,7 @@ def main():
# Delete
########
if state == 'absent':
- if not exists(api_rval['results'], module.params['name']):
+ if not Utils.exists(api_rval['results'], module.params['name']):
module.exit_json(changed=False, state="absent")
if module.check_mode:
@@ -316,39 +347,39 @@ def main():
if module.params['files']:
files = module.params['files']
elif module.params['contents']:
- files = OpenShiftOC.create_files_from_contents(module.params['contents'])
+ files = Utils.create_files_from_contents(module.params['contents'])
else:
module.fail_json(msg='Either specify files or contents.')
########
# Create
########
- if not exists(api_rval['results'], module.params['name']):
+ if not Utils.exists(api_rval['results'], module.params['name']):
if module.check_mode:
module.exit_json(change=False, msg='Would have performed a create.')
- api_rval = occmd.secret_new(files)
+ api_rval = occmd.secret_new(module.params['files'], module.params['contents'])
# Remove files
if files and module.params['delete_after']:
- OpenShiftOC.cleanup(files)
+ Utils.cleanup(files)
module.exit_json(changed=True, results=api_rval, state="present")
########
# Update
########
- secret = occmd.prep_secret(files)
+ secret = occmd.prep_secret(module.params['files'], module.params['contents'])
if secret['returncode'] != 0:
module.fail_json(msg=secret)
- if check_def_equal(secret['results'], api_rval['results'][0]):
+ if Utils.check_def_equal(secret['results'], api_rval['results'][0]):
# Remove files
if files and module.params['delete_after']:
- OpenShiftOC.cleanup(files)
+ Utils.cleanup(files)
module.exit_json(changed=False, results=secret['results'], state="present")
@@ -358,8 +389,8 @@ def main():
api_rval = occmd.update_secret(files, force=module.params['force'])
# Remove files
- if files and module.params['delete_after']:
- OpenShiftOC.cleanup(files)
+ if secret and module.params['delete_after']:
+ Utils.cleanup(files)
if api_rval['returncode'] != 0:
module.fail_json(msg=api_rval)
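The usage examples dropped from the old module header still apply under the new name; for instance, creating a secret from inline contents (adapted from the removed comments):

    - name: create a secret from content
      oc_secret:
        state: present
        namespace: default
        name: mysecret
        contents:
          - path: /tmp/config.yml
            content: "value=True\n"
        delete_after: false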
diff --git a/roles/lib_openshift_api/library/oc_service.py b/roles/lib_openshift_api/library/oc_service.py
new file mode 100644
index 000000000..e7bd2514e
--- /dev/null
+++ b/roles/lib_openshift_api/library/oc_service.py
@@ -0,0 +1,378 @@
+#!/usr/bin/env python
+'''
+ OpenShiftCLI class that wraps the oc commands in a subprocess
+'''
+import atexit
+import json
+import os
+import shutil
+import subprocess
+import yaml
+
+class OpenShiftCLI(object):
+ ''' Class to wrap the oc command line tools '''
+ def __init__(self,
+ namespace,
+ kubeconfig='/etc/origin/master/admin.kubeconfig',
+ verbose=False):
+ ''' Constructor for OpenshiftOC '''
+ self.namespace = namespace
+ self.verbose = verbose
+ self.kubeconfig = kubeconfig
+
+ def replace(self, fname, force=False):
+        '''replace an object from a file'''
+ cmd = ['replace', '-f', fname]
+ if force:
+ cmd = ['replace', '--force', '-f', fname]
+ return self.oc_cmd(cmd)
+
+ def create(self, fname):
+        '''create an object from a file'''
+ return self.oc_cmd(['create', '-f', fname, '-n', self.namespace])
+
+ def delete(self, resource, rname):
+        '''delete a resource by name'''
+ return self.oc_cmd(['delete', resource, rname, '-n', self.namespace])
+
+ def get(self, resource, rname=None):
+        '''return a resource by name'''
+ cmd = ['get', resource, '-o', 'json', '-n', self.namespace]
+ if rname:
+ cmd.append(rname)
+
+ rval = self.oc_cmd(cmd, output=True)
+
+        # Ensure results are returned in an array
+ if rval.has_key('items'):
+ rval['results'] = rval['items']
+ elif not isinstance(rval['results'], list):
+ rval['results'] = [rval['results']]
+
+ return rval
+
+ def oc_cmd(self, cmd, output=False):
+ '''Base command for oc '''
+ #cmds = ['/usr/bin/oc', '--config', self.kubeconfig]
+ cmds = ['/usr/bin/oc']
+ cmds.extend(cmd)
+
+ results = ''
+
+ if self.verbose:
+ print ' '.join(cmds)
+
+ proc = subprocess.Popen(cmds,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ env={'KUBECONFIG': self.kubeconfig})
+ proc.wait()
+ if proc.returncode == 0:
+ if output:
+ try:
+ results = json.loads(proc.stdout.read())
+ except ValueError as err:
+ if "No JSON object could be decoded" in err.message:
+ results = err.message
+
+ if self.verbose:
+ print proc.stderr.read()
+ print results
+ print
+
+ return {"returncode": proc.returncode, "results": results}
+
+ return {"returncode": proc.returncode,
+ "stderr": proc.stderr.read(),
+ "stdout": proc.stdout.read(),
+ "results": {}
+ }
+
+class Utils(object):
+ ''' utilities for openshiftcli modules '''
+ @staticmethod
+ def create_file(rname, data, ftype=None):
+ ''' create a file in tmp with name and contents'''
+ path = os.path.join('/tmp', rname)
+ with open(path, 'w') as fds:
+ if ftype == 'yaml':
+ fds.write(yaml.dump(data, default_flow_style=False))
+
+ elif ftype == 'json':
+ fds.write(json.dumps(data))
+ else:
+ fds.write(data)
+
+ # Register cleanup when module is done
+ atexit.register(Utils.cleanup, [path])
+ return path
+
+ @staticmethod
+ def create_files_from_contents(data):
+ '''Turn an array of dict: filename, content into a files array'''
+ files = []
+
+ for sfile in data:
+ path = Utils.create_file(sfile['path'], sfile['content'])
+ files.append(path)
+
+ return files
+
+ @staticmethod
+ def cleanup(files):
+ '''Clean up on exit '''
+ for sfile in files:
+ if os.path.exists(sfile):
+ if os.path.isdir(sfile):
+ shutil.rmtree(sfile)
+ elif os.path.isfile(sfile):
+ os.remove(sfile)
+
+
+ @staticmethod
+ def exists(results, _name):
+ ''' Check to see if the results include the name '''
+ if not results:
+ return False
+
+
+ if Utils.find_result(results, _name):
+ return True
+
+ return False
+
+ @staticmethod
+ def find_result(results, _name):
+ ''' Find the specified result by name'''
+ rval = None
+ for result in results:
+ if result.has_key('metadata') and result['metadata']['name'] == _name:
+ rval = result
+ break
+
+ return rval
+
+ @staticmethod
+ def get_resource_file(sfile, sfile_type='yaml'):
+        ''' return the parsed resource file '''
+ contents = None
+ with open(sfile) as sfd:
+ contents = sfd.read()
+
+ if sfile_type == 'yaml':
+ contents = yaml.load(contents)
+ elif sfile_type == 'json':
+ contents = json.loads(contents)
+
+ return contents
+
+ # Disabling too-many-branches. This is a yaml dictionary comparison function
+ # pylint: disable=too-many-branches,too-many-return-statements
+ @staticmethod
+ def check_def_equal(user_def, result_def, debug=False):
+ ''' Given a user defined definition, compare it with the results given back by our query. '''
+
+ # Currently these values are autogenerated and we do not need to check them
+ skip = ['creationTimestamp', 'selfLink', 'resourceVersion', 'uid', 'namespace']
+
+ for key, value in result_def.items():
+ if key in skip:
+ continue
+
+ # Both are lists
+ if isinstance(value, list):
+ if not isinstance(user_def[key], list):
+ return False
+
+ # lists should be identical
+ if value != user_def[key]:
+ return False
+
+ # recurse on a dictionary
+ elif isinstance(value, dict):
+ if not isinstance(user_def[key], dict):
+ if debug:
+ print "dict returned false not instance of dict"
+ return False
+
+ # before passing ensure keys match
+ api_values = set(value.keys()) - set(skip)
+ user_values = set(user_def[key].keys()) - set(skip)
+ if api_values != user_values:
+ if debug:
+ print api_values
+ print user_values
+ print "keys are not equal in dict"
+ return False
+
+ result = Utils.check_def_equal(user_def[key], value, debug=debug)
+ if not result:
+ if debug:
+ print "dict returned false"
+ return False
+
+ # Verify each key, value pair is the same
+ else:
+ if not user_def.has_key(key) or value != user_def[key]:
+ if debug:
+ print "value not equal; user_def does not have key"
+ print value
+ print user_def[key]
+ return False
+
+ return True
+
+class Service(OpenShiftCLI):
+ ''' Class to wrap the oc command line tools
+ '''
+ def __init__(self,
+ namespace,
+ service_name=None,
+ kubeconfig='/etc/origin/master/admin.kubeconfig',
+ verbose=False):
+ ''' Constructor for OpenshiftOC '''
+ super(Service, self).__init__(namespace, kubeconfig)
+ self.namespace = namespace
+ self.name = service_name
+ self.verbose = verbose
+ self.kubeconfig = kubeconfig
+
+ def create_service(self, sfile):
+ ''' create the service '''
+ return self.create(sfile)
+
+ def get_services(self):
+        '''return a service by name'''
+ return self.get('services', self.name)
+
+ def delete_service(self):
+        '''delete the service by name'''
+ return self.delete('service', self.name)
+
+ def update_service(self, sfile, force=False):
+        '''run update service
+
+        This receives a service definition file and passes it to the `oc replace` command.
+        '''
+ return self.replace(sfile, force=force)
+
+
+# pylint: disable=too-many-branches
+def main():
+ '''
+ ansible oc module for services
+ '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
+ state=dict(default='present', type='str',
+ choices=['present', 'absent', 'list']),
+ debug=dict(default=False, type='bool'),
+ namespace=dict(default='default', type='str'),
+ name=dict(default=None, type='str'),
+ service_file=dict(default=None, type='str'),
+ input_type=dict(default='yaml',
+ choices=['json', 'yaml'],
+ type='str'),
+ delete_after=dict(default=False, type='bool'),
+ contents=dict(default=None, type='list'),
+ force=dict(default=False, type='bool'),
+ ),
+ mutually_exclusive=[["contents", "service_file"]],
+
+ supports_check_mode=True,
+ )
+ occmd = Service(module.params['namespace'],
+ module.params['name'],
+ kubeconfig=module.params['kubeconfig'],
+ verbose=module.params['debug'])
+
+ state = module.params['state']
+
+ api_rval = occmd.get_services()
+
+ #####
+ # Get
+ #####
+ if state == 'list':
+ module.exit_json(changed=False, results=api_rval['results'], state="list")
+
+ if not module.params['name']:
+ module.fail_json(msg='Please specify a name when state is absent|present.')
+ ########
+ # Delete
+ ########
+ if state == 'absent':
+ if not Utils.exists(api_rval['results'], module.params['name']):
+ module.exit_json(changed=False, state="absent")
+
+ if module.check_mode:
+ module.exit_json(change=False, msg='Would have performed a delete.')
+
+ api_rval = occmd.delete_service()
+ module.exit_json(changed=True, results=api_rval, state="absent")
+
+
+ if state == 'present':
+ if module.params['service_file']:
+ sfile = module.params['service_file']
+ elif module.params['contents']:
+        sfile = Utils.create_files_from_contents(module.params['contents'])[0]
+ else:
+        module.fail_json(msg='Either specify service_file or contents.')
+
+ ########
+ # Create
+ ########
+ if not Utils.exists(api_rval['results'], module.params['name']):
+
+ if module.check_mode:
+ module.exit_json(change=False, msg='Would have performed a create.')
+
+ api_rval = occmd.create_service(sfile)
+
+ # Remove files
+ if sfile and module.params['delete_after']:
+ Utils.cleanup([sfile])
+
+ module.exit_json(changed=True, results=api_rval, state="present")
+
+ ########
+ # Update
+ ########
+ sfile_contents = Utils.get_resource_file(sfile, module.params['input_type'])
+ if Utils.check_def_equal(sfile_contents, api_rval['results'][0]):
+
+ # Remove files
+ if module.params['service_file'] and module.params['delete_after']:
+ Utils.cleanup([sfile])
+
+ module.exit_json(changed=False, results=api_rval['results'][0], state="present")
+
+ if module.check_mode:
+ module.exit_json(change=False, msg='Would have performed an update.')
+
+ api_rval = occmd.update_service(sfile, force=module.params['force'])
+
+ # Remove files
+ if sfile and module.params['delete_after']:
+ Utils.cleanup([sfile])
+
+ if api_rval['returncode'] != 0:
+ module.fail_json(msg=api_rval)
+
+
+ module.exit_json(changed=True, results=api_rval, state="present")
+
+ module.exit_json(failed=True,
+ changed=False,
+ results='Unknown state passed. %s' % state,
+ state="unknown")
+
+# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
+# import module snippets. These are required
+from ansible.module_utils.basic import *
+
+main()
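A matching usage sketch for oc_service, again based only on its argument spec (service name and file path are illustrative):

    - name: ensure a service definition is applied
      oc_service:
        state: present
        namespace: default
        name: logging-es
        service_file: /tmp/logging-es-svc.yml
        input_type: yaml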
diff --git a/roles/lib_yaml_editor/library/yedit.py b/roles/lib_yaml_editor/library/yedit.py
index b75aff109..9b565d0c7 100644
--- a/roles/lib_yaml_editor/library/yedit.py
+++ b/roles/lib_yaml_editor/library/yedit.py
@@ -185,7 +185,7 @@ def main():
rval = yamlfile.get()
if not rval and state != 'present':
module.fail_json(msg='Error opening file [%s]. Verify that the' + \
- ' file exists and that it is has correct permissions.')
+                             ' file exists, that it has correct permissions, and is valid yaml.')
if state == 'list':
module.exit_json(changed=False, results=rval, state="list")
diff --git a/roles/openshift_hosted_logging/README.md b/roles/openshift_hosted_logging/README.md
new file mode 100644
index 000000000..b3f363571
--- /dev/null
+++ b/roles/openshift_hosted_logging/README.md
@@ -0,0 +1,10 @@
+### Required vars:
+
+- openshift_hosted_logging_hostname: kibana.example.com
+- openshift_hosted_logging_elasticsearch_cluster_size: 1
+- openshift_hosted_logging_master_public_url: https://localhost:8443
+
+### Optional vars:
+- openshift_hosted_logging_secret_vars: (defaults to nothing=/dev/null) kibana.crt=/etc/origin/master/ca.crt kibana.key=/etc/origin/master/ca.key ca.crt=/etc/origin/master/ca.crt ca.key=/etc/origin/master/ca.key
+- openshift_hosted_logging_fluentd_replicas: (defaults to 1) 3
+- openshift_hosted_logging_cleanup: (defaults to no) Set this to 'yes' in order to cleanup logging components instead of deploying.
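Combining the README with the adhoc playbook above, a hedged invocation of the role with all required vars set (values are the README's own examples):

    - hosts: masters[0]
      roles:
        - role: openshift_hosted_logging
          openshift_hosted_logging_hostname: kibana.example.com
          openshift_hosted_logging_elasticsearch_cluster_size: 1
          openshift_hosted_logging_master_public_url: https://localhost:8443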
diff --git a/roles/openshift_hosted_logging/files/logging-deployer-sa.yaml b/roles/openshift_hosted_logging/files/logging-deployer-sa.yaml
new file mode 100644
index 000000000..334c9402b
--- /dev/null
+++ b/roles/openshift_hosted_logging/files/logging-deployer-sa.yaml
@@ -0,0 +1,6 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: logging-deployer
+secrets:
+- name: logging-deployer
diff --git a/roles/openshift_hosted_logging/meta/main.yaml b/roles/openshift_hosted_logging/meta/main.yaml
new file mode 100644
index 000000000..b695bde87
--- /dev/null
+++ b/roles/openshift_hosted_logging/meta/main.yaml
@@ -0,0 +1,3 @@
+---
+dependencies:
+ - { role: openshift_common }
diff --git a/roles/openshift_hosted_logging/tasks/cleanup_logging.yaml b/roles/openshift_hosted_logging/tasks/cleanup_logging.yaml
new file mode 100644
index 000000000..8331f0389
--- /dev/null
+++ b/roles/openshift_hosted_logging/tasks/cleanup_logging.yaml
@@ -0,0 +1,59 @@
+---
+ - name: Create temp directory for kubeconfig
+ command: mktemp -d /tmp/openshift-ansible-XXXXXX
+ register: mktemp
+ changed_when: False
+
+ - name: Copy the admin client config(s)
+ command: >
+ cp {{ openshift_master_config_dir }}/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
+ changed_when: False
+
+ - name: "Checking for logging project"
+ command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get project logging"
+ register: logging_project
+ failed_when: "'FAILED' in logging_project.stderr"
+
+ - name: "Changing projects"
+ command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig project logging"
+
+
+ - name: "Cleanup any previous logging infrastructure"
+ command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete --ignore-not-found all --selector logging-infra={{ item }}"
+ with_items:
+ - kibana
+ - fluentd
+ - elasticsearch
+ ignore_errors: yes
+
+ - name: "Cleanup existing support infrastructure"
+ command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete --ignore-not-found all,sa,oauthclient --selector logging-infra=support"
+ ignore_errors: yes
+
+ - name: "Cleanup existing secrets"
+ command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete secret logging-fluentd logging-elasticsearch logging-es-proxy logging-kibana logging-kibana-proxy logging-kibana-ops-proxy"
+ ignore_errors: yes
+ register: clean_result
+ failed_when: clean_result.rc == 1 and 'not found' not in clean_result.stderr
+
+ - name: "Cleanup existing logging deployers"
+ command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete pods --all"
+
+
+ - name: "Cleanup logging project"
+ command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete project logging"
+
+
+ - name: "Remove deployer template"
+ command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete template logging-deployer-template -n openshift"
+    register: delete_output
+    failed_when: delete_output.rc == 1 and 'not found' not in delete_output.stderr
+
+
+ - name: Delete temp directory
+ file:
+ name: "{{ mktemp.stdout }}"
+ state: absent
+ changed_when: False
+
+ - debug: msg="Success!"
diff --git a/roles/openshift_hosted_logging/tasks/deploy_logging.yaml b/roles/openshift_hosted_logging/tasks/deploy_logging.yaml
new file mode 100644
index 000000000..d8a5b62a0
--- /dev/null
+++ b/roles/openshift_hosted_logging/tasks/deploy_logging.yaml
@@ -0,0 +1,105 @@
+---
+  - fail: msg="This role requires the following vars to be defined: openshift_hosted_logging_master_public_url, openshift_hosted_logging_hostname, openshift_hosted_logging_elasticsearch_cluster_size"
+ when: "openshift_hosted_logging_hostname is not defined or
+ openshift_hosted_logging_elasticsearch_cluster_size is not defined or
+ openshift_hosted_logging_master_public_url is not defined"
+
+ - name: Create temp directory for kubeconfig
+ command: mktemp -d /tmp/openshift-ansible-XXXXXX
+ register: mktemp
+ changed_when: False
+
+ - name: Copy the admin client config(s)
+ command: >
+ cp {{ openshift_master_config_dir }}/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
+ changed_when: False
+
+ - name: "Create logging project"
+    command: "{{ openshift.common.admin_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig new-project logging"
+
+ - name: "Changing projects"
+ command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig project logging"
+
+ - name: "Creating logging deployer secret"
+    command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig secrets new logging-deployer {{ openshift_hosted_logging_secret_vars | default('nothing=/dev/null') }}"
+ register: secret_output
+ failed_when: "secret_output.rc == 1 and 'exists' not in secret_output.stderr"
+
+ - name: "Copy serviceAccount file"
+ copy: dest=/tmp/logging-deployer-sa.yaml
+ src={{role_path}}/files/logging-deployer-sa.yaml
+ force=yes
+
+ - name: "Create logging-deployer service account"
+ command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f /tmp/logging-deployer-sa.yaml"
+ register: deployer_output
+ failed_when: "deployer_output.rc == 1 and 'exists' not in deployer_output.stderr"
+
+ - name: "Set permissions for logging-deployer service account"
+ command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig policy add-role-to-user edit system:serviceaccount:logging:logging-deployer"
+ register: permiss_output
+ failed_when: "permiss_output.rc == 1 and 'exists' not in permiss_output.stderr"
+
+ - name: "Set permissions for fluentd"
+    command: "{{ openshift.common.admin_binary }} policy add-scc-to-user privileged system:serviceaccount:logging:aggregated-logging-fluentd"
+ register: fluentd_output
+ failed_when: "fluentd_output.rc == 1 and 'exists' not in fluentd_output.stderr"
+
+ - name: "Set additional permissions for fluentd"
+    command: "{{ openshift.common.admin_binary }} policy add-cluster-role-to-user cluster-reader system:serviceaccount:logging:aggregated-logging-fluentd"
+ register: fluentd2_output
+ failed_when: "fluentd2_output.rc == 1 and 'exists' not in fluentd2_output.stderr"
+
+ - name: "Create deployer template"
+ command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f /usr/share/openshift/examples/infrastructure-templates/enterprise/logging-deployer.yaml -n openshift"
+ register: template_output
+ failed_when: "template_output.rc == 1 and 'exists' not in template_output.stderr"
+
+ - name: "Process the deployer template"
+ shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig process logging-deployer-template -n openshift -v {{ oc_process_values }} | {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f -"
+
+ - name: "Wait for image pull and deployer pod"
+ shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get pods | grep logging-deployer.*Completed"
+ register: result
+ until: result.rc == 0
+ retries: 15
+ delay: 10
+
+ - name: "Process support template"
+ shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig process logging-support-template | {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f -"
+
+  - name: "Set insecure registry"
+ command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig annotate is --all openshift.io/image.insecureRepository=true --overwrite"
+ when: "target_registry is defined and insecure_registry == 'true'"
+
+ - name: "Wait for imagestreams to become available"
+ shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get is | grep logging-fluentd"
+ register: result
+ until: result.rc == 0
+ failed_when: result.rc == 1 and 'not found' not in result.stderr
+ retries: 20
+ delay: 10
+
+ - name: "Wait for replication controllers to become available"
+ shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get rc | grep logging-fluentd-1"
+ register: result
+ until: result.rc == 0
+ failed_when: result.rc == 1 and 'not found' not in result.stderr
+ retries: 20
+ delay: 10
+
+
+ - name: "Scale fluentd deployment config"
+ command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig scale dc/logging-fluentd --replicas={{ fluentd_replicas | default('1') }}"
+
+
+ - name: "Scale fluentd replication controller"
+ command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig scale rc/logging-fluentd-1 --replicas={{ fluentd_replicas | default('1') }}"
+
+  - debug: msg="Logging components deployed. Note the persistent volume for elasticsearch must be set up manually."
+
+ - name: Delete temp directory
+ file:
+ name: "{{ mktemp.stdout }}"
+ state: absent
+ changed_when: False
diff --git a/roles/openshift_hosted_logging/tasks/main.yaml b/roles/openshift_hosted_logging/tasks/main.yaml
new file mode 100644
index 000000000..42568597a
--- /dev/null
+++ b/roles/openshift_hosted_logging/tasks/main.yaml
@@ -0,0 +1,8 @@
+---
+- name: Cleanup logging deployment
+ include: "{{ role_path }}/tasks/cleanup_logging.yaml"
+ when: openshift_hosted_logging_cleanup | default(false) | bool
+
+- name: Deploy logging
+ include: "{{ role_path }}/tasks/deploy_logging.yaml"
+ when: not openshift_hosted_logging_cleanup | default(false) | bool
diff --git a/roles/openshift_hosted_logging/vars/main.yaml b/roles/openshift_hosted_logging/vars/main.yaml
new file mode 100644
index 000000000..586c2ab91
--- /dev/null
+++ b/roles/openshift_hosted_logging/vars/main.yaml
@@ -0,0 +1,6 @@
+kh_kv: "KIBANA_HOSTNAME={{ openshift_hosted_logging_hostname | quote }}"
+es_cs_kv: "ES_CLUSTER_SIZE={{ openshift_hosted_logging_elasticsearch_cluster_size | quote }}"
+pmu_kv: "PUBLIC_MASTER_URL={{ openshift_hosted_logging_master_public_url | quote }}"
+ip_kv: "{{ 'IMAGE_PREFIX=' ~ target_registry | quote if target_registry is defined else '' }}"
+oc_process_values: "{{ kh_kv }},{{ es_cs_kv }},{{ pmu_kv }},{{ ip_kv }}"
+openshift_master_config_dir: "{{ openshift.common.config_base }}/master"
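With the README's example values and no target_registry defined, these vars render into the single -v argument consumed by the "Process the deployer template" task; an illustrative expansion (note ip_kv evaluates to an empty string here, leaving a trailing comma):

    oc_process_values: "KIBANA_HOSTNAME=kibana.example.com,ES_CLUSTER_SIZE=1,PUBLIC_MASTER_URL=https://localhost:8443,"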
diff --git a/roles/openshift_manageiq/tasks/main.yaml b/roles/openshift_manageiq/tasks/main.yaml
index d2ff1b4b7..2a651df65 100644
--- a/roles/openshift_manageiq/tasks/main.yaml
+++ b/roles/openshift_manageiq/tasks/main.yaml
@@ -18,7 +18,7 @@
failed_when: "'already exists' not in osmiq_create_mi_project.stderr and osmiq_create_mi_project.rc != 0"
changed_when: osmiq_create_mi_project.rc == 0
-- name: Create Service Account
+- name: Create Admin Service Account
shell: >
echo {{ manageiq_service_account | to_json | quote }} |
{{ openshift.common.client_binary }} create
@@ -29,6 +29,17 @@
failed_when: "'already exists' not in osmiq_create_service_account.stderr and osmiq_create_service_account.rc != 0"
changed_when: osmiq_create_service_account.rc == 0
+- name: Create Image Inspector Service Account
+ shell: >
+ echo {{ manageiq_image_inspector_service_account | to_json | quote }} |
+ {{ openshift.common.client_binary }} create
+ -n management-infra
+ --config={{manage_iq_tmp_conf}}
+ -f -
+ register: osmiq_create_service_account
+ failed_when: "'already exists' not in osmiq_create_service_account.stderr and osmiq_create_service_account.rc != 0"
+ changed_when: osmiq_create_service_account.rc == 0
+
- name: Create Cluster Role
shell: >
echo {{ manageiq_cluster_role | to_json | quote }} |
diff --git a/roles/openshift_manageiq/vars/main.yml b/roles/openshift_manageiq/vars/main.yml
index 77e1c304b..69ee2cb4c 100644
--- a/roles/openshift_manageiq/vars/main.yml
+++ b/roles/openshift_manageiq/vars/main.yml
@@ -15,6 +15,12 @@ manageiq_service_account:
metadata:
name: management-admin
+manageiq_image_inspector_service_account:
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: inspector-admin
+
manage_iq_tmp_conf: /tmp/manageiq_admin.kubeconfig
manage_iq_tasks:
@@ -22,3 +28,5 @@ manage_iq_tasks:
- policy add-role-to-user -n management-infra management-infra-admin -z management-admin
- policy add-cluster-role-to-user cluster-reader system:serviceaccount:management-infra:management-admin
- policy add-scc-to-user privileged system:serviceaccount:management-infra:management-admin
+ - policy add-cluster-role-to-user system:image-puller system:serviceaccount:management-infra:inspector-admin
+ - policy add-scc-to-user privileged system:serviceaccount:management-infra:inspector-admin
diff --git a/roles/openshift_serviceaccounts/tasks/main.yml b/roles/openshift_serviceaccounts/tasks/main.yml
index f34fa7b74..5dd28d52a 100644
--- a/roles/openshift_serviceaccounts/tasks/main.yml
+++ b/roles/openshift_serviceaccounts/tasks/main.yml
@@ -9,7 +9,8 @@
- name: create the service account
shell: >
echo {{ lookup('template', '../templates/serviceaccount.j2')
- | from_yaml | to_json | quote }} | {{ openshift.common.client_binary }} create -f -
+ | from_yaml | to_json | quote }} | {{ openshift.common.client_binary }}
+ -n {{ openshift_serviceaccounts_namespace }} create -f -
when: item.1.rc != 0
with_together:
- openshift_serviceaccounts_names
diff --git a/roles/os_zabbix/vars/template_openshift_master.yml b/roles/os_zabbix/vars/template_openshift_master.yml
index 705066b35..1f2117b2c 100644
--- a/roles/os_zabbix/vars/template_openshift_master.yml
+++ b/roles/os_zabbix/vars/template_openshift_master.yml
@@ -93,6 +93,18 @@ g_template_openshift_master:
applications:
- Openshift Master
+ - key: openshift.master.pv.space.total
+      description: Shows the total space of the persistent volumes
+ value_type: int
+ applications:
+ - Openshift Master
+
+ - key: openshift.master.pv.space.available
+      description: Shows the available space of the persistent volumes
+ value_type: int
+ applications:
+ - Openshift Master
+
- key: openshift.master.pv.total.count
description: Total number of Persistent Volumes in the Openshift Cluster
value_type: int
@@ -279,6 +291,29 @@ g_template_openshift_master:
applications:
- Openshift Master Metrics
+ zdiscoveryrules:
+ - name: disc.pv
+ key: disc.pv
+ lifetime: 1
+ description: "Dynamically register the Persistent Volumes"
+
+ zitemprototypes:
+ - discoveryrule_key: disc.pv
+ name: "disc.pv.count.{#OSO_PV}"
+ key: "disc.pv.count[{#OSO_PV}]"
+ value_type: int
+      description: "Number of PVs of this size"
+ applications:
+ - Openshift Master
+
+ - discoveryrule_key: disc.pv
+ name: "disc.pv.count.available.{#OSO_PV}"
+ key: "disc.pv.count.available[{#OSO_PV}]"
+ value_type: int
+      description: "Number of PVs of this size that are available"
+ applications:
+ - Openshift Master
+
ztriggers:
- name: 'Openshift Master process not running on {HOST.NAME}'
expression: '{Template Openshift Master:openshift.master.process.count.max(#3)}<1'