-rw-r--r--  .tito/packages/openshift-ansible                 2
-rw-r--r--  bin/openshift_ansible/awsutil.py                 10
-rwxr-xr-x  bin/oscp                                         79
-rwxr-xr-x  bin/ossh                                         92
-rwxr-xr-x  bin/ossh_bash_completion                         6
-rw-r--r--  bin/ossh_zsh_completion                          6
-rw-r--r--  openshift-ansible.spec                           18
-rw-r--r--  roles/lib_openshift_api/library/oc_secrets.py    379
8 files changed, 443 insertions(+), 149 deletions(-)
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index 80563b79a..be63f57af 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.0.55-1 ./
+3.0.56-1 ./
diff --git a/bin/openshift_ansible/awsutil.py b/bin/openshift_ansible/awsutil.py
index eba11e851..11651f087 100644
--- a/bin/openshift_ansible/awsutil.py
+++ b/bin/openshift_ansible/awsutil.py
@@ -137,13 +137,13 @@ class AwsUtil(object):
inst_by_env = {}
for _, host in inv['_meta']['hostvars'].items():
# If you don't have an environment tag, we're going to ignore you
- if 'ec2_tag_environment' not in host:
+ if 'oo_environment' not in host:
continue
- if host['ec2_tag_environment'] not in inst_by_env:
- inst_by_env[host['ec2_tag_environment']] = {}
- host_id = "%s:%s" % (host['ec2_tag_Name'], host['ec2_id'])
- inst_by_env[host['ec2_tag_environment']][host_id] = host
+ if host['oo_environment'] not in inst_by_env:
+ inst_by_env[host['oo_environment']] = {}
+ host_id = "%s:%s" % (host['oo_name'], host['oo_id'])
+ inst_by_env[host['oo_environment']][host_id] = host
return inst_by_env
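
The hunk above moves AwsUtil's environment grouping off the EC2-specific ec2_tag_* keys and onto the provider-neutral oo_* metadata keys. As a minimal sketch of the same grouping pattern, assuming hypothetical hostvars that already carry the oo_* keys:

    # Sketch only; the hostvars below are made-up example data.
    hostvars = {
        'node1': {'oo_environment': 'prod', 'oo_name': 'node1', 'oo_id': 'i-0123'},
        'node2': {'oo_environment': 'stg', 'oo_name': 'node2', 'oo_id': 'i-0456'},
    }

    inst_by_env = {}
    for host in hostvars.values():
        if 'oo_environment' not in host:
            continue  # hosts without an environment tag are ignored
        env = host['oo_environment']
        host_id = "%s:%s" % (host['oo_name'], host['oo_id'])
        inst_by_env.setdefault(env, {})[host_id] = host
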
diff --git a/bin/oscp b/bin/oscp
index 203b9d6da..4d3286ed8 100755
--- a/bin/oscp
+++ b/bin/oscp
@@ -14,6 +14,8 @@ CONFIG_MAIN_SECTION = 'main'
class Oscp(object):
def __init__(self):
+ self.host = None
+ self.user = ''
self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
# Default the config path to /etc
@@ -56,8 +58,6 @@ class Oscp(object):
def parse_cli_args(self):
parser = argparse.ArgumentParser(description='OpenShift Online SSH Tool.')
- parser.add_argument('-e', '--env',
- action="store", help="Environment where this server exists.")
parser.add_argument('-d', '--debug', default=False,
action="store_true", help="debug mode")
parser.add_argument('-v', '--verbose', default=False,
@@ -82,8 +82,6 @@ class Oscp(object):
def process_host(self):
'''Determine host name and user name for SSH.
'''
- self.user = ''
-
# is the first param passed a valid file?
if os.path.isfile(self.args.src) or os.path.isdir(self.args.src):
self.local_src = True
@@ -108,76 +106,34 @@ class Oscp(object):
self.host = search.groups()[0]
self.path = search.groups()[1]
- if self.args.env:
- self.env = self.args.env
- elif "." in self.host:
- self.host, self.env = self.host.split(".")
- else:
- self.env = None
-
def get_hosts(self, refresh_cache=False):
- '''Query our host inventory and return a dict where the format
- equals:
-
- dict['environment'] = [{'servername' : {}}, ]
- '''
+ '''Query our host inventory and return a dict where the format '''
if refresh_cache:
- self.host_inventory = self.aws.build_host_dict_by_env(['--refresh-cache'])
+ self.host_inventory = self.aws.get_inventory(['--refresh-cache'])['_meta']['hostvars']
else:
- self.host_inventory = self.aws.build_host_dict_by_env()
+ self.host_inventory = self.aws.get_inventory()['_meta']['hostvars']
def select_host(self):
'''select host attempts to match the host specified
on the command line with a list of hosts.
'''
- results = []
- for env in self.host_inventory.keys():
- for hostname, server_info in self.host_inventory[env].items():
- if hostname.split(':')[0] == self.host:
- results.append((hostname, server_info))
-
- # attempt to select the correct environment if specified
- if self.env:
- results = filter(lambda result: result[1]['oo_environment'] == self.env, results)
-
- if results:
- return results
+ results = None
+ if self.host_inventory.has_key(self.host):
+ results = (self.host, self.host_inventory[self.host])
else:
print "Could not find specified host: %s." % self.host
# default - no results found.
- return None
+ return results
def list_hosts(self, limit=None):
'''Function to print out the host inventory.
Takes a single parameter to limit the number of hosts printed.
'''
-
- if self.env:
- results = self.select_host()
- if len(results) == 1:
- hostname, server_info = results[0]
- sorted_keys = server_info.keys()
- sorted_keys.sort()
- for key in sorted_keys:
- print '{0:<35} {1}'.format(key, server_info[key])
- else:
- for host_id, server_info in results[:limit]:
- print '{oo_name:<35} {oo_clusterid:<10} {oo_environment:<8} ' \
- '{oo_id:<15} {oo_public_ip:<18} {oo_private_ip:<18}'.format(**server_info)
-
- if limit:
- print
- print 'Showing only the first %d results...' % limit
- print
-
- else:
- for env, host_ids in self.host_inventory.items():
- for host_id, server_info in host_ids.items():
- print '{oo_name:<35} {oo_clusterid:<10} {oo_environment:<8} ' \
- '{oo_id:<15} {oo_public_ip:<18} {oo_private_ip:<18}'.format(**server_info)
-
+ for host_id, server_info in self.host_inventory.items():
+ print '{oo_name:<35} {oo_clusterid:<10} {oo_environment:<8} ' \
+ '{oo_id:<15} {oo_public_ip:<18} {oo_private_ip:<18}'.format(**server_info)
def scp(self):
'''scp files to or from a specified host
@@ -203,17 +159,10 @@ class Oscp(object):
if not results:
return # early exit, no results
- if len(results) > 1:
- print "Multiple results found for %s." % self.host
- for result in results:
- print "{oo_name:<35} {oo_clusterid:<5} {oo_environment:<5} {oo_id:<10}".format(**result[1])
- return # early exit, too many results
-
# Assume we have one and only one.
- hostname, server_info = results[0]
- dns = server_info['oo_public_ip']
+ server_info = results[1]
- host_str = "%s%s%s" % (self.user, dns, self.path)
+ host_str = "%s%s%s" % (self.user, server_info['oo_public_ip'], self.path)
if self.local_src:
scp_args.append(self.args.src)
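
With the inventory now keyed directly by hostname, oscp's select_host drops the per-environment scan and the environment filter and becomes a plain dictionary lookup. A condensed sketch of that pattern, written with the `in` test rather than has_key and with made-up inventory data:

    def select_host(host, host_inventory):
        '''Return (hostname, hostvars) for an exact inventory match, else None.'''
        if host in host_inventory:
            return (host, host_inventory[host])
        print("Could not find specified host: %s." % host)
        return None

    # hypothetical inventory keyed by bare hostname, as get_inventory() now returns
    inventory = {'node1': {'oo_public_ip': '203.0.113.10'}}
    match = select_host('node1', inventory)   # -> ('node1', {...})
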
diff --git a/bin/ossh b/bin/ossh
index 5e2506638..0dd2fb741 100755
--- a/bin/ossh
+++ b/bin/ossh
@@ -14,6 +14,8 @@ CONFIG_MAIN_SECTION = 'main'
class Ossh(object):
def __init__(self):
+ self.user = None
+ self.host = None
self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
# Default the config path to /etc
@@ -54,8 +56,6 @@ class Ossh(object):
def parse_cli_args(self):
parser = argparse.ArgumentParser(description='OpenShift Online SSH Tool.')
- parser.add_argument('-e', '--env', action="store",
- help="Which environment to search for the host ")
parser.add_argument('-d', '--debug', default=False,
action="store_true", help="debug mode")
parser.add_argument('-v', '--verbose', default=False,
@@ -83,91 +83,48 @@ class Ossh(object):
def process_host(self):
'''Determine host name and user name for SSH.
'''
- self.env = None
- self.user = None
-
- re_env = re.compile("\.(" + "|".join(self.host_inventory.keys()) + ")")
- search = re_env.search(self.args.host)
- if self.args.env:
- self.env = self.args.env
- elif search:
- # take the first?
- self.env = search.groups()[0]
- # remove env from hostname command line arg if found
- if search:
- self.args.host = re_env.split(self.args.host)[0]
+ parts = self.args.host.split('@')
# parse username if passed
- if '@' in self.args.host:
- self.user, self.host = self.args.host.split('@')
+ if len(parts) > 1:
+ self.user = parts[0]
+ self.host = parts[1]
else:
- self.host = self.args.host
+ self.host = parts[0]
+
if self.args.login_name:
self.user = self.args.login_name
- def get_hosts(self, refresh_cache=False):
- '''Query our host inventory and return a dict where the format
- equals:
- dict['servername'] = dns_name
- '''
+ def get_hosts(self, refresh_cache=False):
+ '''Query our host inventory and return a dict where the format '''
if refresh_cache:
- self.host_inventory = self.aws.build_host_dict_by_env(['--refresh-cache'])
+ self.host_inventory = self.aws.get_inventory(['--refresh-cache'])['_meta']['hostvars']
else:
- self.host_inventory = self.aws.build_host_dict_by_env()
+ self.host_inventory = self.aws.get_inventory()['_meta']['hostvars']
def select_host(self):
'''select host attempts to match the host specified
on the command line with a list of hosts.
'''
- results = []
- for env in self.host_inventory.keys():
- for hostname, server_info in self.host_inventory[env].items():
- if hostname.split(':')[0] == self.host:
- results.append((hostname, server_info))
-
- # attempt to select the correct environment if specified
- if self.env:
- results = filter(lambda result: result[1]['oo_environment'] == self.env, results)
-
- if results:
- return results
+ results = None
+ if self.host_inventory.has_key(self.host):
+ results = (self.host, self.host_inventory[self.host])
else:
print "Could not find specified host: %s." % self.host
# default - no results found.
- return None
+ return results
def list_hosts(self, limit=None):
'''Function to print out the host inventory.
Takes a single parameter to limit the number of hosts printed.
'''
-
- if self.env:
- results = self.select_host()
- if len(results) == 1:
- hostname, server_info = results[0]
- sorted_keys = server_info.keys()
- sorted_keys.sort()
- for key in sorted_keys:
- print '{0:<35} {1}'.format(key, server_info[key])
- else:
- for host_id, server_info in results[:limit]:
- print '{oo_name:<35} {oo_clusterid:<10} {oo_environment:<8} ' \
- '{oo_id:<15} {oo_public_ip:<18} {oo_private_ip:<18}'.format(**server_info)
-
- if limit:
- print
- print 'Showing only the first %d results...' % limit
- print
-
- else:
- for env, host_ids in self.host_inventory.items():
- for host_id, server_info in host_ids.items():
- print '{oo_name:<35} {oo_clusterid:<10} {oo_environment:<8} ' \
- '{oo_id:<15} {oo_public_ip:<18} {oo_private_ip:<18}'.format(**server_info)
+ for host_id, server_info in self.host_inventory.items():
+ print '{oo_name:<35} {oo_clusterid:<10} {oo_environment:<8} ' \
+ '{oo_id:<15} {oo_public_ip:<18} {oo_private_ip:<18}'.format(**server_info)
def ssh(self):
'''SSH to a specified host
@@ -193,17 +150,10 @@ class Ossh(object):
if not results:
return # early exit, no results
- if len(results) > 1:
- print "Multiple results found for %s." % self.host
- for result in results:
- print "{oo_name:<35} {oo_clusterid:<5} {oo_environment:<5} {oo_id:<10}".format(**result[1])
- return # early exit, too many results
-
# Assume we have one and only one.
- _, server_info = results[0]
- dns = server_info['oo_public_ip']
+ server_info = results[1]
- ssh_args.append(dns)
+ ssh_args.append(server_info['oo_public_ip'])
#last argument
if self.args.command:
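
The bin/ossh changes mirror bin/oscp: the -e/--env option and the environment-suffix parsing are removed, and user/host handling reduces to splitting the argument on '@'. A hedged sketch of that parsing step, with split_login as an illustrative helper name rather than anything in the tool itself:

    def split_login(arg, login_name=None):
        '''Split "user@host" into (user, host); an explicit -l login name wins.'''
        parts = arg.split('@')
        if len(parts) > 1:
            user, host = parts[0], parts[1]
        else:
            user, host = None, parts[0]
        if login_name:
            user = login_name   # ossh's -l/--login_name overrides any user@ prefix
        return user, host

    # split_login('root@node1') -> ('root', 'node1')
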
diff --git a/bin/ossh_bash_completion b/bin/ossh_bash_completion
index 77b770a43..dcbde3e51 100755
--- a/bin/ossh_bash_completion
+++ b/bin/ossh_bash_completion
@@ -1,12 +1,12 @@
__ossh_known_hosts(){
if python -c 'import openshift_ansible' &>/dev/null; then
- /usr/bin/python -c 'from openshift_ansible import multi_inventory; m=multi_inventory.MultiInventory(); m.run(); z=m.result; print "\n".join(["%s.%s" % (host["oo_name"],host["oo_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("oo_name", "oo_environment"))])'
+ /usr/bin/python -c 'from openshift_ansible import multi_inventory; m=multi_inventory.MultiInventory(); m.run(); z=m.result; print "\n".join([name for name in z["_meta"]["hostvars"].keys()])'
elif [[ -f /dev/shm/.ansible/tmp/multi_inventory.cache ]]; then
- /usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["oo_name"],host["oo_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("oo_name", "oo_environment"))])'
+ /usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join([name for name in z["_meta"]["hostvars"].keys()])'
elif [[ -f ~/.ansible/tmp/multi_inventory.cache ]]; then
- /usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("~/.ansible/tmp/multi_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["oo_name"],host["oo_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("oo_name", "oo_environment"))])'
+ /usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("~/.ansible/tmp/multi_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join([name for name in z["_meta"]["hostvars"].keys()])'
fi
}
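
The completion helper now simply echoes the hostvars keys, since ossh looks hosts up by their bare inventory names. Unrolled into readable form, the cached-inventory branch of the one-liner amounts to roughly the following sketch:

    # Readable expansion of the completion one-liner (sketch; same behaviour).
    import json

    CACHE = "/dev/shm/.ansible/tmp/multi_inventory.cache"  # same path the script checks

    with open(CACHE) as cache_file:
        inventory = json.load(cache_file)

    for name in inventory["_meta"]["hostvars"]:
        print(name)
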
diff --git a/bin/ossh_zsh_completion b/bin/ossh_zsh_completion
index 170ca889b..94ea61dab 100644
--- a/bin/ossh_zsh_completion
+++ b/bin/ossh_zsh_completion
@@ -2,13 +2,13 @@
_ossh_known_hosts(){
if python -c 'import openshift_ansible' &>/dev/null; then
- print $(/usr/bin/python -c 'from openshift_ansible import multi_inventory; m=multi_inventory.MultiInventory(); m.run(); z=m.result; print "\n".join(["%s.%s" % (host["oo_name"],host["oo_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("oo_name", "oo_environment"))])')
+ print $(/usr/bin/python -c 'from openshift_ansible import multi_inventory; m=multi_inventory.MultiInventory(); m.run(); z=m.result; print "\n".join([name for name in z["_meta"]["hostvars"].keys()])')
elif [[ -f /dev/shm/.ansible/tmp/multi_inventory.cache ]]; then
- print $(/usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["oo_name"],host["oo_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("oo_name", "oo_environment"))])')
+ print $(/usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join([name for name in z["_meta"]["hostvars"].keys() ])')
elif [[ -f ~/.ansible/tmp/multi_inventory.cache ]]; then
- print $(/usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("~/.ansible/tmp/multi_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["oo_name"],host["oo_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("oo_name", "oo_environment"))])')
+ print $(/usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("~/.ansible/tmp/multi_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join([name for name in z["_meta"]["hostvars"].keys() ])')
fi
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index 4beeed3dd..133d8112a 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -5,7 +5,7 @@
}
Name: openshift-ansible
-Version: 3.0.55
+Version: 3.0.56
Release: 1%{?dist}
Summary: Openshift and Atomic Enterprise Ansible
License: ASL 2.0
@@ -279,6 +279,22 @@ Atomic OpenShift Utilities includes
%changelog
+* Mon Mar 14 2016 Kenny Woodson <kwoodson@redhat.com> 3.0.56-1
+- Updating our metadata tooling to work without env (kwoodson@redhat.com)
+- improve ordering of systemd units (jdetiber@redhat.com)
+- Docker role refactor (jdetiber@redhat.com)
+- Ensure is_containerized is cast as bool. (abutcher@redhat.com)
+- Sync latest to v1.2 (sdodson@redhat.com)
+- Sync with latest image stream and templates (sdodson@redhat.com)
+- Allow origin version to be passed in as an argument (sdodson@redhat.com)
+- Add support for Openstack integration (sbaubeau@redhat.com)
+- Expose log level on the monitor (abhat@nuagenetworks.net)
+- openshift_facts: Safe cast additional bools (smunilla@redhat.com)
+- openshift-ansible: Wrap boolean facts (smunilla@redhat.com)
+- fixed copr releasers file (twiest@redhat.com)
+- Libvirt provider fixes (jdetiber@redhat.com)
+- Support log level configuration for plugin (abhat@nuagenetworks.net)
+
* Wed Mar 09 2016 Brenton Leanhardt <bleanhar@redhat.com> 3.0.55-1
- Bug 1315564 - upgrade to ose3.2 failed on Atomic Hosts (bleanhar@redhat.com)
- Bug 1315563 - Upgrade failed to containerized install OSE 3.1 on RHEL
diff --git a/roles/lib_openshift_api/library/oc_secrets.py b/roles/lib_openshift_api/library/oc_secrets.py
new file mode 100644
index 000000000..841c14692
--- /dev/null
+++ b/roles/lib_openshift_api/library/oc_secrets.py
@@ -0,0 +1,379 @@
+#!/usr/bin/env python
+'''
+module for openshift cloud secrets
+'''
+# Examples:
+#
+# # to initiate and use /etc/origin/master/admin.kubeconfig file for auth
+# - name: list secrets
+# oc_secrets:
+# state: list
+# namespace: default
+#
+# # To get a specific secret named 'mysecret'
+# - name: list secrets
+# oc_secrets:
+# state: list
+# namespace: default
+# name: mysecret
+#
+# # To create a secret:
+# # This module expects the user to place the files on the remote server and pass them in.
+# - name: create a secret from file
+# oc_secrets:
+# state: present
+# namespace: default
+# name: mysecret
+# files:
+# - /tmp/config.yml
+# - /tmp/passwords.yml
+# delete_after: False
+
+# # To create a secret:
+# # This module expects the user to place the files on the remote server and pass them in.
+# - name: create a secret from content
+# oc_secrets:
+# state: present
+# namespace: default
+# name: mysecret
+# contents:
+# - path: /tmp/config.yml
+# content: "value=True\n"
+# - path: /tmp/passwords.yml
+# content: "test1\ntest2\ntest3\ntest4\n"
+#
+
+import os
+import shutil
+import json
+import atexit
+
+class OpenShiftOC(object):
+ ''' Class to wrap the oc command line tools
+ '''
+ def __init__(self,
+ namespace,
+ secret_name=None,
+ kubeconfig='/etc/origin/master/admin.kubeconfig',
+ verbose=False):
+ ''' Constructor for OpenshiftOC '''
+ self.namespace = namespace
+ self.name = secret_name
+ self.verbose = verbose
+ self.kubeconfig = kubeconfig
+
+ def get_secrets(self):
+ '''return a secret by name '''
+ cmd = ['get', 'secrets', '-o', 'json', '-n', self.namespace]
+ if self.name:
+ cmd.append(self.name)
+
+ rval = self.oc_cmd(cmd, output=True)
+
+ # Ensure results are retuned in an array
+ if rval.has_key('items'):
+ rval['results'] = rval['items']
+ elif not isinstance(rval['results'], list):
+ rval['results'] = [rval['results']]
+
+ return rval
+
+ def delete_secret(self):
+ '''return all pods '''
+ return self.oc_cmd(['delete', 'secrets', self.name, '-n', self.namespace])
+
+ def secret_new(self, files):
+ '''Create a secret with all pods '''
+ secrets = ["%s=%s" % (os.path.basename(sfile), sfile) for sfile in files]
+ cmd = ['-n%s' % self.namespace, 'secrets', 'new', self.name]
+ cmd.extend(secrets)
+
+ return self.oc_cmd(cmd)
+
+ @staticmethod
+ def create_files_from_contents(data):
+ '''Turn an array of dict: filename, content into a files array'''
+ files = []
+ for sfile in data:
+ with open(sfile['path'], 'w') as fds:
+ fds.write(sfile['content'])
+ files.append(sfile['path'])
+
+ # Register cleanup when module is done
+ atexit.register(OpenShiftOC.cleanup, files)
+ return files
+
+ def update_secret(self, files, force=False):
+ '''run update secret
+
+ This receives a list of file names and converts it into a secret.
+ The secret is then written to disk and passed into the `oc replace` command.
+ '''
+ secret = self.prep_secret(files)
+ if secret['returncode'] != 0:
+ return secret
+
+ sfile_path = '/tmp/%s' % secret['results']['metadata']['name']
+ with open(sfile_path, 'w') as sfd:
+ sfd.write(json.dumps(secret['results']))
+
+ cmd = ['replace', '-f', sfile_path]
+ if force:
+ cmd = ['replace', '--force', '-f', sfile_path]
+
+ atexit.register(OpenShiftOC.cleanup, [sfile_path])
+
+ return self.oc_cmd(cmd)
+
+ def prep_secret(self, files):
+ ''' return what the secret would look like if created
+ This is accomplished by passing -ojson. This will most likely change in the future
+ '''
+ secrets = ["%s=%s" % (os.path.basename(sfile), sfile) for sfile in files]
+ cmd = ['-ojson', '-n%s' % self.namespace, 'secrets', 'new', self.name]
+ cmd.extend(secrets)
+
+ return self.oc_cmd(cmd, output=True)
+
+ def oc_cmd(self, cmd, output=False):
+ '''Base command for oc '''
+ cmds = ['/usr/bin/oc']
+ cmds.extend(cmd)
+
+ results = ''
+
+ if self.verbose:
+ print ' '.join(cmds)
+
+ proc = subprocess.Popen(cmds,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ env={'KUBECONFIG': self.kubeconfig})
+ proc.wait()
+ if proc.returncode == 0:
+ if output:
+ try:
+ results = json.loads(proc.stdout.read())
+ except ValueError as err:
+ if "No JSON object could be decoded" in err.message:
+ results = err.message
+
+ if self.verbose:
+ print proc.stderr.read()
+ print results
+ print
+
+ return {"returncode": proc.returncode, "results": results}
+
+ return {"returncode": proc.returncode,
+ "stderr": proc.stderr.read(),
+ "stdout": proc.stdout.read(),
+ "results": {}
+ }
+
+ @staticmethod
+ def cleanup(files):
+ '''Clean up on exit '''
+ for sfile in files:
+ if os.path.exists(sfile):
+ if os.path.isdir(sfile):
+ shutil.rmtree(sfile)
+ elif os.path.isfile(sfile):
+ os.remove(sfile)
+
+
+def exists(results, _name):
+ ''' Check to see if the results include the name '''
+ if not results:
+ return False
+
+ if find_result(results, _name):
+ return True
+
+ return False
+
+def find_result(results, _name):
+ ''' Find the specified result by name'''
+ rval = None
+ for result in results:
+ #print "%s == %s" % (result['metadata']['name'], name)
+ if result.has_key('metadata') and result['metadata']['name'] == _name:
+ rval = result
+ break
+
+ return rval
+
+# Disabling too-many-branches. This is a yaml dictionary comparison function
+# pylint: disable=too-many-branches,too-many-return-statements
+def check_def_equal(user_def, result_def, debug=False):
+ ''' Given a user defined definition, compare it with the results given back by our query. '''
+
+ # Currently these values are autogenerated and we do not need to check them
+ skip = ['creationTimestamp', 'selfLink', 'resourceVersion', 'uid', 'namespace']
+
+ for key, value in result_def.items():
+ if key in skip:
+ continue
+
+ # Both are lists
+ if isinstance(value, list):
+ if not isinstance(user_def[key], list):
+ return False
+
+ # lists should be identical
+ if value != user_def[key]:
+ return False
+
+ # recurse on a dictionary
+ elif isinstance(value, dict):
+ if not isinstance(user_def[key], dict):
+ if debug:
+ print "dict returned false not instance of dict"
+ return False
+
+ # before passing ensure keys match
+ api_values = set(value.keys()) - set(skip)
+ user_values = set(user_def[key].keys()) - set(skip)
+ if api_values != user_values:
+ if debug:
+ print api_values
+ print user_values
+ print "keys are not equal in dict"
+ return False
+
+ result = check_def_equal(user_def[key], value)
+ if not result:
+ if debug:
+ print "dict returned false"
+ return False
+
+ # Verify each key, value pair is the same
+ else:
+ if not user_def.has_key(key) or value != user_def[key]:
+ if debug:
+ print "value not equal; user_def does not have key"
+ print value
+ print user_def[key]
+ return False
+
+ return True
+
+
+def main():
+ '''
+ ansible oc module for secrets
+ '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
+ state=dict(default='present', type='str',
+ choices=['present', 'absent', 'list']),
+ debug=dict(default=False, type='bool'),
+ namespace=dict(default='default', type='str'),
+ name=dict(default=None, type='str'),
+ files=dict(default=None, type='list'),
+ delete_after=dict(default=False, type='bool'),
+ contents=dict(default=None, type='list'),
+ force=dict(default=False, type='bool'),
+ ),
+ mutually_exclusive=[["contents", "files"]],
+
+ supports_check_mode=True,
+ )
+ occmd = OpenShiftOC(module.params['namespace'],
+ module.params['name'],
+ kubeconfig=module.params['kubeconfig'],
+ verbose=module.params['debug'])
+
+ state = module.params['state']
+
+ api_rval = occmd.get_secrets()
+
+ #####
+ # Get
+ #####
+ if state == 'list':
+ module.exit_json(changed=False, results=api_rval['results'], state="list")
+
+ if not module.params['name']:
+ module.fail_json(msg='Please specify a name when state is absent|present.')
+ ########
+ # Delete
+ ########
+ if state == 'absent':
+ if not exists(api_rval['results'], module.params['name']):
+ module.exit_json(changed=False, state="absent")
+
+ if module.check_mode:
+ module.exit_json(change=False, msg='Would have performed a delete.')
+
+ api_rval = occmd.delete_secret()
+ module.exit_json(changed=True, results=api_rval, state="absent")
+
+
+ if state == 'present':
+ if module.params['files']:
+ files = module.params['files']
+ elif module.params['contents']:
+ files = OpenShiftOC.create_files_from_contents(module.params['contents'])
+ else:
+ module.fail_json(msg='Either specify files or contents.')
+
+ ########
+ # Create
+ ########
+ if not exists(api_rval['results'], module.params['name']):
+
+ if module.check_mode:
+ module.exit_json(change=False, msg='Would have performed a create.')
+
+ api_rval = occmd.secret_new(files)
+
+ # Remove files
+ if files and module.params['delete_after']:
+ OpenShiftOC.cleanup(files)
+
+ module.exit_json(changed=True, results=api_rval, state="present")
+
+ ########
+ # Update
+ ########
+ secret = occmd.prep_secret(files)
+
+ if secret['returncode'] != 0:
+ module.fail_json(msg=secret)
+
+ if check_def_equal(secret['results'], api_rval['results'][0]):
+
+ # Remove files
+ if files and module.params['delete_after']:
+ OpenShiftOC.cleanup(files)
+
+ module.exit_json(changed=False, results=secret['results'], state="present")
+
+ if module.check_mode:
+ module.exit_json(change=False, msg='Would have performed an update.')
+
+ api_rval = occmd.update_secret(files, force=module.params['force'])
+
+ # Remove files
+ if files and module.params['delete_after']:
+ OpenShiftOC.cleanup(files)
+
+ if api_rval['returncode'] != 0:
+ module.fail_json(msg=api_rval)
+
+
+ module.exit_json(changed=True, results=api_rval, state="present")
+
+ module.exit_json(failed=True,
+ changed=False,
+ results='Unknown state passed. %s' % state,
+ state="unknown")
+
+# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
+# import module snippets. This are required
+from ansible.module_utils.basic import *
+
+main()
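
The core of the new oc_secrets module is its idempotent handling of state=present: render the would-be secret with prep_secret (oc secrets new -ojson), compare it to what the API already holds with check_def_equal, and only create or replace when they differ. A compressed restatement of that branch, reusing the functions defined above (ensure_present is an illustrative wrapper, not part of the module):

    def ensure_present(occmd, name, files, existing, force=False):
        '''Sketch of the state == "present" flow: create, keep, or replace.'''
        if not exists(existing, name):
            return {'changed': True, 'results': occmd.secret_new(files)}

        prepared = occmd.prep_secret(files)        # what the secret would look like
        if prepared['returncode'] != 0:
            return {'failed': True, 'msg': prepared}   # module.fail_json in the module

        current = find_result(existing, name)
        if check_def_equal(prepared['results'], current):
            return {'changed': False, 'results': prepared['results']}

        return {'changed': True, 'results': occmd.update_secret(files, force=force)}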