author     Jason DeTiberus <detiber@gmail.com>    2016-05-27 01:56:49 -0400
committer  Jason DeTiberus <detiber@gmail.com>    2016-05-27 01:56:49 -0400
commit     7a189730b1b6dddbed174586afe752615e60d948 (patch)
tree       84ddfacb1fba4e54e26629546d080631bdcb8572
parent     dc0d59545efd2e3cc4c7eedbc1e008fa6b382e6d (diff)
parent     4c911eeed06fa433b5ac78dbf644ce923d7411e0 (diff)
Merge pull request #1947 from abutcher/rm-bin
Cleanup following move to openshift-tools
-rw-r--r--  bin/README_SHELL_COMPLETION                           37
-rwxr-xr-x  bin/ohi                                              147
-rw-r--r--  bin/openshift_ansible.conf.example                     6
-rw-r--r--  bin/openshift_ansible/__init__.py                      0
l---------  bin/openshift_ansible/aws                              1
-rw-r--r--  bin/openshift_ansible/awsutil.py                     268
l---------  bin/openshift_ansible/multi_inventory.py               1
-rw-r--r--  bin/openshift_ansible/utils.py                        30
-rwxr-xr-x  bin/opscp                                            151
-rwxr-xr-x  bin/opssh                                            154
-rwxr-xr-x  bin/oscp                                             184
-rwxr-xr-x  bin/ossh                                             172
-rwxr-xr-x  bin/ossh_bash_completion                              52
-rw-r--r--  bin/ossh_zsh_completion                               31
-rw-r--r--  bin/zsh_functions/_ossh                               49
-rw-r--r--  roles/openshift_ansible_inventory/README.md           41
-rw-r--r--  roles/openshift_ansible_inventory/defaults/main.yml    4
-rw-r--r--  roles/openshift_ansible_inventory/handlers/main.yml    2
-rw-r--r--  roles/openshift_ansible_inventory/meta/main.yml        8
-rw-r--r--  roles/openshift_ansible_inventory/tasks/main.yml      47
-rw-r--r--  roles/openshift_ansible_inventory/vars/main.yml        2
-rw-r--r--  test/env-setup                                         8
-rw-r--r--  test/units/README.md                                   7
-rwxr-xr-x  test/units/multi_inventory_test.py                   114
-rwxr-xr-x  test/units/yedit_test.py                             143
25 files changed, 0 insertions, 1659 deletions
diff --git a/bin/README_SHELL_COMPLETION b/bin/README_SHELL_COMPLETION
deleted file mode 100644
index 49bba3acc..000000000
--- a/bin/README_SHELL_COMPLETION
+++ /dev/null
@@ -1,37 +0,0 @@
-# completion is available for ossh/oscp
-
-ossh/oscp uses a dynamic inventory cache in order to look up
-hostnames and translate them into something meaningful,
-such as an IP address or DNS name.
-
-This allows us to treat our servers as cattle and not as pets.
-
-If you have not run the ossh command and it has not laid down
-a cache file, the completions will not be available.
-
-You can populate the cache by running `ossh --list`. This
-will populate the cache file and the completions should
-become available.
-
-This script will look at the cached version of your
-multi_inventory results in ~/.ansible/tmp/multi_inventory.cache.
-It will then parse {host}.{env} entries out of the JSON
-and return them as completion candidates.
-
-# BASH
-In order to set up bash completion, source the following script:
-/path/to/repository/openshift-ansible/bin/ossh_bash_completion
-
-# ZSH
-In order to set up zsh completion, you will need to verify
-that the _ossh_zsh_completion script is somewhere in
-your $fpath.
-
-Once $fpath includes the _ossh_zsh_completion script, you should
-run `exec zsh`. This will then allow you to call `ossh host[TAB]`
-for a list of completions.
-
-Note that zsh keeps its own cache of known functions and
-variables in ~/.zcompdump. In order to pick up new variables
-and completion arrays you might need to `rm ~/.zcompdump`
-before running `exec zsh`.
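For reference, a minimal setup sketch of the steps the deleted README describes
(the clone location ~/openshift-ansible and the use of the bin/zsh_functions
directory are assumptions for illustration, not part of the original text):

    # populate the inventory cache so completions have data to draw from
    ossh --list

    # bash: source the completion script from the clone
    source ~/openshift-ansible/bin/ossh_bash_completion

    # zsh: put the completion function on $fpath, then reload the shell
    fpath=(~/openshift-ansible/bin/zsh_functions $fpath)
    rm -f ~/.zcompdump && exec zsh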
diff --git a/bin/ohi b/bin/ohi
deleted file mode 100755
index 9c2ce8432..000000000
--- a/bin/ohi
+++ /dev/null
@@ -1,147 +0,0 @@
-#!/usr/bin/env python
-'''
-Ohi = Openshift Host Inventory
-
-This script provides an easy way to look at your host inventory.
-
-This depends on multi_inventory being setup correctly.
-'''
-# vim: expandtab:tabstop=4:shiftwidth=4
-
-import argparse
-import sys
-import os
-import ConfigParser
-
-from openshift_ansible import awsutil
-from openshift_ansible import utils
-from openshift_ansible.awsutil import ArgumentError
-
-CONFIG_MAIN_SECTION = 'main'
-CONFIG_HOST_TYPE_ALIAS_SECTION = 'host_type_aliases'
-
-
-class Ohi(object):
- '''
- Class for managing openshift host inventory
- '''
- def __init__(self):
- self.host_type_aliases = {}
- self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
-
- # Default the config path to /etc
- self.config_path = os.path.join(os.path.sep, 'etc', \
- 'openshift_ansible', \
- 'openshift_ansible.conf')
-
- self.args = None
- self.parse_cli_args()
- self.parse_config_file()
-
- self.aws = awsutil.AwsUtil(self.host_type_aliases)
-
- def run(self):
- '''
- Call into awsutil and retrieve the desired hosts and environments
- '''
-
- if self.args.list_host_types:
- self.aws.print_host_types()
- return 0
-
- if self.args.v3:
- version = '3'
- elif self.args.all_versions:
- version = 'all'
- else:
- version = '2'
-
- hosts = self.aws.get_host_list(clusters=self.args.cluster,
- host_type=self.args.host_type,
- sub_host_type=self.args.sub_host_type,
- envs=self.args.env,
- version=version,
- cached=self.args.cache_only)
-
- if hosts is None:
- # We weren't able to determine what they wanted to do
- raise ArgumentError("Invalid combination of arguments")
-
- if self.args.ip:
- hosts = self.aws.convert_to_ip(hosts)
-
- for host in sorted(hosts, key=utils.normalize_dnsname):
- if self.args.user:
- print "%s@%s" % (self.args.user, host)
- else:
- print host
-
- return 0
-
- def parse_config_file(self):
- '''
- Parse the config file for ohi
- '''
- if os.path.isfile(self.config_path):
- config = ConfigParser.ConfigParser()
- config.read(self.config_path)
-
- self.host_type_aliases = {}
- if config.has_section(CONFIG_HOST_TYPE_ALIAS_SECTION):
- for alias in config.options(CONFIG_HOST_TYPE_ALIAS_SECTION):
- value = config.get(CONFIG_HOST_TYPE_ALIAS_SECTION, alias).split(',')
- self.host_type_aliases[alias] = value
-
- def parse_cli_args(self):
- """Setup the command line parser with the options we want
- """
-
- parser = argparse.ArgumentParser(description='OpenShift Host Inventory')
-
- parser.add_argument('--list-host-types', default=False, action='store_true', help='List all of the host types')
- parser.add_argument('--list', default=False, action='store_true', help='List all hosts')
-
- parser.add_argument('-c', '--cluster', action="append", help="Which clusterid to use")
- parser.add_argument('-e', '--env', action="append", help="Which environment to use")
-
- parser.add_argument('-t', '--host-type', action="store", help="Which host type to use")
-
- parser.add_argument('-s', '--sub-host-type', action="store", help="Which sub host type to use")
-
- parser.add_argument('-l', '--user', action='store', default=None, help='username')
-
- parser.add_argument('--cache-only', action='store_true', default=False,
- help='Retrieve the host inventory by cache only. Default is false.')
-
- parser.add_argument('--v2', action='store_true', default=True,
- help='Specify the openshift version. Default is 2')
-
- parser.add_argument('--v3', action='store_true', default=False,
- help='Specify the openshift version.')
-
- parser.add_argument('--ip', action='store_true', default=False,
- help='Return ip address only.')
-
- parser.add_argument('--all-versions', action='store_true', default=False,
- help='Specify the openshift version. Return all versions')
-
- self.args = parser.parse_args()
-
-def main():
- '''
- Ohi will do its work here
- '''
- if len(sys.argv) == 1:
- print "\nError: No options given. Use --help to see the available options\n"
- sys.exit(0)
-
- try:
- ohi = Ohi()
- exitcode = ohi.run()
- sys.exit(exitcode)
- except ArgumentError as err:
- print "\nError: %s\n" % err.message
-
-if __name__ == '__main__':
- main()
-
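For reference, a hedged usage sketch for the ohi CLI deleted above (the host type
ex-srv and environment stg are illustrative values borrowed from the opscp example
later in this commit, not output from a real inventory):

    # list the host types known to the inventory
    ohi --list-host-types

    # print user@host lines for ex-srv hosts in stg, using v3 groups and IP addresses
    ohi -t ex-srv -e stg --v3 --ip -l root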
diff --git a/bin/openshift_ansible.conf.example b/bin/openshift_ansible.conf.example
deleted file mode 100644
index 8786dfc13..000000000
--- a/bin/openshift_ansible.conf.example
+++ /dev/null
@@ -1,6 +0,0 @@
-#[main]
-#inventory = /usr/share/ansible/inventory/multi_inventory.py
-
-#[host_type_aliases]
-#host-type-one = aliasa,aliasb
-#host-type-two = aliasfortwo
diff --git a/bin/openshift_ansible/__init__.py b/bin/openshift_ansible/__init__.py
deleted file mode 100644
index e69de29bb..000000000
--- a/bin/openshift_ansible/__init__.py
+++ /dev/null
diff --git a/bin/openshift_ansible/aws b/bin/openshift_ansible/aws
deleted file mode 120000
index eb0575b4d..000000000
--- a/bin/openshift_ansible/aws
+++ /dev/null
@@ -1 +0,0 @@
-../../inventory/aws/
\ No newline at end of file
diff --git a/bin/openshift_ansible/awsutil.py b/bin/openshift_ansible/awsutil.py
deleted file mode 100644
index 11651f087..000000000
--- a/bin/openshift_ansible/awsutil.py
+++ /dev/null
@@ -1,268 +0,0 @@
-# vim: expandtab:tabstop=4:shiftwidth=4
-
-"""This module comprises Aws specific utility functions."""
-
-import os
-import re
-
-# Buildbot does not have multi_inventory installed
-#pylint: disable=no-name-in-module
-from openshift_ansible import multi_inventory
-
-class ArgumentError(Exception):
- """This class is raised when improper arguments are passed."""
-
- def __init__(self, message):
- """Initialize an ArgumentError.
-
- Keyword arguments:
- message -- the exact error message being raised
- """
- super(ArgumentError, self).__init__()
- self.message = message
-
-class AwsUtil(object):
- """This class contains the AWS utility functions."""
-
- def __init__(self, host_type_aliases=None):
- """Initialize the AWS utility class.
-
- Keyword arguments:
- host_type_aliases -- a list of aliases to common host-types (e.g. ex-node)
- """
-
- self.alias_lookup = {}
- host_type_aliases = host_type_aliases or {}
-
- self.host_type_aliases = host_type_aliases
- self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
-
- self.setup_host_type_alias_lookup()
-
- def setup_host_type_alias_lookup(self):
- """Sets up the alias to host-type lookup table."""
- for key, values in self.host_type_aliases.iteritems():
- for value in values:
- self.alias_lookup[value] = key
-
- @staticmethod
- def get_inventory(args=None, cached=False):
- """Calls the inventory script and returns a dictionary containing the inventory.
-
- Keyword arguments:
- args -- optional arguments to pass to the inventory script
- """
- minv = multi_inventory.MultiInventory(args)
- if cached:
- minv.get_inventory_from_cache()
- else:
- minv.run()
- return minv.result
-
- def get_clusters(self):
- """Searches for cluster tags in the inventory and returns all of the clusters found."""
- pattern = re.compile(r'^oo_clusterid_(.*)')
-
- clusters = []
- inv = self.get_inventory()
- for key in inv.keys():
- matched = pattern.match(key)
- if matched:
- clusters.append(matched.group(1))
-
- clusters.sort()
- return clusters
-
- def get_environments(self):
- """Searches for env tags in the inventory and returns all of the envs found."""
- pattern = re.compile(r'^oo_environment_(.*)')
-
- envs = []
- inv = self.get_inventory()
- for key in inv.keys():
- matched = pattern.match(key)
- if matched:
- envs.append(matched.group(1))
-
- envs.sort()
- return envs
-
- def get_host_types(self):
- """Searches for host-type tags in the inventory and returns all host-types found."""
- pattern = re.compile(r'^oo_hosttype_(.*)')
-
- host_types = []
- inv = self.get_inventory()
- for key in inv.keys():
- matched = pattern.match(key)
- if matched:
- host_types.append(matched.group(1))
-
- host_types.sort()
- return host_types
-
- def get_sub_host_types(self):
- """Searches for sub-host-type tags in the inventory and returns all sub-host-types found."""
- pattern = re.compile(r'^oo_subhosttype_(.*)')
-
- sub_host_types = []
- inv = self.get_inventory()
- for key in inv.keys():
- matched = pattern.match(key)
- if matched:
- sub_host_types.append(matched.group(1))
-
- sub_host_types.sort()
- return sub_host_types
-
- def get_security_groups(self):
- """Searches for security_groups in the inventory and returns all SGs found."""
- pattern = re.compile(r'^security_group_(.*)')
-
- groups = []
- inv = self.get_inventory()
- for key in inv.keys():
- matched = pattern.match(key)
- if matched:
- groups.append(matched.group(1))
-
- groups.sort()
- return groups
-
- def build_host_dict_by_env(self, args=None):
- """Searches the inventory for hosts in an env and returns their hostvars."""
- args = args or []
- inv = self.get_inventory(args)
-
- inst_by_env = {}
- for _, host in inv['_meta']['hostvars'].items():
- # If you don't have an environment tag, we're going to ignore you
- if 'oo_environment' not in host:
- continue
-
- if host['oo_environment'] not in inst_by_env:
- inst_by_env[host['oo_environment']] = {}
- host_id = "%s:%s" % (host['oo_name'], host['oo_id'])
- inst_by_env[host['oo_environment']][host_id] = host
-
- return inst_by_env
-
- def print_host_types(self):
- """Gets the list of host types and aliases and outputs them in columns."""
- host_types = self.get_host_types()
- ht_format_str = "%35s"
- alias_format_str = "%-20s"
- combined_format_str = ht_format_str + " " + alias_format_str
-
- print
- print combined_format_str % ('Host Types', 'Aliases')
- print combined_format_str % ('----------', '-------')
-
- for host_type in host_types:
- aliases = []
- if host_type in self.host_type_aliases:
- aliases = self.host_type_aliases[host_type]
- print combined_format_str % (host_type, ", ".join(aliases))
- else:
- print ht_format_str % host_type
- print
-
- def resolve_host_type(self, host_type):
- """Converts a host-type alias into a host-type.
-
- Keyword arguments:
- host_type -- The alias or host_type to look up.
-
- Example (depends on aliases defined in config file):
- host_type = ex-node
- returns: openshift-node
- """
- if self.alias_lookup.has_key(host_type):
- return self.alias_lookup[host_type]
- return host_type
-
- @staticmethod
- def gen_version_tag(ver):
- """Generate the version tag
- """
- return "oo_version_%s" % ver
-
- @staticmethod
- def gen_clusterid_tag(clu):
- """Generate the clusterid tag
- """
- return "oo_clusterid_%s" % clu
-
- @staticmethod
- def gen_env_tag(env):
- """Generate the environment tag
- """
- return "oo_environment_%s" % env
-
- def gen_host_type_tag(self, host_type, version):
- """Generate the host type tag
- """
- if version == '2':
- host_type = self.resolve_host_type(host_type)
- return "oo_hosttype_%s" % host_type
-
- @staticmethod
- def gen_sub_host_type_tag(sub_host_type):
- """Generate the host type tag
- """
- return "oo_subhosttype_%s" % sub_host_type
-
- # This function uses all of these params to perform a filters on our host inventory.
- # pylint: disable=too-many-arguments
- def get_host_list(self, clusters=None, host_type=None, sub_host_type=None, envs=None, version=None, cached=False):
- """Get the list of hosts from the inventory using host-type and environment
- """
- retval = set([])
- envs = envs or []
-
- inv = self.get_inventory(cached=cached)
-
- retval.update(inv.get('all_hosts', []))
-
- if clusters:
- cluster_hosts = set([])
- if len(clusters) > 1:
- for cluster in clusters:
- clu_tag = AwsUtil.gen_clusterid_tag(cluster)
- cluster_hosts.update(inv.get(clu_tag, []))
- else:
- cluster_hosts.update(inv.get(AwsUtil.gen_clusterid_tag(clusters[0]), []))
-
- retval.intersection_update(cluster_hosts)
-
- if envs:
- env_hosts = set([])
- if len(envs) > 1:
- for env in envs:
- env_tag = AwsUtil.gen_env_tag(env)
- env_hosts.update(inv.get(env_tag, []))
- else:
- env_hosts.update(inv.get(AwsUtil.gen_env_tag(envs[0]), []))
-
- retval.intersection_update(env_hosts)
-
- if host_type:
- retval.intersection_update(inv.get(self.gen_host_type_tag(host_type, version), []))
-
- if sub_host_type:
- retval.intersection_update(inv.get(self.gen_sub_host_type_tag(sub_host_type), []))
-
- if version != 'all':
- retval.intersection_update(inv.get(AwsUtil.gen_version_tag(version), []))
-
- return list(retval)
-
- def convert_to_ip(self, hosts, cached=False):
- """convert a list of host names to ip addresses"""
-
- inv = self.get_inventory(cached=cached)
- ips = []
- for host in hosts:
- ips.append(inv['_meta']['hostvars'][host]['oo_public_ip'])
-
- return ips
diff --git a/bin/openshift_ansible/multi_inventory.py b/bin/openshift_ansible/multi_inventory.py
deleted file mode 120000
index b40feec07..000000000
--- a/bin/openshift_ansible/multi_inventory.py
+++ /dev/null
@@ -1 +0,0 @@
-../../inventory/multi_inventory.py
\ No newline at end of file
diff --git a/bin/openshift_ansible/utils.py b/bin/openshift_ansible/utils.py
deleted file mode 100644
index e6243aa5a..000000000
--- a/bin/openshift_ansible/utils.py
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/usr/bin/env python
-# vim: expandtab:tabstop=4:shiftwidth=4
-
-''' The purpose of this module is to contain small utility functions.
-'''
-
-import re
-
-def normalize_dnsname(name, padding=10):
- ''' The purpose of this function is to return a dns name with zero padding,
- so that it sorts properly (as a human would expect).
-
- Example: name=ex-lrg-node10.prod.rhcloud.com
- Returns: ex-lrg-node0000000010.prod.rhcloud.com
-
- Example Usage:
- sorted(['a3.example.com', 'a10.example.com', 'a1.example.com'],
- key=normalize_dnsname)
-
- Returns: ['a1.example.com', 'a3.example.com', 'a10.example.com']
- '''
- parts = re.split(r'(\d+)', name)
- retval = []
- for part in parts:
- if re.match(r'^\d+$', part):
- retval.append(part.zfill(padding))
- else:
- retval.append(part)
-
- return ''.join(retval)
diff --git a/bin/opscp b/bin/opscp
deleted file mode 100755
index 4bfe166f6..000000000
--- a/bin/opscp
+++ /dev/null
@@ -1,151 +0,0 @@
-#!/bin/bash
-# vim: expandtab:tabstop=4:shiftwidth=4
-
-
-function usage() {
- cat << EOF
-Usage: opscp [OPTIONS] local remote
-
-Options:
- --version show program's version number and exit
- --help show this help message and exit
- -l USER, --user=USER username (OPTIONAL)
- -p PAR, --par=PAR max number of parallel threads (OPTIONAL)
- --outdir=OUTDIR output directory for stdout files (OPTIONAL)
- --errdir=ERRDIR output directory for stderr files (OPTIONAL)
- -c CLUSTER, --cluster CLUSTER
- which cluster to use
- -e ENV, --env ENV which environment to use
- --v3 When working with v3 environments. v2 by default
- -t HOST_TYPE, --host-type HOST_TYPE
- which host type to use
- --list-host-types list all of the host types
- --timeout=TIMEOUT timeout (secs) (0 = no timeout) per host (OPTIONAL)
- -O OPTION, --option=OPTION
- SSH option (OPTIONAL)
- -v, --verbose turn on warning and diagnostic messages (OPTIONAL)
- -A, --askpass Ask for a password (OPTIONAL)
- -x ARGS, --extra-args=ARGS
- Extra command-line arguments, with processing for
- spaces, quotes, and backslashes
- -X ARG, --extra-arg=ARG
- Extra command-line argument
- -r, --recursive recursively copy directories (OPTIONAL)
-
-Example: opscp -t ex-srv -e stg -l irb2 foo.txt /home/irb2/foo.txt
-
-EOF
-}
-
-if [ $# -eq 0 ] || [ "$1" == "--help" ]
-then
- usage
- exit 1
-fi
-
-# See if ohi is installed
-if ! which ohi &>/dev/null ; then
- echo "ERROR: can't find ohi (OpenShift Host Inventory) on your system. Please either install the openshift-ansible-bin package, or add openshift-ansible/bin to your path."
-
- exit 10
-fi
-
-PAR=200
-USER=root
-TIMEOUT=0
-ENV=""
-HOST_TYPE=""
-
-while [ $# -gt 0 ] ; do
- case $1 in
- -t|--host-type)
- shift # get past the option
- HOST_TYPE=$1
- shift # get past the value of the option
- ;;
-
- -c)
- shift # get past the option
- CLUSTER=$1
- shift # get past the value of the option
- ;;
-
- -e)
- shift # get past the option
- ENV=$1
- shift # get past the value of the option
- ;;
-
- --v3)
- OPENSHIFT_VERSION="--v3 --ip"
- shift # get past the value of the option
- ;;
-
- --timeout)
- shift # get past the option
- TIMEOUT=$1
- shift # get past the value of the option
- ;;
-
- -p|--par)
- shift # get past the option
- PAR=$1
- shift # get past the value of the option
- ;;
-
- -l|--user)
- shift # get past the option
- USER=$1
- shift # get past the value of the option
- ;;
-
- --list-host-types)
- ohi --list-host-types
- exit 0
- ;;
-
- -h|--hosts|-H|--host|-o)
- echo "ERROR: unknown option $1"
- exit 20
- ;;
-
- *)
- args+=("$1")
- shift
- ;;
- esac
-done
-
-# Get host list from ohi
-CMD=""
-if [ -n "$CLUSTER" ] ; then
- CMD="$CMD -c $CLUSTER"
-fi
-
-if [ -n "$ENV" ] ; then
- CMD="$CMD -e $ENV"
-fi
-
-if [ -n "$HOST_TYPE" ] ; then
- CMD="$CMD -t $HOST_TYPE"
-fi
-
-if [ -n "$OPENSHIFT_VERSION" ] ; then
- CMD="$CMD $OPENSHIFT_VERSION"
-fi
-
-if [ -n "$CMD" ] ; then
- HOSTS="$(ohi $CMD 2>/dev/null)"
- OHI_ECODE=$?
-fi
-
-if [ $OHI_ECODE -ne 0 ] ; then
- echo
- echo "ERROR: ohi failed with exit code $OHI_ECODE"
- echo
- echo "This is usually caused by a bad value passed for host-type or environment."
- echo
- exit 25
-fi
-
-exec pscp.pssh -t $TIMEOUT -p $PAR -l $USER -h <(echo "$HOSTS") "${args[@]}"
diff --git a/bin/opssh b/bin/opssh
deleted file mode 100755
index 0113e7216..000000000
--- a/bin/opssh
+++ /dev/null
@@ -1,154 +0,0 @@
-#!/bin/bash
-# vim: expandtab:tabstop=4:shiftwidth=4
-
-
-function usage() {
- cat << EOF
-Usage: opssh [OPTIONS] command [...]
-
-Options:
- --version show program's version number and exit
- --help show this help message and exit
- -l USER, --user=USER username (OPTIONAL)
- -p PAR, --par=PAR max number of parallel threads (OPTIONAL)
- --outdir=OUTDIR output directory for stdout files (OPTIONAL)
- --errdir=ERRDIR output directory for stderr files (OPTIONAL)
- -c CLUSTER, --cluster CLUSTER
- which cluster to use
- -e ENV, --env ENV which environment to use
- --v3 When working with v3 environments. v2 by default
- -t HOST_TYPE, --host-type HOST_TYPE
- which host type to use
- --list-host-types list all of the host types
- --timeout=TIMEOUT timeout (secs) (0 = no timeout) per host (OPTIONAL)
- -O OPTION, --option=OPTION
- SSH option (OPTIONAL)
- -v, --verbose turn on warning and diagnostic messages (OPTIONAL)
- -A, --askpass Ask for a password (OPTIONAL)
- -x ARGS, --extra-args=ARGS
- Extra command-line arguments, with processing for
- spaces, quotes, and backslashes
- -X ARG, --extra-arg=ARG
- Extra command-line argument
- -i, --inline inline aggregated output and error for each server
- --inline-stdout inline standard output for each server
- -I, --send-input read from standard input and send as input to ssh
- -P, --print print output as we get it
-
-Example: opssh -t ex-srv -e stg -l irb2 --outdir /tmp/foo uptime
-
-EOF
-}
-
-if [ $# -eq 0 ] || [ "$1" == "--help" ]
-then
- usage
- exit 1
-fi
-
-# See if ohi is installed
-if ! which ohi &>/dev/null ; then
- echo "ERROR: can't find ohi (OpenShift Host Inventory) on your system. Please either install the openshift-ansible-bin package, or add openshift-ansible/bin to your path."
-
- exit 10
-fi
-
-PAR=200
-USER=root
-TIMEOUT=0
-ENV=""
-HOST_TYPE=""
-
-while [ $# -gt 0 ] ; do
- case $1 in
- -t|--host-type)
- shift # get past the option
- HOST_TYPE=$1
- shift # get past the value of the option
- ;;
-
- -c)
- shift # get past the option
- CLUSTER=$1
- shift # get past the value of the option
- ;;
-
- -e)
- shift # get past the option
- ENV=$1
- shift # get past the value of the option
- ;;
-
- --v3)
- OPENSHIFT_VERSION="--v3 --ip"
- shift # get past the value of the option
- ;;
-
- --timeout)
- shift # get past the option
- TIMEOUT=$1
- shift # get past the value of the option
- ;;
-
- -p|--par)
- shift # get past the option
- PAR=$1
- shift # get past the value of the option
- ;;
-
- -l|--user)
- shift # get past the option
- USER=$1
- shift # get past the value of the option
- ;;
-
- --list-host-types)
- ohi --list-host-types
- exit 0
- ;;
-
- -h|--hosts|-H|--host|-o)
- echo "ERROR: unknown option $1"
- exit 20
- ;;
-
- *)
- args+=("$1")
- shift
- ;;
- esac
-done
-
-# Get host list from ohi
-CMD=""
-if [ -n "$CLUSTER" ] ; then
- CMD="$CMD -c $CLUSTER"
-fi
-
-if [ -n "$ENV" ] ; then
- CMD="$CMD -e $ENV"
-fi
-
-if [ -n "$HOST_TYPE" ] ; then
- CMD="$CMD -t $HOST_TYPE"
-fi
-
-if [ -n "$OPENSHIFT_VERSION" ] ; then
- CMD="$CMD $OPENSHIFT_VERSION"
-fi
-
-if [ -n "$CMD" ] ; then
- HOSTS="$(ohi $CMD 2>/dev/null)"
- OHI_ECODE=$?
-fi
-
-if [ $OHI_ECODE -ne 0 ] ; then
- echo
- echo "ERROR: ohi failed with exit code $OHI_ECODE"
- echo
- echo "This is usually caused by a bad value passed for host-type or environment."
- echo
- exit 25
-fi
-
-exec pssh -t $TIMEOUT -p $PAR -l $USER -h <(echo "$HOSTS") "${args[@]}"
diff --git a/bin/oscp b/bin/oscp
deleted file mode 100755
index 4d3286ed8..000000000
--- a/bin/oscp
+++ /dev/null
@@ -1,184 +0,0 @@
-#!/usr/bin/env python2
-# vim: expandtab:tabstop=4:shiftwidth=4
-
-import argparse
-import traceback
-import sys
-import os
-import re
-import ConfigParser
-
-from openshift_ansible import awsutil
-
-CONFIG_MAIN_SECTION = 'main'
-
-class Oscp(object):
- def __init__(self):
- self.host = None
- self.user = ''
- self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
-
- # Default the config path to /etc
- self.config_path = os.path.join(os.path.sep, 'etc', \
- 'openshift_ansible', \
- 'openshift_ansible.conf')
-
- self.parse_cli_args()
- self.parse_config_file()
-
- # parse host and user
- self.process_host()
-
- self.aws = awsutil.AwsUtil()
-
- # get a dict of host inventory
- if self.args.refresh_cache:
- self.get_hosts(True)
- else:
- self.get_hosts()
-
- if (self.args.src == '' or self.args.dest == '') and not self.args.list:
- self.parser.print_help()
- return
-
- if self.args.debug:
- print self.host
- print self.args
-
- # perform the scp
- if self.args.list:
- self.list_hosts()
- else:
- self.scp()
-
- def parse_config_file(self):
- if os.path.isfile(self.config_path):
- config = ConfigParser.ConfigParser()
- config.read(self.config_path)
-
- def parse_cli_args(self):
- parser = argparse.ArgumentParser(description='OpenShift Online SSH Tool.')
- parser.add_argument('-d', '--debug', default=False,
- action="store_true", help="debug mode")
- parser.add_argument('-v', '--verbose', default=False,
- action="store_true", help="Verbose?")
- parser.add_argument('--refresh-cache', default=False,
- action="store_true", help="Force a refresh on the host cache.")
- parser.add_argument('--list', default=False,
- action="store_true", help="list out hosts")
- parser.add_argument('-r', '--recurse', action='store_true', default=False,
- help='Recursively copy files to or from destination.')
- parser.add_argument('-o', '--ssh_opts', action='store',
- help='options to pass to SSH.\n \
- "-oPort=22,TCPKeepAlive=yes"')
-
- parser.add_argument('src', nargs='?', default='')
- parser.add_argument('dest',nargs='?', default='')
-
- self.args = parser.parse_args()
- self.parser = parser
-
-
- def process_host(self):
- '''Determine host name and user name for SSH.
- '''
- # is the first param passed a valid file?
- if os.path.isfile(self.args.src) or os.path.isdir(self.args.src):
- self.local_src = True
- self.host = self.args.dest
- else:
- self.local_src = False
- self.host = self.args.src
-
- if '@' in self.host:
- re_host = re.compile("(.*@)(.*)(:.*$)")
- else:
- re_host = re.compile("(.*)(:.*$)")
-
- search = re_host.search(self.host)
-
- if search:
- if len(search.groups()) > 2:
- self.user = search.groups()[0]
- self.host = search.groups()[1]
- self.path = search.groups()[2]
- else:
- self.host = search.groups()[0]
- self.path = search.groups()[1]
-
- def get_hosts(self, refresh_cache=False):
- '''Query our host inventory and store the hostvars dict in self.host_inventory.'''
- if refresh_cache:
- self.host_inventory = self.aws.get_inventory(['--refresh-cache'])['_meta']['hostvars']
- else:
- self.host_inventory = self.aws.get_inventory()['_meta']['hostvars']
-
- def select_host(self):
- '''select host attempts to match the host specified
- on the command line with a list of hosts.
- '''
- results = None
- if self.host_inventory.has_key(self.host):
- results = (self.host, self.host_inventory[self.host])
- else:
- print "Could not find specified host: %s." % self.host
-
- # default - no results found.
- return results
-
- def list_hosts(self, limit=None):
- '''Function to print out the host inventory.
-
- Takes a single parameter to limit the number of hosts printed.
- '''
- for host_id, server_info in self.host_inventory.items():
- print '{oo_name:<35} {oo_clusterid:<10} {oo_environment:<8} ' \
- '{oo_id:<15} {oo_public_ip:<18} {oo_private_ip:<18}'.format(**server_info)
-
- def scp(self):
- '''scp files to or from a specified host
- '''
- try:
- # shell args start with the program name in position 1
- scp_args = ['/usr/bin/scp']
-
- if self.args.verbose:
- scp_args.append('-v')
-
- if self.args.recurse:
- scp_args.append('-r')
-
- if self.args.ssh_opts:
- for arg in self.args.ssh_opts.split(","):
- scp_args.append("-o%s" % arg)
-
- results = self.select_host()
-
- if self.args.debug: print results
-
- if not results:
- return # early exit, no results
-
- # Assume we have one and only one.
- server_info = results[1]
-
- host_str = "%s%s%s" % (self.user, server_info['oo_public_ip'], self.path)
-
- if self.local_src:
- scp_args.append(self.args.src)
- scp_args.append(host_str)
- else:
- scp_args.append(host_str)
- scp_args.append(self.args.dest)
-
- print "Running: %s\n" % ' '.join(scp_args)
-
- os.execve('/usr/bin/scp', scp_args, os.environ)
- except:
- print traceback.print_exc()
- print sys.exc_info()
-
-
-if __name__ == '__main__':
- oscp = Oscp()
-
diff --git a/bin/ossh b/bin/ossh
deleted file mode 100755
index 0dd2fb741..000000000
--- a/bin/ossh
+++ /dev/null
@@ -1,172 +0,0 @@
-#!/usr/bin/env python2
-# vim: expandtab:tabstop=4:shiftwidth=4
-
-import argparse
-import traceback
-import sys
-import os
-import re
-import ConfigParser
-
-from openshift_ansible import awsutil
-
-CONFIG_MAIN_SECTION = 'main'
-
-class Ossh(object):
- def __init__(self):
- self.user = None
- self.host = None
- self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
-
- # Default the config path to /etc
- self.config_path = os.path.join(os.path.sep, 'etc', \
- 'openshift_ansible', \
- 'openshift_ansible.conf')
-
- self.parse_cli_args()
- self.parse_config_file()
-
- self.aws = awsutil.AwsUtil()
-
- if self.args.refresh_cache:
- self.get_hosts(True)
- else:
- self.get_hosts()
-
- # parse host and user
- self.process_host()
-
- if self.args.host == '' and not self.args.list:
- self.parser.print_help()
- return
-
- if self.args.debug:
- print self.args
-
- # perform the SSH
- if self.args.list:
- self.list_hosts()
- else:
- self.ssh()
-
- def parse_config_file(self):
- if os.path.isfile(self.config_path):
- config = ConfigParser.ConfigParser()
- config.read(self.config_path)
-
- def parse_cli_args(self):
- parser = argparse.ArgumentParser(description='OpenShift Online SSH Tool.')
- parser.add_argument('-d', '--debug', default=False,
- action="store_true", help="debug mode")
- parser.add_argument('-v', '--verbose', default=False,
- action="store_true", help="Verbose?")
- parser.add_argument('--refresh-cache', default=False,
- action="store_true", help="Force a refresh on the host cache.")
- parser.add_argument('--list', default=False,
- action="store_true", help="list out hosts")
- parser.add_argument('-c', '--command', action='store',
- help='Command to run on remote host')
- parser.add_argument('-l', '--login_name', action='store',
- help='User to ssh as')
-
- parser.add_argument('-o', '--ssh_opts', action='store',
- help='options to pass to SSH.\n \
- "-oForwardX11=yes,TCPKeepAlive=yes"')
- parser.add_argument('-A', default=False, action="store_true",
- help='Forward authentication agent')
- parser.add_argument('host', nargs='?', default='')
-
- self.args = parser.parse_args()
- self.parser = parser
-
-
- def process_host(self):
- '''Determine host name and user name for SSH.
- '''
-
- parts = self.args.host.split('@')
-
- # parse username if passed
- if len(parts) > 1:
- self.user = parts[0]
- self.host = parts[1]
- else:
- self.host = parts[0]
-
- if self.args.login_name:
- self.user = self.args.login_name
-
-
- def get_hosts(self, refresh_cache=False):
- '''Query our host inventory and store the hostvars dict in self.host_inventory.'''
- if refresh_cache:
- self.host_inventory = self.aws.get_inventory(['--refresh-cache'])['_meta']['hostvars']
- else:
- self.host_inventory = self.aws.get_inventory()['_meta']['hostvars']
-
- def select_host(self):
- '''select host attempts to match the host specified
- on the command line with a list of hosts.
- '''
- results = None
- if self.host_inventory.has_key(self.host):
- results = (self.host, self.host_inventory[self.host])
- else:
- print "Could not find specified host: %s." % self.host
-
- # default - no results found.
- return results
-
- def list_hosts(self, limit=None):
- '''Function to print out the host inventory.
-
- Takes a single parameter to limit the number of hosts printed.
- '''
- for host_id, server_info in self.host_inventory.items():
- print '{oo_name:<35} {oo_clusterid:<10} {oo_environment:<8} ' \
- '{oo_id:<15} {oo_public_ip:<18} {oo_private_ip:<18}'.format(**server_info)
-
- def ssh(self):
- '''SSH to a specified host
- '''
- try:
- # shell args start with the program name in position 1
- ssh_args = ['/usr/bin/ssh']
-
- if self.user:
- ssh_args.append('-l%s' % self.user)
-
- if self.args.A:
- ssh_args.append('-A')
-
- if self.args.verbose:
- ssh_args.append('-vvv')
-
- if self.args.ssh_opts:
- for arg in self.args.ssh_opts.split(","):
- ssh_args.append("-o%s" % arg)
-
- results = self.select_host()
- if not results:
- return # early exit, no results
-
- # Assume we have one and only one.
- server_info = results[1]
-
- ssh_args.append(server_info['oo_public_ip'])
-
- #last argument
- if self.args.command:
- ssh_args.append("%s" % self.args.command)
-
- print "Running: %s\n" % ' '.join(ssh_args)
-
- os.execve('/usr/bin/ssh', ssh_args, os.environ)
- except:
- print traceback.print_exc()
- print sys.exc_info()
-
-
-if __name__ == '__main__':
- ossh = Ossh()
-
diff --git a/bin/ossh_bash_completion b/bin/ossh_bash_completion
deleted file mode 100755
index dcbde3e51..000000000
--- a/bin/ossh_bash_completion
+++ /dev/null
@@ -1,52 +0,0 @@
-__ossh_known_hosts(){
- if python -c 'import openshift_ansible' &>/dev/null; then
- /usr/bin/python -c 'from openshift_ansible import multi_inventory; m=multi_inventory.MultiInventory(); m.run(); z=m.result; print "\n".join([name for name in z["_meta"]["hostvars"].keys()])'
-
- elif [[ -f /dev/shm/.ansible/tmp/multi_inventory.cache ]]; then
- /usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join([name for name in z["_meta"]["hostvars"].keys()])'
-
- elif [[ -f ~/.ansible/tmp/multi_inventory.cache ]]; then
- /usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("~/.ansible/tmp/multi_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join([name for name in z["_meta"]["hostvars"].keys()])'
-
- fi
-}
-
-_ossh()
-{
- local cur prev known_hosts
- COMPREPLY=()
- cur="${COMP_WORDS[COMP_CWORD]}"
- prev="${COMP_WORDS[COMP_CWORD-1]}"
- known_hosts="$(__ossh_known_hosts)"
- COMPREPLY=( $(compgen -W "${known_hosts}" -- ${cur}))
-
- return 0
-}
-complete -F _ossh ossh oscp
-
-__opssh_known_hosts(){
- if python -c 'import openshift_ansible' &>/dev/null; then
- /usr/bin/python -c 'from openshift_ansible.multi_inventory import MultiInventory; m=MultiInventory(); m.run(); print "\n".join(set(["%s" % (host["oo_hosttype"]) for dns, host in m.result["_meta"]["hostvars"].items() if "oo_hosttype" in host]))'
-
- elif [[ -f /dev/shm/.ansible/tmp/multi_inventory.cache ]]; then
- /usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(set(["%s" % (host["oo_hosttype"]) for dns, host in z["_meta"]["hostvars"].items() if "oo_hosttype" in host]))'
-
- elif [[ -f ~/.ansible/tmp/multi_inventory.cache ]]; then
- /usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("~/.ansible/tmp/multi_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(set(["%s" % (host["oo_hosttype"]) for dns, host in z["_meta"]["hostvars"].items() if "oo_hosttype" in host]))'
-
- fi
-}
-
-_opssh()
-{
- local cur prev known_hosts
- COMPREPLY=()
- cur="${COMP_WORDS[COMP_CWORD]}"
- prev="${COMP_WORDS[COMP_CWORD-1]}"
- known_hosts="$(__opssh_known_hosts)"
- COMPREPLY=( $(compgen -W "${known_hosts}" -- ${cur}))
-
- return 0
-}
-complete -F _opssh opssh
-
diff --git a/bin/ossh_zsh_completion b/bin/ossh_zsh_completion
deleted file mode 100644
index 94ea61dab..000000000
--- a/bin/ossh_zsh_completion
+++ /dev/null
@@ -1,31 +0,0 @@
-#compdef ossh oscp
-
-_ossh_known_hosts(){
- if python -c 'import openshift_ansible' &>/dev/null; then
- print $(/usr/bin/python -c 'from openshift_ansible import multi_inventory; m=multi_inventory.MultiInventory(); m.run(); z=m.result; print "\n".join([name for name in z["_meta"]["hostvars"].keys()])')
-
- elif [[ -f /dev/shm/.ansible/tmp/multi_inventory.cache ]]; then
- print $(/usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join([name for name in z["_meta"]["hostvars"].keys() ])')
-
- elif [[ -f ~/.ansible/tmp/multi_inventory.cache ]]; then
- print $(/usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("~/.ansible/tmp/multi_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join([name for name in z["_meta"]["hostvars"].keys() ])')
-
- fi
-
-}
-_ossh(){
- local curcontext="$curcontext" state line
- typeset -A opt_args
-
-#_arguments "*:Hosts:_ossh_known_hosts"
- _arguments -s : \
- "*:hosts:->hosts"
-
- case "$state" in
- hosts)
- _values 'hosts' $(_ossh_known_hosts)
- ;;
- esac
-
-}
-_ossh "$@"
diff --git a/bin/zsh_functions/_ossh b/bin/zsh_functions/_ossh
deleted file mode 100644
index 65979c58a..000000000
--- a/bin/zsh_functions/_ossh
+++ /dev/null
@@ -1,49 +0,0 @@
-#compdef ossh oscp
-
-_ossh_known_hosts(){
- if [[ -f ~/.ansible/tmp/multi_inventory.cache ]]; then
- print $(/usr/bin/python -c 'import json,os; z = json.loads(open("%s"%os.path.expanduser("~/.ansible/tmp/multi_inventory.cache")).read()); print "\n".join(["%s.%s" % (host["oo_name"],host["oo_environment"]) for dns, host in z["_meta"]["hostvars"].items()])')
- fi
-}
-
-_ossh(){
- local curcontext="$curcontext" state line
- typeset -A opt_args
-
- common_arguments=(
- '(- *)'{-h,--help}'[show help]' \
- {-v,--verbose}'[enable verbose]' \
- {-d,--debug}'[debug mode]' \
- {-l,--login_name}+'[login name]:login_name' \
- {-c,--command}+'[command to run on remote host]:command' \
- {-o,--ssh_opts}+'[SSH Options to pass to SSH]:ssh options' \
- {-e,--env}+'[environment to use]:environment:->env' \
- '--list[list out hosts]' \
- ':OP Hosts:->oo_hosts'
- )
-
- case "$service" in
- ossh)
- _arguments -C -s \
- "$common_arguments[@]" \
- ;;
-
- oscp)
- _arguments -C -s \
- "$common_arguments[@]" \
- {-r,--recurse}'[Recursive copy]' \
- ':file:_files'
- ;;
- esac
-
- case "$state" in
- oo_hosts)
- _values 'oo_hosts' $(_ossh_known_hosts)
- ;;
- env)
- _values 'environment' ops int stg prod
- ;;
- esac
-}
-
-_ossh "$@"
diff --git a/roles/openshift_ansible_inventory/README.md b/roles/openshift_ansible_inventory/README.md
deleted file mode 100644
index b62287c12..000000000
--- a/roles/openshift_ansible_inventory/README.md
+++ /dev/null
@@ -1,41 +0,0 @@
-OpenShift Ansible Inventory
-=========
-
-Install and configure openshift-ansible-inventory.
-
-Requirements
-------------
-
-None
-
-Role Variables
---------------
-
-oo_inventory_group
-oo_inventory_user
-oo_inventory_accounts
-oo_inventory_cache_max_age
-
-Dependencies
-------------
-
-None
-
-Example Playbook
-----------------
-
-Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
-
- - hosts: servers
- roles:
- - { role: username.rolename, x: 42 }
-
-License
--------
-
-ASL 2.0
-
-Author Information
-------------------
-
-OpenShift operations, Red Hat, Inc
diff --git a/roles/openshift_ansible_inventory/defaults/main.yml b/roles/openshift_ansible_inventory/defaults/main.yml
deleted file mode 100644
index f53c00c80..000000000
--- a/roles/openshift_ansible_inventory/defaults/main.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-oo_inventory_group: root
-oo_inventory_owner: root
-oo_inventory_cache_max_age: 1800
diff --git a/roles/openshift_ansible_inventory/handlers/main.yml b/roles/openshift_ansible_inventory/handlers/main.yml
deleted file mode 100644
index e2db43477..000000000
--- a/roles/openshift_ansible_inventory/handlers/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# handlers file for openshift_ansible_inventory
diff --git a/roles/openshift_ansible_inventory/meta/main.yml b/roles/openshift_ansible_inventory/meta/main.yml
deleted file mode 100644
index 7f7387e80..000000000
--- a/roles/openshift_ansible_inventory/meta/main.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-galaxy_info:
- author: OpenShift
- description: Install and configure openshift-ansible-inventory
- company: Red Hat, Inc
- license: ASL 2.0
- min_ansible_version: 1.2
-dependencies: []
diff --git a/roles/openshift_ansible_inventory/tasks/main.yml b/roles/openshift_ansible_inventory/tasks/main.yml
deleted file mode 100644
index 05c7a5f93..000000000
--- a/roles/openshift_ansible_inventory/tasks/main.yml
+++ /dev/null
@@ -1,47 +0,0 @@
----
-- action: "{{ ansible_pkg_mgr }} name={{ item}} state=present"
- with_items:
- - openshift-ansible-inventory
- - openshift-ansible-inventory-aws
- - openshift-ansible-inventory-gce
- when: not openshift.common.is_containerized | bool
-
-- name:
- copy:
- content: "{{ oo_inventory_accounts | to_nice_yaml }}"
- dest: /etc/ansible/multi_inventory.yaml
- group: "{{ oo_inventory_group }}"
- owner: "{{ oo_inventory_owner }}"
- mode: "0640"
-
-- file:
- state: directory
- dest: /etc/ansible/inventory
- owner: root
- group: libra_ops
- mode: 0750
-
-- file:
- state: link
- src: /usr/share/ansible/inventory/multi_inventory.py
- dest: /etc/ansible/inventory/multi_inventory.py
- owner: root
- group: libra_ops
-
-# This cron uses the above location to call its job
-- name: Cron to keep cache fresh
- cron:
- name: 'multi_inventory'
- minute: '*/10'
- job: '/usr/share/ansible/inventory/multi_inventory.py --refresh-cache &> /dev/null'
- when: oo_cron_refresh_cache is defined and oo_cron_refresh_cache
-
-- name: Set cache location
- file:
- state: directory
- dest: "{{ oo_inventory_cache_location | dirname }}"
- owner: root
- group: libra_ops
- recurse: yes
- mode: '2770'
- when: oo_inventory_cache_location is defined
diff --git a/roles/openshift_ansible_inventory/vars/main.yml b/roles/openshift_ansible_inventory/vars/main.yml
deleted file mode 100644
index 25c049282..000000000
--- a/roles/openshift_ansible_inventory/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# vars file for openshift_ansible_inventory
diff --git a/test/env-setup b/test/env-setup
deleted file mode 100644
index 7456a641b..000000000
--- a/test/env-setup
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/bin/bash
-
-CUR_PATH=$(pwd)
-
-PREFIX_PYTHONPATH=$CUR_PATH/inventory/:$CUR_PATH/roles/lib_yaml_editor/library
-
-
-export PYTHONPATH=$PREFIX_PYTHONPATH:$PYTHONPATH
diff --git a/test/units/README.md b/test/units/README.md
deleted file mode 100644
index 78a02c3ea..000000000
--- a/test/units/README.md
+++ /dev/null
@@ -1,7 +0,0 @@
-Location for python unittests.
-
-These should be run by sourcing the env-setup:
-$ source test/env-setup
-
-Then navigate to the test/units/ directory.
-$ python -m unittest multi_inventory_test
diff --git a/test/units/multi_inventory_test.py b/test/units/multi_inventory_test.py
deleted file mode 100755
index 168cd82b7..000000000
--- a/test/units/multi_inventory_test.py
+++ /dev/null
@@ -1,114 +0,0 @@
-#!/usr/bin/env python2
-'''
- Unit tests for MultiInventory
-'''
-
-import unittest
-import multi_inventory
-
-# Removing invalid variable names for tests so that I can
-# keep them brief
-# pylint: disable=invalid-name
-class MultiInventoryTest(unittest.TestCase):
- '''
- Test class for multiInventory
- '''
-
-# def setUp(self):
-# '''setup method'''
-# pass
-
- def test_merge_simple_1(self):
- '''Testing a simple merge of 2 dictionaries'''
- a = {"key1" : 1}
- b = {"key1" : 2}
- result = {}
- _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
- self.assertEqual(result, {"key1": [1, 2]})
-
- def test_merge_b_empty(self):
- '''Testing a merge of an empty dictionary'''
- a = {"key1" : 1}
- b = {}
- result = {}
- _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
- self.assertEqual(result, {"key1": 1})
-
- def test_merge_a_empty(self):
- '''Testing a merge of an empty dictionary'''
- b = {"key1" : 1}
- a = {}
- result = {}
- _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
- self.assertEqual(result, {"key1": 1})
-
- def test_merge_hash_array(self):
- '''Testing a merge of a dictionary and a dictionary with an array'''
- a = {"key1" : {"hasha": 1}}
- b = {"key1" : [1, 2]}
- result = {}
- _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
- self.assertEqual(result, {"key1": [{"hasha": 1}, 1, 2]})
-
- def test_merge_array_hash(self):
- '''Testing a merge of a dictionary with an array and a dictionary with a hash'''
- a = {"key1" : [1, 2]}
- b = {"key1" : {"hasha": 1}}
- result = {}
- _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
- self.assertEqual(result, {"key1": [1, 2, {"hasha": 1}]})
-
- def test_merge_keys_1(self):
- '''Testing a merge on a dictionary for keys'''
- a = {"key1" : [1, 2], "key2" : {"hasha": 2}}
- b = {"key2" : {"hashb": 1}}
- result = {}
- _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
- self.assertEqual(result, {"key1": [1, 2], "key2": {"hasha": 2, "hashb": 1}})
-
- def test_merge_recursive_1(self):
- '''Testing a recursive merge'''
- a = {"a" : {"b": {"c": 1}}}
- b = {"a" : {"b": {"c": 2}}}
- result = {}
- _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
- self.assertEqual(result, {"a": {"b": {"c": [1, 2]}}})
-
- def test_merge_recursive_array_item(self):
- '''Testing a recursive merge for an array'''
- a = {"a" : {"b": {"c": [1]}}}
- b = {"a" : {"b": {"c": 2}}}
- result = {}
- _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
- self.assertEqual(result, {"a": {"b": {"c": [1, 2]}}})
-
- def test_merge_recursive_hash_item(self):
- '''Testing a recursive merge for a hash'''
- a = {"a" : {"b": {"c": {"d": 1}}}}
- b = {"a" : {"b": {"c": 2}}}
- result = {}
- _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
- self.assertEqual(result, {"a": {"b": {"c": [{"d": 1}, 2]}}})
-
- def test_merge_recursive_array_hash(self):
- '''Testing a recursive merge for an array and a hash'''
- a = {"a" : [{"b": {"c": 1}}]}
- b = {"a" : {"b": {"c": 1}}}
- result = {}
- _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
- self.assertEqual(result, {"a": [{"b": {"c": 1}}]})
-
- def test_merge_recursive_hash_array(self):
- '''Testing a recursive merge for an array and a hash'''
- a = {"a" : {"b": {"c": 1}}}
- b = {"a" : [{"b": {"c": 1}}]}
- result = {}
- _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
- self.assertEqual(result, {"a": [{"b": {"c": 1}}]})
-
-# def tearDown(self):
-# '''TearDown method'''
-# pass
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/test/units/yedit_test.py b/test/units/yedit_test.py
deleted file mode 100755
index 09a65e888..000000000
--- a/test/units/yedit_test.py
+++ /dev/null
@@ -1,143 +0,0 @@
-#!/usr/bin/env python2
-'''
- Unit tests for yedit
-'''
-
-import unittest
-import os
-
-# Removing invalid variable names for tests so that I can
-# keep them brief
-# pylint: disable=invalid-name,no-name-in-module
-from yedit import Yedit
-
-class YeditTest(unittest.TestCase):
- '''
- Test class for yedit
- '''
- data = {'a': 'a',
- 'b': {'c': {'d': [{'e': 'x'}, 'f', 'g']}},
- }
-
- filename = 'yedit_test.yml'
-
- def setUp(self):
- ''' setup method will create a file and set to known configuration '''
- yed = Yedit(YeditTest.filename)
- yed.yaml_dict = YeditTest.data
- yed.write()
-
- def test_load(self):
- ''' Testing a get '''
- yed = Yedit('yedit_test.yml')
- self.assertEqual(yed.yaml_dict, self.data)
-
- def test_write(self):
- ''' Testing a simple write '''
- yed = Yedit('yedit_test.yml')
- yed.put('key1', 1)
- yed.write()
- self.assertTrue(yed.yaml_dict.has_key('key1'))
- self.assertEqual(yed.yaml_dict['key1'], 1)
-
- def test_write_x_y_z(self):
- '''Testing a write of multilayer key'''
- yed = Yedit('yedit_test.yml')
- yed.put('x.y.z', 'modified')
- yed.write()
- yed.load()
- self.assertEqual(yed.get('x.y.z'), 'modified')
-
- def test_delete_a(self):
- '''Testing a simple delete '''
- yed = Yedit('yedit_test.yml')
- yed.delete('a')
- yed.write()
- yed.load()
- self.assertTrue(not yed.yaml_dict.has_key('a'))
-
- def test_delete_b_c(self):
- '''Testing delete of layered key '''
- yed = Yedit('yedit_test.yml')
- yed.delete('b.c')
- yed.write()
- yed.load()
- self.assertTrue(yed.yaml_dict.has_key('b'))
- self.assertFalse(yed.yaml_dict['b'].has_key('c'))
-
- def test_create(self):
- '''Testing a create '''
- os.unlink(YeditTest.filename)
- yed = Yedit('yedit_test.yml')
- yed.create('foo', 'bar')
- yed.write()
- yed.load()
- self.assertTrue(yed.yaml_dict.has_key('foo'))
- self.assertTrue(yed.yaml_dict['foo'] == 'bar')
-
- def test_create_content(self):
- '''Testing a create with content '''
- content = {"foo": "bar"}
- yed = Yedit("yedit_test.yml", content)
- yed.write()
- yed.load()
- self.assertTrue(yed.yaml_dict.has_key('foo'))
- self.assertEqual(yed.yaml_dict['foo'], 'bar')
-
- def test_array_insert(self):
- '''Testing a create with content '''
- yed = Yedit("yedit_test.yml")
- yed.put('b.c.d[0]', 'inject')
- self.assertTrue(yed.get('b.c.d[0]') == 'inject')
-
- def test_array_insert_first_index(self):
- '''Testing a create with content '''
- yed = Yedit("yedit_test.yml")
- yed.put('b.c.d[0]', 'inject')
- self.assertTrue(yed.get('b.c.d[1]') == 'f')
-
- def test_array_insert_second_index(self):
- '''Testing a create with content '''
- yed = Yedit("yedit_test.yml")
- yed.put('b.c.d[0]', 'inject')
- self.assertTrue(yed.get('b.c.d[2]') == 'g')
-
- def test_dict_array_dict_access(self):
- '''Testing a create with content'''
- yed = Yedit("yedit_test.yml")
- yed.put('b.c.d[0]', [{'x': {'y': 'inject'}}])
- self.assertTrue(yed.get('b.c.d[0].[0].x.y') == 'inject')
-
- def test_dict_array_dict_replace(self):
- '''Testing multilevel delete'''
- yed = Yedit("yedit_test.yml")
- yed.put('b.c.d[0]', [{'x': {'y': 'inject'}}])
- yed.put('b.c.d[0].[0].x.y', 'testing')
- self.assertTrue(yed.yaml_dict.has_key('b'))
- self.assertTrue(yed.yaml_dict['b'].has_key('c'))
- self.assertTrue(yed.yaml_dict['b']['c'].has_key('d'))
- self.assertTrue(isinstance(yed.yaml_dict['b']['c']['d'], list))
- self.assertTrue(isinstance(yed.yaml_dict['b']['c']['d'][0], list))
- self.assertTrue(isinstance(yed.yaml_dict['b']['c']['d'][0][0], dict))
- self.assertTrue(yed.yaml_dict['b']['c']['d'][0][0]['x'].has_key('y'))
- self.assertEqual(yed.yaml_dict['b']['c']['d'][0][0]['x']['y'], 'testing')
-
- def test_dict_array_dict_remove(self):
- '''Testing multilevel delete'''
- yed = Yedit("yedit_test.yml")
- yed.put('b.c.d[0]', [{'x': {'y': 'inject'}}])
- yed.delete('b.c.d[0].[0].x.y')
- self.assertTrue(yed.yaml_dict.has_key('b'))
- self.assertTrue(yed.yaml_dict['b'].has_key('c'))
- self.assertTrue(yed.yaml_dict['b']['c'].has_key('d'))
- self.assertTrue(isinstance(yed.yaml_dict['b']['c']['d'], list))
- self.assertTrue(isinstance(yed.yaml_dict['b']['c']['d'][0], list))
- self.assertTrue(isinstance(yed.yaml_dict['b']['c']['d'][0][0], dict))
- self.assertFalse(yed.yaml_dict['b']['c']['d'][0][0]['x'].has_key('y'))
-
- def tearDown(self):
- '''TearDown method'''
- os.unlink(YeditTest.filename)
-
-if __name__ == "__main__":
- unittest.main()