-rw-r--r--  filter_plugins/oo_filters.py                                         |  83
-rw-r--r--  git/.pylintrc                                                        | 390
-rwxr-xr-x  git/parent.rb                                                        |  45
-rwxr-xr-x  git/pylint.sh                                                        |  14
-rwxr-xr-x  git/yaml_validation.rb                                               |  72
-rwxr-xr-x  inventory/multi_ec2.py                                               |  95
-rw-r--r--  inventory/multi_ec2.yaml.example                                     |   4
-rw-r--r--  playbooks/aws/openshift-cluster/launch.yml                           |   1
-rw-r--r--  playbooks/aws/openshift-cluster/library/ec2_ami_find.py              | 302
-rw-r--r--  playbooks/aws/openshift-cluster/tasks/launch_instances.yml           |  76
-rw-r--r--  playbooks/aws/openshift-cluster/vars.defaults.yml                    |   1
-rw-r--r--  playbooks/aws/openshift-cluster/vars.online.int.yml                  |   9
-rw-r--r--  playbooks/aws/openshift-cluster/vars.online.prod.yml                 |   9
-rw-r--r--  playbooks/aws/openshift-cluster/vars.online.stage.yml                |   9
-rw-r--r--  playbooks/aws/openshift-cluster/vars.yml                             |  13
-rw-r--r--  roles/openshift_master/tasks/main.yml                                |  18
-rwxr-xr-x  roles/openshift_register_nodes/library/kubernetes_register_node.py   | 413
-rw-r--r--  roles/openshift_register_nodes/tasks/main.yml                        |   3

18 files changed, 1353 insertions(+), 204 deletions(-)
diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py
index cf30cde9a..d22b6d188 100644
--- a/filter_plugins/oo_filters.py
+++ b/filter_plugins/oo_filters.py
@@ -1,14 +1,17 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# vim: expandtab:tabstop=4:shiftwidth=4
+'''
+Custom filters for use in openshift-ansible
+'''
-from ansible import errors, runner
-import json
+from ansible import errors
+from operator import itemgetter
import pdb
-import re
def oo_pdb(arg):
- ''' This pops you into a pdb instance where arg is the data passed in from the filter.
+ ''' This pops you into a pdb instance where arg is the data passed in
+ from the filter.
Ex: "{{ hostvars | oo_pdb }}"
'''
pdb.set_trace()
@@ -21,7 +24,8 @@ def oo_len(arg):
return len(arg)
def get_attr(data, attribute=None):
- ''' This looks up dictionary attributes of the form a.b.c and returns the value.
+ ''' This looks up dictionary attributes of the form a.b.c and returns
+ the value.
Ex: data = {'a': {'b': {'c': 5}}}
attribute = "a.b.c"
returns 5
@@ -41,12 +45,13 @@ def oo_flatten(data):
if not issubclass(type(data), list):
raise errors.AnsibleFilterError("|failed expects to flatten a List")
- return [ item for sublist in data for item in sublist ]
+ return [item for sublist in data for item in sublist]
-def oo_collect(data, attribute=None, filters={}):
- ''' This takes a list of dict and collects all attributes specified into a list
- If filter is specified then we will include all items that match _ALL_ of filters.
+def oo_collect(data, attribute=None, filters=None):
+    ''' This takes a list of dict and collects all attributes specified into a
+        list. If filters is specified then we will include all items that match
+        _ALL_ of filters.
Ex: data = [ {'a':1, 'b':5, 'z': 'z'}, # True, return
{'a':2, 'z': 'z'}, # True, return
{'a':3, 'z': 'z'}, # True, return
@@ -56,15 +61,18 @@ def oo_collect(data, attribute=None, filters={}):
filters = {'z': 'z'}
returns [1, 2, 3]
'''
-
if not issubclass(type(data), list):
raise errors.AnsibleFilterError("|failed expects to filter on a List")
if not attribute:
raise errors.AnsibleFilterError("|failed expects attribute to be set")
- if filters:
- retval = [get_attr(d, attribute) for d in data if all([ d[key] == filters[key] for key in filters ]) ]
+ if filters is not None:
+ if not issubclass(type(filters), dict):
+ raise errors.AnsibleFilterError("|fialed expects filter to be a"
+ " dict")
+ retval = [get_attr(d, attribute) for d in data if (
+ all([d[key] == filters[key] for key in filters]))]
else:
retval = [get_attr(d, attribute) for d in data]
@@ -78,7 +86,7 @@ def oo_select_keys(data, keys):
'''
if not issubclass(type(data), dict):
- raise errors.AnsibleFilterError("|failed expects to filter on a Dictionary")
+ raise errors.AnsibleFilterError("|failed expects to filter on a dict")
if not issubclass(type(keys), list):
raise errors.AnsibleFilterError("|failed expects first param is a list")
@@ -98,30 +106,43 @@ def oo_prepend_strings_in_list(data, prepend):
if not issubclass(type(data), list):
raise errors.AnsibleFilterError("|failed expects first param is a list")
if not all(isinstance(x, basestring) for x in data):
- raise errors.AnsibleFilterError("|failed expects first param is a list of strings")
+ raise errors.AnsibleFilterError("|failed expects first param is a list"
+ " of strings")
retval = [prepend + s for s in data]
return retval
-def oo_get_deployment_type_from_groups(data):
- ''' This takes a list of groups and returns the associated
- deployment-type
+def oo_ami_selector(data, image_name):
+ ''' This takes a list of amis and an image name and attempts to return
+ the latest ami.
'''
if not issubclass(type(data), list):
raise errors.AnsibleFilterError("|failed expects first param is a list")
- regexp = re.compile('^tag_deployment-type[-_]')
- matches = filter(regexp.match, data)
- if len(matches) > 0:
- return regexp.sub('', matches[0])
- return "Unknown"
-class FilterModule (object):
+ if not data:
+ return None
+ else:
+ if image_name is None or not image_name.endswith('_*'):
+ ami = sorted(data, key=itemgetter('name'), reverse=True)[0]
+ return ami['ami_id']
+ else:
+ ami_info = [(ami, ami['name'].split('_')[-1]) for ami in data]
+ ami = sorted(ami_info, key=itemgetter(1), reverse=True)[0][0]
+ return ami['ami_id']
+
+# disabling pylint checks for too-few-public-methods and no-self-use since we
+# need to expose a FilterModule object that has a filters method that returns
+# a mapping of filter names to methods.
+# pylint: disable=too-few-public-methods, no-self-use
+class FilterModule(object):
+ ''' FilterModule '''
def filters(self):
+ ''' returns a mapping of filters to methods '''
return {
- "oo_select_keys": oo_select_keys,
- "oo_collect": oo_collect,
- "oo_flatten": oo_flatten,
- "oo_len": oo_len,
- "oo_pdb": oo_pdb,
- "oo_prepend_strings_in_list": oo_prepend_strings_in_list,
- "oo_get_deployment_type_from_groups": oo_get_deployment_type_from_groups
- }
+ "oo_select_keys": oo_select_keys,
+ "oo_collect": oo_collect,
+ "oo_flatten": oo_flatten,
+ "oo_len": oo_len,
+ "oo_pdb": oo_pdb,
+ "oo_prepend_strings_in_list": oo_prepend_strings_in_list,
+ "oo_ami_selector": oo_ami_selector
+ }
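The new oo_ami_selector filter is easiest to follow with concrete data. Below is a minimal sketch of its selection logic; the AMI names and ids are invented for illustration and are not part of the change:

```python
from operator import itemgetter

# Hypothetical ec2_ami_find-style results; only 'name' and 'ami_id' matter.
amis = [
    {'name': 'openshift-rhel7_2015.03.01', 'ami_id': 'ami-aaaa1111'},
    {'name': 'openshift-rhel7_2015.03.15', 'ami_id': 'ami-bbbb2222'},
]

# With image_name = 'openshift-rhel7_*' (ends in '_*'), the filter sorts on
# the substring after the last '_' -- the build/date stamp -- and returns the
# ami_id of the newest image. With no image_name it sorts on the full name.
ami_info = [(ami, ami['name'].split('_')[-1]) for ami in amis]
latest = sorted(ami_info, key=itemgetter(1), reverse=True)[0][0]
assert latest['ami_id'] == 'ami-bbbb2222'
```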
diff --git a/git/.pylintrc b/git/.pylintrc
new file mode 100644
index 000000000..2d45f867e
--- /dev/null
+++ b/git/.pylintrc
@@ -0,0 +1,390 @@
+[MASTER]
+
+# Specify a configuration file.
+#rcfile=
+
+# Python code to execute, usually for sys.path manipulation such as
+# pygtk.require().
+#init-hook=
+
+# Profiled execution.
+profile=no
+
+# Add files or directories to the blacklist. They should be base names, not
+# paths.
+ignore=CVS
+
+# Pickle collected data for later comparisons.
+persistent=no
+
+# List of plugins (as comma separated values of python modules names) to load,
+# usually to register additional checkers.
+load-plugins=
+
+# Deprecated. It was used to include message's id in output. Use --msg-template
+# instead.
+#include-ids=no
+
+# Deprecated. It was used to include symbolic ids of messages in output. Use
+# --msg-template instead.
+#symbols=no
+
+# Use multiple processes to speed up Pylint.
+jobs=1
+
+# Allow loading of arbitrary C extensions. Extensions are imported into the
+# active Python interpreter and may run arbitrary code.
+unsafe-load-any-extension=no
+
+# A comma-separated list of package or module names from where C extensions may
+# be loaded. Extensions are loading into the active Python interpreter and may
+# run arbitrary code
+extension-pkg-whitelist=
+
+# Allow optimization of some AST trees. This will activate a peephole AST
+# optimizer, which will apply various small optimizations. For instance, it can
+# be used to obtain the result of joining multiple strings with the addition
+# operator. Joining a lot of strings can lead to a maximum recursion error in
+# Pylint and this flag can prevent that. It has one side effect, the resulting
+# AST will be different than the one from reality.
+optimize-ast=no
+
+
+[MESSAGES CONTROL]
+
+# Only show warnings with the listed confidence levels. Leave empty to show
+# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED
+confidence=
+
+# Enable the message, report, category or checker with the given id(s). You can
+# either give multiple identifier separated by comma (,) or put this option
+# multiple time. See also the "--disable" option for examples.
+#enable=
+
+# Disable the message, report, category or checker with the given id(s). You
+# can either give multiple identifiers separated by comma (,) or put this
+# option multiple times (only on the command line, not in the configuration
+# file where it should appear only once).You can also use "--disable=all" to
+# disable everything first and then reenable specific checks. For example, if
+# you want to run only the similarities checker, you can use "--disable=all
+# --enable=similarities". If you want to run only the classes checker, but have
+# no Warning level messages displayed, use"--disable=all --enable=classes
+# --disable=W"
+disable=E1608,W1627,E1601,E1603,E1602,E1605,E1604,E1607,E1606,W1621,W1620,W1623,W1622,W1625,W1624,W1609,W1608,W1607,W1606,W1605,W1604,W1603,W1602,W1601,W1639,W1640,I0021,W1638,I0020,W1618,W1619,W1630,W1626,W1637,W1634,W1635,W1610,W1611,W1612,W1613,W1614,W1615,W1616,W1617,W1632,W1633,W0704,W1628,W1629,W1636
+
+
+[REPORTS]
+
+# Set the output format. Available formats are text, parseable, colorized, msvs
+# (visual studio) and html. You can also give a reporter class, eg
+# mypackage.mymodule.MyReporterClass.
+output-format=text
+
+# Put messages in a separate file for each module / package specified on the
+# command line instead of printing them on stdout. Reports (if any) will be
+# written in a file name "pylint_global.[txt|html]".
+files-output=no
+
+# Tells whether to display a full report or only the messages
+reports=no
+
+# Python expression which should return a note less than 10 (10 is the highest
+# note). You have access to the variables errors warning, statement which
+# respectively contain the number of errors / warnings messages and the total
+# number of statements analyzed. This is used by the global evaluation report
+# (RP0004).
+evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
+
+# Add a comment according to your evaluation note. This is used by the global
+# evaluation report (RP0004).
+comment=no
+
+# Template used to display messages. This is a python new-style format string
+# used to format the message information. See doc for all details
+#msg-template=
+
+
+[LOGGING]
+
+# Logging modules to check that the string format arguments are in logging
+# function parameter format
+logging-modules=logging
+
+
+[BASIC]
+
+# Required attributes for module, separated by a comma
+required-attributes=
+
+# List of builtins function names that should not be used, separated by a comma
+bad-functions=map,filter,input
+
+# Good variable names which should always be accepted, separated by a comma
+good-names=i,j,k,ex,Run,_
+
+# Bad variable names which should always be refused, separated by a comma
+bad-names=foo,bar,baz,toto,tutu,tata
+
+# Colon-delimited sets of names that determine each other's naming style when
+# the name regexes allow several styles.
+name-group=
+
+# Include a hint for the correct naming format with invalid-name
+include-naming-hint=no
+
+# Regular expression matching correct function names
+function-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Naming hint for function names
+function-name-hint=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression matching correct variable names
+variable-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Naming hint for variable names
+variable-name-hint=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression matching correct constant names
+const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$
+
+# Naming hint for constant names
+const-name-hint=(([A-Z_][A-Z0-9_]*)|(__.*__))$
+
+# Regular expression matching correct attribute names
+attr-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Naming hint for attribute names
+attr-name-hint=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression matching correct argument names
+argument-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Naming hint for argument names
+argument-name-hint=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression matching correct class attribute names
+class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$
+
+# Naming hint for class attribute names
+class-attribute-name-hint=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$
+
+# Regular expression matching correct inline iteration names
+inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$
+
+# Naming hint for inline iteration names
+inlinevar-name-hint=[A-Za-z_][A-Za-z0-9_]*$
+
+# Regular expression matching correct class names
+class-rgx=[A-Z_][a-zA-Z0-9]+$
+
+# Naming hint for class names
+class-name-hint=[A-Z_][a-zA-Z0-9]+$
+
+# Regular expression matching correct module names
+module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
+
+# Naming hint for module names
+module-name-hint=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
+
+# Regular expression matching correct method names
+method-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Naming hint for method names
+method-name-hint=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression which should only match function or class names that do
+# not require a docstring.
+no-docstring-rgx=__.*__
+
+# Minimum line length for functions/classes that require docstrings, shorter
+# ones are exempt.
+docstring-min-length=-1
+
+
+[SIMILARITIES]
+
+# Minimum lines number of a similarity.
+min-similarity-lines=4
+
+# Ignore comments when computing similarities.
+ignore-comments=yes
+
+# Ignore docstrings when computing similarities.
+ignore-docstrings=yes
+
+# Ignore imports when computing similarities.
+ignore-imports=no
+
+
+[VARIABLES]
+
+# Tells whether we should check for unused import in __init__ files.
+init-import=no
+
+# A regular expression matching the name of dummy variables (i.e. expectedly
+# not used).
+dummy-variables-rgx=_$|dummy
+
+# List of additional names supposed to be defined in builtins. Remember that
+# you should avoid to define new builtins when possible.
+additional-builtins=
+
+# List of strings which can identify a callback function by name. A callback
+# name must start or end with one of those strings.
+callbacks=cb_,_cb
+
+
+[TYPECHECK]
+
+# Tells whether missing members accessed in mixin class should be ignored. A
+# mixin class is detected if its name ends with "mixin" (case insensitive).
+ignore-mixin-members=yes
+
+# List of module names for which member attributes should not be checked
+# (useful for modules/projects where namespaces are manipulated during runtime
+# and thus existing member attributes cannot be deduced by static analysis
+ignored-modules=
+
+# List of classes names for which member attributes should not be checked
+# (useful for classes with attributes dynamically set).
+ignored-classes=SQLObject
+
+# When zope mode is activated, add a predefined set of Zope acquired attributes
+# to generated-members.
+zope=no
+
+# List of members which are set dynamically and missed by pylint inference
+# system, and so shouldn't trigger E0201 when accessed. Python regular
+# expressions are accepted.
+generated-members=REQUEST,acl_users,aq_parent
+
+
+[SPELLING]
+
+# Spelling dictionary name. Available dictionaries: none. To make it working
+# install python-enchant package.
+spelling-dict=
+
+# List of comma separated words that should not be checked.
+spelling-ignore-words=
+
+# A path to a file that contains private dictionary; one word per line.
+spelling-private-dict-file=
+
+# Tells whether to store unknown words to indicated private dictionary in
+# --spelling-private-dict-file option instead of raising a message.
+spelling-store-unknown-words=no
+
+
+[MISCELLANEOUS]
+
+# List of note tags to take in consideration, separated by a comma.
+notes=FIXME,XXX,TODO
+
+
+[FORMAT]
+
+# Maximum number of characters on a single line.
+max-line-length=100
+
+# Regexp for a line that is allowed to be longer than the limit.
+ignore-long-lines=^\s*(# )?<?https?://\S+>?$
+
+# Allow the body of an if to be on the same line as the test if there is no
+# else.
+single-line-if-stmt=no
+
+# List of optional constructs for which whitespace checking is disabled
+no-space-check=trailing-comma,dict-separator
+
+# Maximum number of lines in a module
+max-module-lines=1000
+
+# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1
+# tab).
+indent-string=' '
+
+# Number of spaces of indent required inside a hanging or continued line.
+indent-after-paren=4
+
+# Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
+expected-line-ending-format=
+
+
+[DESIGN]
+
+# Maximum number of arguments for function / method
+max-args=5
+
+# Argument names that match this expression will be ignored. Default to name
+# with leading underscore
+ignored-argument-names=_.*
+
+# Maximum number of locals for function / method body
+max-locals=15
+
+# Maximum number of return / yield for function / method body
+max-returns=6
+
+# Maximum number of branch for function / method body
+max-branches=12
+
+# Maximum number of statements in function / method body
+max-statements=50
+
+# Maximum number of parents for a class (see R0901).
+max-parents=7
+
+# Maximum number of attributes for a class (see R0902).
+max-attributes=7
+
+# Minimum number of public methods for a class (see R0903).
+min-public-methods=2
+
+# Maximum number of public methods for a class (see R0904).
+max-public-methods=20
+
+
+[CLASSES]
+
+# List of interface methods to ignore, separated by a comma. This is used for
+# instance to not check methods defines in Zope's Interface base class.
+ignore-iface-methods=isImplementedBy,deferred,extends,names,namesAndDescriptions,queryDescriptionFor,getBases,getDescriptionFor,getDoc,getName,getTaggedValue,getTaggedValueTags,isEqualOrExtendedBy,setTaggedValue,isImplementedByInstancesOf,adaptWith,is_implemented_by
+
+# List of method names used to declare (i.e. assign) instance attributes.
+defining-attr-methods=__init__,__new__,setUp
+
+# List of valid names for the first argument in a class method.
+valid-classmethod-first-arg=cls
+
+# List of valid names for the first argument in a metaclass class method.
+valid-metaclass-classmethod-first-arg=mcs
+
+# List of member names, which should be excluded from the protected access
+# warning.
+exclude-protected=_asdict,_fields,_replace,_source,_make
+
+
+[IMPORTS]
+
+# Deprecated modules which should not be used, separated by a comma
+deprecated-modules=regsub,TERMIOS,Bastion,rexec
+
+# Create a graph of every (i.e. internal and external) dependencies in the
+# given file (report RP0402 must not be disabled)
+import-graph=
+
+# Create a graph of external dependencies in the given file (report RP0402 must
+# not be disabled)
+ext-import-graph=
+
+# Create a graph of internal dependencies in the given file (report RP0402 must
+# not be disabled)
+int-import-graph=
+
+
+[EXCEPTIONS]
+
+# Exceptions that will emit a warning when being caught. Defaults to
+# "Exception"
+overgeneral-exceptions=Exception
diff --git a/git/parent.rb b/git/parent.rb
new file mode 100755
index 000000000..2acb127c4
--- /dev/null
+++ b/git/parent.rb
@@ -0,0 +1,45 @@
+#!/usr/bin/env ruby
+#
+#
+#
+
+if __FILE__ == $0
+  # If we aren't pushing to the prod branch we don't need the parent check
+ branch = 'prod'
+ exit(0) if ARGV[0] !~ /#{branch}/
+ commit_id = ARGV[1]
+ %x[/usr/bin/git checkout #{branch}]
+ %x[/usr/bin/git merge #{commit_id}]
+
+ count = 0
+ #lines = %x[/usr/bin/git rev-list --left-right stg...master].split("\n")
+ lines = %x[/usr/bin/git rev-list --left-right remotes/origin/stg...#{branch}].split("\n")
+ lines.each do |commit|
+ # next if they are in stage
+ next if commit =~ /^</
+ # remove the first char '>'
+ commit = commit[1..-1]
+ # check if any remote branches contain $commit
+ results = %x[/usr/bin/git branch -q -r --contains #{commit} 2>/dev/null ]
+ # if this comes back empty, nothing contains it, we can skip it as
+ # we have probably created the merge commit here locally
+ next if results.empty?
+
+ # The results generally contain origin/pr/246/merge and origin/pr/246/head
+ # this is the pull request which would contain the commit in question.
+ #
+ # If the results do not contain origin/stg then stage does not contain
+ # the commit in question. Therefore we need to alert!
+ unless results =~ /origin\/stg/
+ puts "\nFAILED: (These commits are not in stage.)\n"
+ puts "\t#{commit}"
+ count += 1
+ end
+ end
+
+ # Exit with count of commits in #{branch} but not stg
+ exit(count)
+end
+
+__END__
+
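For reference, `git rev-list --left-right A...B` prints each commit reachable from exactly one side, prefixed `<` (only in A) or `>` (only in B); the loop above keys on that marker. A rough Python transliteration of the check, assuming the same git commands (illustrative only, not part of the hook):

```python
import subprocess

def commits_missing_from_stage(branch='prod'):
    """Commits on `branch` that some remote branch contains but
    origin/stg does not (mirrors the parent.rb loop)."""
    out = subprocess.check_output(
        ['git', 'rev-list', '--left-right',
         'remotes/origin/stg...' + branch]).decode()
    missing = []
    for line in out.splitlines():
        if line.startswith('<'):   # commit only in stg: nothing to check
            continue
        commit = line[1:]          # strip the '>' marker
        refs = subprocess.check_output(
            ['git', 'branch', '-q', '-r', '--contains', commit]).decode()
        # empty output: probably a local-only merge commit, skip it
        if refs and 'origin/stg' not in refs:
            missing.append(commit)
    return missing
```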
diff --git a/git/pylint.sh b/git/pylint.sh
new file mode 100755
index 000000000..286747565
--- /dev/null
+++ b/git/pylint.sh
@@ -0,0 +1,14 @@
+#!/usr/bin/env bash
+
+
+OLDREV=$1
+NEWREV=$2
+TRG_BRANCH=$3
+
+PYTHON=/var/lib/jenkins/python27/bin/python
+
+/usr/bin/git diff --name-only $OLDREV $NEWREV --diff-filter=ACM | \
+ grep ".py$" | \
+ xargs -r -I{} ${PYTHON} -m pylint --rcfile ${WORKSPACE}/git/.pylintrc {}
+
+exit $?
diff --git a/git/yaml_validation.rb b/git/yaml_validation.rb
new file mode 100755
index 000000000..f5ded7a78
--- /dev/null
+++ b/git/yaml_validation.rb
@@ -0,0 +1,72 @@
+#!/usr/bin/env ruby
+#
+#
+#
+require 'yaml'
+require 'tmpdir'
+
+class YamlValidate
+ def self.yaml_file?(filename)
+ return filename.end_with?('.yaml') || filename.end_with?('.yml')
+ end
+
+ def self.short_yaml_ext?(filename)
+ return filename.end_with?(".yml")
+ end
+
+ def self.valid_yaml?(filename)
+ YAML::load_file(filename)
+
+ return true
+ end
+end
+
+class GitCommit
+ attr_accessor :oldrev, :newrev, :refname, :tmp
+ def initialize(oldrev, newrev, refname)
+ @oldrev = oldrev
+ @newrev = newrev
+ @refname = refname
+ @tmp = Dir.mktmpdir(@newrev)
+ end
+
+ def get_file_changes()
+ files = %x[/usr/bin/git diff --name-only #{@oldrev} #{@newrev} --diff-filter=ACM].split("\n")
+
+ # if files is empty we will get a full checkout. This happens on
+ # a git rm file. If there are no changes then we need to skip the archive
+ return [] if files.empty?
+
+ # We only want to take the files that changed. Archive will do that when passed
+ # the filenames. It will export these to a tmp dir
+ system("/usr/bin/git archive #{@newrev} #{files.join(" ")} | tar x -C #{@tmp}")
+ return Dir.glob("#{@tmp}/**/*").delete_if { |file| File.directory?(file) }
+ end
+end
+
+if __FILE__ == $0
+ while data = STDIN.gets
+ oldrev, newrev, refname = data.split
+ gc = GitCommit.new(oldrev, newrev, refname)
+
+ results = []
+ gc.get_file_changes().each do |file|
+ begin
+ puts "++++++ Received: #{file}"
+
+ #raise "Yaml file extensions must be .yaml not .yml" if YamlValidate.short_yaml_ext? file
+
+ # skip readme, other files, etc
+ next unless YamlValidate.yaml_file?(file)
+
+ results << YamlValidate.valid_yaml?(file)
+ rescue Exception => ex
+ puts "\n#{ex.message}\n\n"
+ results << false
+ end
+ end
+
+ #puts "RESULTS\n#{results.inspect}\n"
+ exit 1 if results.include?(false)
+ end
+end
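The hook's definition of "valid" is simply that the YAML parser does not raise. A Python equivalent of the per-file check, assuming PyYAML (a sketch, not part of the hook):

```python
import yaml  # PyYAML

def valid_yaml(path):
    """Parse the file; a yaml.YAMLError propagating out means invalid."""
    with open(path) as stream:
        yaml.safe_load(stream)
    return True

# As in the hook: collect one result per changed file, catch exceptions as
# False, and reject the push if any file failed.
```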
diff --git a/inventory/multi_ec2.py b/inventory/multi_ec2.py
index 26c09d712..b839a33ea 100755
--- a/inventory/multi_ec2.py
+++ b/inventory/multi_ec2.py
@@ -1,22 +1,29 @@
#!/usr/bin/env python2
+'''
+ Fetch and combine multiple ec2 account settings into a single
+ json hash.
+'''
# vim: expandtab:tabstop=4:shiftwidth=4
from time import time
import argparse
import yaml
import os
-import sys
-import pdb
import subprocess
import json
-import pprint
CONFIG_FILE_NAME = 'multi_ec2.yaml'
class MultiEc2(object):
+ '''
+ MultiEc2 class:
+ Opens a yaml config file and reads aws credentials.
+ Stores a json hash of resources in result.
+ '''
def __init__(self):
+ self.args = None
self.config = None
self.all_ec2_results = {}
self.result = {}
@@ -24,7 +31,7 @@ class MultiEc2(object):
self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
same_dir_config_file = os.path.join(self.file_path, CONFIG_FILE_NAME)
- etc_dir_config_file = os.path.join(os.path.sep, 'etc','ansible', CONFIG_FILE_NAME)
+ etc_dir_config_file = os.path.join(os.path.sep, 'etc', 'ansible', CONFIG_FILE_NAME)
# Prefer a file in the same directory, fall back to a file in etc
if os.path.isfile(same_dir_config_file):
@@ -39,12 +46,13 @@ class MultiEc2(object):
# load yaml
if self.config_file and os.path.isfile(self.config_file):
self.config = self.load_yaml_config()
- elif os.environ.has_key("AWS_ACCESS_KEY_ID") and os.environ.has_key("AWS_SECRET_ACCESS_KEY"):
+ elif os.environ.has_key("AWS_ACCESS_KEY_ID") and \
+ os.environ.has_key("AWS_SECRET_ACCESS_KEY"):
self.config = {}
self.config['accounts'] = [
{
'name': 'default',
- 'provider': 'aws/ec2.py',
+ 'provider': 'aws/hosts/ec2.py',
'env_vars': {
'AWS_ACCESS_KEY_ID': os.environ["AWS_ACCESS_KEY_ID"],
'AWS_SECRET_ACCESS_KEY': os.environ["AWS_SECRET_ACCESS_KEY"],
@@ -56,13 +64,9 @@ class MultiEc2(object):
else:
raise RuntimeError("Could not find valid ec2 credentials in the environment.")
- if self.args.cache_only:
- # get data from disk
- result = self.get_inventory_from_cache()
-
- if not result:
- self.get_inventory()
- self.write_to_cache()
+ if self.args.refresh_cache:
+ self.get_inventory()
+ self.write_to_cache()
# if its a host query, fetch and do not cache
elif self.args.host:
self.get_inventory()
@@ -74,7 +78,7 @@ class MultiEc2(object):
# get data from disk
self.get_inventory_from_cache()
- def load_yaml_config(self,conf_file=None):
+ def load_yaml_config(self, conf_file=None):
"""Load a yaml config file with credentials to query the
respective cloud for inventory.
"""
@@ -88,7 +92,7 @@ class MultiEc2(object):
return config
- def get_provider_tags(self,provider, env={}):
+ def get_provider_tags(self, provider, env=None):
"""Call <provider> and query all of the tags that are usuable
by ansible. If environment is empty use the default env.
"""
@@ -153,7 +157,8 @@ class MultiEc2(object):
self.all_ec2_results[result['name']] = json.loads(result['out'])
values = self.all_ec2_results.values()
values.insert(0, self.result)
- [MultiEc2.merge_destructively(self.result, x) for x in values]
+ for result in values:
+ MultiEc2.merge_destructively(self.result, result)
else:
# For any 0 result, return it
count = 0
@@ -165,30 +170,30 @@ class MultiEc2(object):
raise RuntimeError("Found > 1 results for --host %s. \
This is an invalid state." % self.args.host)
@staticmethod
- def merge_destructively(a, b):
- "merges b into a"
- for key in b:
- if key in a:
- if isinstance(a[key], dict) and isinstance(b[key], dict):
- MultiEc2.merge_destructively(a[key], b[key])
- elif a[key] == b[key]:
+ def merge_destructively(input_a, input_b):
+ "merges b into input_a"
+ for key in input_b:
+ if key in input_a:
+ if isinstance(input_a[key], dict) and isinstance(input_b[key], dict):
+ MultiEc2.merge_destructively(input_a[key], input_b[key])
+ elif input_a[key] == input_b[key]:
pass # same leaf value
# both lists so add each element in b to a if it does ! exist
- elif isinstance(a[key], list) and isinstance(b[key],list):
- for x in b[key]:
- if x not in a[key]:
- a[key].append(x)
+ elif isinstance(input_a[key], list) and isinstance(input_b[key], list):
+ for result in input_b[key]:
+ if result not in input_a[key]:
+                        input_a[key].append(result)
# a is a list and not b
- elif isinstance(a[key], list):
- if b[key] not in a[key]:
- a[key].append(b[key])
- elif isinstance(b[key], list):
- a[key] = [a[key]] + [k for k in b[key] if k != a[key]]
+ elif isinstance(input_a[key], list):
+ if input_b[key] not in input_a[key]:
+ input_a[key].append(input_b[key])
+ elif isinstance(input_b[key], list):
+ input_a[key] = [input_a[key]] + [k for k in input_b[key] if k != input_a[key]]
else:
- a[key] = [a[key],b[key]]
+ input_a[key] = [input_a[key], input_b[key]]
else:
- a[key] = b[key]
- return a
+ input_a[key] = input_b[key]
+ return input_a
def is_cache_valid(self):
''' Determines if the cache files have expired, or if it is still valid '''
@@ -204,19 +209,20 @@ class MultiEc2(object):
def parse_cli_args(self):
''' Command line argument processing '''
- parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on a provider')
- parser.add_argument('--cache-only', action='store_true', default=False,
- help='Fetch cached only instances (default: False)')
+ parser = argparse.ArgumentParser(
+ description='Produce an Ansible Inventory file based on a provider')
+ parser.add_argument('--refresh-cache', action='store_true', default=False,
+                       help='Refresh the inventory cache (default: False)')
parser.add_argument('--list', action='store_true', default=True,
- help='List instances (default: True)')
+ help='List instances (default: True)')
parser.add_argument('--host', action='store', default=False,
- help='Get all the variables about a specific instance')
+ help='Get all the variables about a specific instance')
self.args = parser.parse_args()
def write_to_cache(self):
''' Writes data in JSON format to a file '''
- json_data = self.json_format_dict(self.result, True)
+ json_data = MultiEc2.json_format_dict(self.result, True)
with open(self.cache_path, 'w') as cache:
cache.write(json_data)
@@ -232,7 +238,8 @@ class MultiEc2(object):
return True
- def json_format_dict(self, data, pretty=False):
+ @classmethod
+ def json_format_dict(cls, data, pretty=False):
''' Converts a dict to a JSON object and dumps it as a formatted
string '''
@@ -242,9 +249,9 @@ class MultiEc2(object):
return json.dumps(data)
def result_str(self):
+        '''Return the contents of self.result as a pretty-printed JSON string'''
return self.json_format_dict(self.result, True)
if __name__ == "__main__":
- mi = MultiEc2()
- print mi.result_str()
+ print MultiEc2().result_str()
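merge_destructively is the heart of the multi-account combination: dicts recurse, lists are unioned, equal leaf values pass through, and conflicting scalars are collected into a list. A small worked example of those semantics (inputs invented for illustration):

```python
a = {'hosts': ['h1'], 'vars': {'region': 'us-east-1'}, 'env': 'int'}
b = {'hosts': ['h2'], 'vars': {'region': 'us-east-1'}, 'env': 'stg',
     'extra': 1}

# After MultiEc2.merge_destructively(a, b), a is:
# {'hosts': ['h1', 'h2'],            # both lists: union, preserving order
#  'vars': {'region': 'us-east-1'},  # both dicts: recurse; equal leaf kept
#  'env': ['int', 'stg'],            # conflicting scalars become a list
#  'extra': 1}                       # keys only in b are copied over
```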
diff --git a/inventory/multi_ec2.yaml.example b/inventory/multi_ec2.yaml.example
index 0bd505816..91e7c7970 100644
--- a/inventory/multi_ec2.yaml.example
+++ b/inventory/multi_ec2.yaml.example
@@ -1,13 +1,13 @@
# multi ec2 inventory configs
accounts:
- name: aws1
- provider: aws/ec2.py
+ provider: aws/hosts/ec2.py
env_vars:
AWS_ACCESS_KEY_ID: XXXXXXXXXXXXXXXXXXXX
AWS_SECRET_ACCESS_KEY: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
- name: aws2
- provider: aws/ec2.py
+ provider: aws/hosts/ec2.py
env_vars:
AWS_ACCESS_KEY_ID: XXXXXXXXXXXXXXXXXXXX
AWS_SECRET_ACCESS_KEY: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
diff --git a/playbooks/aws/openshift-cluster/launch.yml b/playbooks/aws/openshift-cluster/launch.yml
index a0de00fc3..3eb5496e4 100644
--- a/playbooks/aws/openshift-cluster/launch.yml
+++ b/playbooks/aws/openshift-cluster/launch.yml
@@ -5,6 +5,7 @@
gather_facts: no
vars_files:
- vars.yml
+ - ["vars.{{ deployment_type }}.{{ cluster_id }}.yml", vars.defaults.yml]
tasks:
- fail:
msg: Deployment type not supported for aws provider yet
diff --git a/playbooks/aws/openshift-cluster/library/ec2_ami_find.py b/playbooks/aws/openshift-cluster/library/ec2_ami_find.py
new file mode 100644
index 000000000..29e594a65
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/library/ec2_ami_find.py
@@ -0,0 +1,302 @@
+#!/usr/bin/python
+#pylint: skip-file
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: ec2_ami_find
+version_added: 2.0
+short_description: Searches for AMIs to obtain the AMI ID and other information
+description:
+ - Returns list of matching AMIs with AMI ID, along with other useful information
+ - Can search AMIs with different owners
+ - Can search by matching tag(s), by AMI name and/or other criteria
+ - Results can be sorted and sliced
+author: Tom Bamford
+notes:
+ - This module is not backwards compatible with the previous version of the ec2_search_ami module which worked only for Ubuntu AMIs listed on cloud-images.ubuntu.com.
+ - See the example below for a suggestion of how to search by distro/release.
+options:
+ region:
+ description:
+ - The AWS region to use.
+ required: true
+ aliases: [ 'aws_region', 'ec2_region' ]
+ owner:
+ description:
+ - Search AMIs owned by the specified owner
+ - Can specify an AWS account ID, or one of the special IDs 'self', 'amazon' or 'aws-marketplace'
+ - If not specified, all EC2 AMIs in the specified region will be searched.
+ - You can include wildcards in many of the search options. An asterisk (*) matches zero or more characters, and a question mark (?) matches exactly one character. You can escape special characters using a backslash (\) before the character. For example, a value of \*amazon\?\\ searches for the literal string *amazon?\.
+ required: false
+ default: null
+ ami_id:
+ description:
+ - An AMI ID to match.
+ default: null
+ required: false
+ ami_tags:
+ description:
+ - A hash/dictionary of tags to match for the AMI.
+ default: null
+ required: false
+ architecture:
+ description:
+ - An architecture type to match (e.g. x86_64).
+ default: null
+ required: false
+ hypervisor:
+ description:
+      - A hypervisor type to match (e.g. xen).
+ default: null
+ required: false
+ is_public:
+ description:
+ - Whether or not the image(s) are public.
+ choices: ['yes', 'no']
+ default: null
+ required: false
+ name:
+ description:
+ - An AMI name to match.
+ default: null
+ required: false
+ platform:
+ description:
+ - Platform type to match.
+ default: null
+ required: false
+ sort:
+ description:
+      - Optional attribute with which to sort the results.
+      - If specifying 'tag', the 'sort_tag' parameter is required.
+ choices: ['name', 'description', 'tag']
+ default: null
+ required: false
+ sort_tag:
+ description:
+ - Tag name with which to sort results.
+ - Required when specifying 'sort=tag'.
+ default: null
+ required: false
+ sort_order:
+ description:
+ - Order in which to sort results.
+ - Only used when the 'sort' parameter is specified.
+ choices: ['ascending', 'descending']
+ default: 'ascending'
+ required: false
+ sort_start:
+ description:
+ - Which result to start with (when sorting).
+ - Corresponds to Python slice notation.
+ default: null
+ required: false
+ sort_end:
+ description:
+ - Which result to end with (when sorting).
+ - Corresponds to Python slice notation.
+ default: null
+ required: false
+ state:
+ description:
+ - AMI state to match.
+ default: 'available'
+ required: false
+ virtualization_type:
+ description:
+ - Virtualization type to match (e.g. hvm).
+ default: null
+ required: false
+ no_result_action:
+ description:
+ - What to do when no results are found.
+ - "'success' reports success and returns an empty array"
+ - "'fail' causes the module to report failure"
+ choices: ['success', 'fail']
+ default: 'success'
+ required: false
+requirements:
+ - boto
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Search for the AMI tagged "project:website"
+- ec2_ami_find:
+ owner: self
+    ami_tags:
+ project: website
+ no_result_action: fail
+ register: ami_find
+
+# Search for the latest Ubuntu 14.04 AMI
+- ec2_ami_find:
+ name: "ubuntu/images/ebs/ubuntu-trusty-14.04-amd64-server-*"
+ owner: 099720109477
+ sort: name
+ sort_order: descending
+ sort_end: 1
+ register: ami_find
+
+# Launch an EC2 instance
+- ec2:
+ image: "{{ ami_search.results[0].ami_id }}"
+ instance_type: m3.medium
+ key_name: mykey
+ wait: yes
+'''
+
+try:
+ import boto.ec2
+ HAS_BOTO=True
+except ImportError:
+ HAS_BOTO=False
+
+import json
+
+def main():
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(dict(
+ region = dict(required=True,
+ aliases = ['aws_region', 'ec2_region']),
+ owner = dict(required=False, default=None),
+ ami_id = dict(required=False),
+ ami_tags = dict(required=False, type='dict',
+ aliases = ['search_tags', 'image_tags']),
+ architecture = dict(required=False),
+ hypervisor = dict(required=False),
+ is_public = dict(required=False),
+ name = dict(required=False),
+ platform = dict(required=False),
+ sort = dict(required=False, default=None,
+ choices=['name', 'description', 'tag']),
+ sort_tag = dict(required=False),
+ sort_order = dict(required=False, default='ascending',
+ choices=['ascending', 'descending']),
+ sort_start = dict(required=False),
+ sort_end = dict(required=False),
+ state = dict(required=False, default='available'),
+ virtualization_type = dict(required=False),
+ no_result_action = dict(required=False, default='success',
+ choices = ['success', 'fail']),
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ )
+
+ if not HAS_BOTO:
+ module.fail_json(msg='boto required for this module, install via pip or your package manager')
+
+ ami_id = module.params.get('ami_id')
+ ami_tags = module.params.get('ami_tags')
+ architecture = module.params.get('architecture')
+ hypervisor = module.params.get('hypervisor')
+ is_public = module.params.get('is_public')
+ name = module.params.get('name')
+ owner = module.params.get('owner')
+ platform = module.params.get('platform')
+ sort = module.params.get('sort')
+ sort_tag = module.params.get('sort_tag')
+ sort_order = module.params.get('sort_order')
+ sort_start = module.params.get('sort_start')
+ sort_end = module.params.get('sort_end')
+ state = module.params.get('state')
+ virtualization_type = module.params.get('virtualization_type')
+ no_result_action = module.params.get('no_result_action')
+
+ filter = {'state': state}
+
+ if ami_id:
+ filter['image_id'] = ami_id
+ if ami_tags:
+ for tag in ami_tags:
+ filter['tag:'+tag] = ami_tags[tag]
+ if architecture:
+ filter['architecture'] = architecture
+ if hypervisor:
+ filter['hypervisor'] = hypervisor
+ if is_public:
+ filter['is_public'] = is_public
+ if name:
+ filter['name'] = name
+ if platform:
+ filter['platform'] = platform
+ if virtualization_type:
+ filter['virtualization_type'] = virtualization_type
+
+ ec2 = ec2_connect(module)
+
+ images_result = ec2.get_all_images(owners=owner, filters=filter)
+
+ if no_result_action == 'fail' and len(images_result) == 0:
+ module.fail_json(msg="No AMIs matched the attributes: %s" % json.dumps(filter))
+
+ results = []
+ for image in images_result:
+ data = {
+ 'ami_id': image.id,
+ 'architecture': image.architecture,
+ 'description': image.description,
+ 'is_public': image.is_public,
+ 'name': image.name,
+ 'owner_id': image.owner_id,
+ 'platform': image.platform,
+ 'root_device_name': image.root_device_name,
+ 'root_device_type': image.root_device_type,
+ 'state': image.state,
+ 'tags': image.tags,
+ 'virtualization_type': image.virtualization_type,
+ }
+
+ if image.kernel_id:
+ data['kernel_id'] = image.kernel_id
+ if image.ramdisk_id:
+ data['ramdisk_id'] = image.ramdisk_id
+
+ results.append(data)
+
+ if sort == 'tag':
+ if not sort_tag:
+ module.fail_json(msg="'sort_tag' option must be given with 'sort=tag'")
+ results.sort(key=lambda e: e['tags'][sort_tag], reverse=(sort_order=='descending'))
+ elif sort:
+ results.sort(key=lambda e: e[sort], reverse=(sort_order=='descending'))
+
+ try:
+ if sort and sort_start and sort_end:
+ results = results[int(sort_start):int(sort_end)]
+ elif sort and sort_start:
+ results = results[int(sort_start):]
+ elif sort and sort_end:
+ results = results[:int(sort_end)]
+ except TypeError:
+ module.fail_json(msg="Please supply numeric values for sort_start and/or sort_end")
+
+ module.exit_json(results=results)
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+ main()
+
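The sort/slice handling at the end of main() is ordinary list sorting plus Python slice notation, which is what the sort_start/sort_end options document. A condensed sketch with invented results:

```python
results = [{'name': 'img_a'}, {'name': 'img_c'}, {'name': 'img_b'}]
sort, sort_order = 'name', 'descending'
sort_start, sort_end = None, 1   # keep only the first (newest) result

results.sort(key=lambda e: e[sort], reverse=(sort_order == 'descending'))
if sort and sort_end and not sort_start:
    results = results[:int(sort_end)]

assert results == [{'name': 'img_c'}]
```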
diff --git a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml
index 34172396a..39ad9d089 100644
--- a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml
+++ b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml
@@ -4,32 +4,64 @@
env: "{{ cluster }}"
env_host_type: "{{ cluster }}-openshift-{{ type }}"
host_type: "{{ type }}"
- machine_type: "{{ lookup('env', 'ec2_instance_type')
- | default(deployment_vars[deployment_type].type, true) }}"
- machine_image: "{{ lookup('env', 'ec2_ami')
- | default(deployment_vars[deployment_type].image, true) }}"
- machine_region: "{{ lookup('env', 'ec2_region')
- | default(deployment_vars[deployment_type].region, true) }}"
- machine_keypair: "{{ lookup('env', 'ec2_keypair')
- | default(deployment_vars[deployment_type].keypair, true) }}"
- machine_subnet: "{{ lookup('env', 'ec2_vpc_subnet')
- | default(deployment_vars[deployment_type].vpc_subnet, true) }}"
- machine_public_ip: "{{ lookup('env', 'ec2_public_ip')
- | default(deployment_vars[deployment_type].assign_public_ip, true) }}"
- security_groups: "{{ lookup('env', 'ec2_security_groups')
- | default(deployment_vars[deployment_type].security_groups, true) }}"
+
+- set_fact:
+ ec2_region: "{{ lookup('env', 'ec2_region')
+ | default(deployment_vars[deployment_type].region, true) }}"
+ when: ec2_region is not defined
+- set_fact:
+ ec2_image_name: "{{ lookup('env', 'ec2_image_name')
+ | default(deployment_vars[deployment_type].image_name, true) }}"
+ when: ec2_image_name is not defined and ec2_image is not defined
+- set_fact:
+ ec2_image: "{{ lookup('env', 'ec2_image')
+ | default(deployment_vars[deployment_type].image, true) }}"
+ when: ec2_image is not defined and not ec2_image_name
+- set_fact:
+ ec2_instance_type: "{{ lookup('env', 'ec2_instance_type')
+ | default(deployment_vars[deployment_type].type, true) }}"
+ when: ec2_instance_type is not defined
+- set_fact:
+ ec2_keypair: "{{ lookup('env', 'ec2_keypair')
+ | default(deployment_vars[deployment_type].keypair, true) }}"
+ when: ec2_keypair is not defined
+- set_fact:
+ ec2_vpc_subnet: "{{ lookup('env', 'ec2_vpc_subnet')
+ | default(deployment_vars[deployment_type].vpc_subnet, true) }}"
+ when: ec2_vpc_subnet is not defined
+- set_fact:
+ ec2_assign_public_ip: "{{ lookup('env', 'ec2_assign_public_ip')
+ | default(deployment_vars[deployment_type].assign_public_ip, true) }}"
+ when: ec2_assign_public_ip is not defined
+- set_fact:
+ ec2_security_groups: "{{ lookup('env', 'ec2_security_groups')
+ | default(deployment_vars[deployment_type].security_groups, true) }}"
+ when: ec2_security_groups is not defined
+
+- name: Find amis for deployment_type
+ ec2_ami_find:
+ region: "{{ ec2_region }}"
+ ami_id: "{{ ec2_image | default(omit, true) }}"
+ name: "{{ ec2_image_name | default(omit, true) }}"
+ register: ami_result
+
+- fail: msg="Could not find requested ami"
+ when: not ami_result.results
+
+- set_fact:
+ latest_ami: "{{ ami_result.results | oo_ami_selector(ec2_image_name) }}"
- name: Launch instance(s)
ec2:
state: present
- region: "{{ machine_region }}"
- keypair: "{{ machine_keypair }}"
- group: "{{ security_groups }}"
- instance_type: "{{ machine_type }}"
- image: "{{ machine_image }}"
+ region: "{{ ec2_region }}"
+ keypair: "{{ ec2_keypair }}"
+ group: "{{ ec2_security_groups }}"
+ instance_type: "{{ ec2_instance_type }}"
+ image: "{{ latest_ami }}"
count: "{{ instances | oo_len }}"
- vpc_subnet_id: "{{ machine_subnet | default(omit, true) }}"
- assign_public_ip: "{{ machine_public_ip | default(omit, true) }}"
+ vpc_subnet_id: "{{ ec2_vpc_subnet | default(omit, true) }}"
+ assign_public_ip: "{{ ec2_assign_public_ip | default(omit, true) }}"
wait: yes
instance_tags:
created-by: "{{ created_by }}"
@@ -39,7 +71,7 @@
register: ec2
- name: Add Name tag to instances
- ec2_tag: resource={{ item.1.id }} region={{ machine_region }} state=present
+ ec2_tag: resource={{ item.1.id }} region={{ ec2_region }} state=present
with_together:
- instances
- ec2.instances
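Each set_fact above implements the same three-level precedence: a variable that is already defined wins, then the matching environment variable, then the deployment_vars default. The equivalent logic in plain Python, for illustration (the function name is an assumption, not part of the playbook):

```python
import os

def resolve_setting(env_name, already_defined, deployment_default):
    """Mirror the set_fact/when pattern: explicit var > env var > default."""
    if already_defined is not None:   # e.g. set by vars.online.int.yml
        return already_defined
    # lookup('env', ...) | default(..., true) treats '' as unset, like `or`
    return os.environ.get(env_name) or deployment_default

# resolve_setting('ec2_region', None, 'us-east-1') -> $ec2_region if set,
# otherwise 'us-east-1'
```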
diff --git a/playbooks/aws/openshift-cluster/vars.defaults.yml b/playbooks/aws/openshift-cluster/vars.defaults.yml
new file mode 100644
index 000000000..ed97d539c
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/vars.defaults.yml
@@ -0,0 +1 @@
+---
diff --git a/playbooks/aws/openshift-cluster/vars.online.int.yml b/playbooks/aws/openshift-cluster/vars.online.int.yml
new file mode 100644
index 000000000..12f79a9c1
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/vars.online.int.yml
@@ -0,0 +1,9 @@
+---
+ec2_image: ami-906240f8
+ec2_image_name: libra-ops-rhel7*
+ec2_region: us-east-1
+ec2_keypair: mmcgrath_libra
+ec2_instance_type: m3.large
+ec2_security_groups: [ 'int-v3' ]
+ec2_vpc_subnet: subnet-987c0def
+ec2_assign_public_ip: yes
diff --git a/playbooks/aws/openshift-cluster/vars.online.prod.yml b/playbooks/aws/openshift-cluster/vars.online.prod.yml
new file mode 100644
index 000000000..12f79a9c1
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/vars.online.prod.yml
@@ -0,0 +1,9 @@
+---
+ec2_image: ami-906240f8
+ec2_image_name: libra-ops-rhel7*
+ec2_region: us-east-1
+ec2_keypair: mmcgrath_libra
+ec2_instance_type: m3.large
+ec2_security_groups: [ 'int-v3' ]
+ec2_vpc_subnet: subnet-987c0def
+ec2_assign_public_ip: yes
diff --git a/playbooks/aws/openshift-cluster/vars.online.stage.yml b/playbooks/aws/openshift-cluster/vars.online.stage.yml
new file mode 100644
index 000000000..12f79a9c1
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/vars.online.stage.yml
@@ -0,0 +1,9 @@
+---
+ec2_image: ami-906240f8
+ec2_image_name: libra-ops-rhel7*
+ec2_region: us-east-1
+ec2_keypair: mmcgrath_libra
+ec2_instance_type: m3.large
+ec2_security_groups: [ 'int-v3' ]
+ec2_vpc_subnet: subnet-987c0def
+ec2_assign_public_ip: yes
diff --git a/playbooks/aws/openshift-cluster/vars.yml b/playbooks/aws/openshift-cluster/vars.yml
index f87e7aba3..07e453f89 100644
--- a/playbooks/aws/openshift-cluster/vars.yml
+++ b/playbooks/aws/openshift-cluster/vars.yml
@@ -3,6 +3,7 @@ deployment_vars:
origin:
# fedora, since centos requires marketplace
image: ami-acd999c4
+ image_name:
region: us-east-1
ssh_user: fedora
sudo: yes
@@ -13,18 +14,20 @@ deployment_vars:
assign_public_ip:
online:
# private ami
- image: ami-906240f8
+ image: ami-7a9e9812
+ image_name: openshift-rhel7_*
region: us-east-1
ssh_user: root
sudo: no
- keypair: mmcgrath_libra
+ keypair: libra
type: m3.large
- security_groups: [ 'int-v3' ]
- vpc_subnet: subnet-987c0def
- assign_public_ip: yes
+ security_groups: [ 'public' ]
+ vpc_subnet:
+ assign_public_ip:
enterprise:
# rhel-7.1, requires cloud access subscription
image: ami-10663b78
+ image_name:
region: us-east-1
ssh_user: ec2-user
sudo: yes
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index da184e972..28bdda618 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -84,15 +84,23 @@
- name: Start and enable openshift-master
service: name=openshift-master enabled=yes state=started
-- name: Create .kube directory
+- name: Create the OpenShift client config dir(s)
file:
- path: /root/.kube
+ path: "~{{ item }}/.config/openshift"
state: directory
mode: 0700
+ owner: "{{ item }}"
+ group: "{{ item }}"
+ with_items:
+ - root
+ - "{{ ansible_ssh_user }}"
# TODO: Update this file if the contents of the source file are not present in
# the dest file, will need to make sure to ignore things that could be added
-- name: Configure root user kubeconfig
- command: cp {{ openshift_cert_dir }}/openshift-client/.kubeconfig /root/.kube/.kubeconfig
+- name: Create the OpenShift client config(s)
+ command: cp {{ openshift_cert_dir }}/openshift-client/.kubeconfig ~{{ item }}/.config/openshift/.config
args:
- creates: /root/.kube/.kubeconfig
+ creates: ~{{ item }}/.config/openshift/.config
+ with_items:
+ - root
+ - "{{ ansible_ssh_user }}"
diff --git a/roles/openshift_register_nodes/library/kubernetes_register_node.py b/roles/openshift_register_nodes/library/kubernetes_register_node.py
index 1ec977716..afa9eb27d 100755
--- a/roles/openshift_register_nodes/library/kubernetes_register_node.py
+++ b/roles/openshift_register_nodes/library/kubernetes_register_node.py
@@ -1,12 +1,21 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# vim: expandtab:tabstop=4:shiftwidth=4
+#
+# disable pylint checks
+# temporarily disabled until items can be addressed:
+# fixme - until all TODO comments have been addressed
+# permanently disabled unless someone wants to refactor the object model:
+# too-few-public-methods
+# no-self-use
+# too-many-arguments
+# too-many-locals
+# too-many-branches
+# pylint:disable=fixme, too-many-arguments, no-self-use
+# pylint:disable=too-many-locals, too-many-branches, too-few-public-methods
+"""Ansible module to register a kubernetes node to the cluster"""
import os
-import multiprocessing
-import socket
-from subprocess import check_output, Popen
-from decimal import *
DOCUMENTATION = '''
---
@@ -93,73 +102,170 @@ EXAMPLES = '''
class ClientConfigException(Exception):
+ """Client Configuration Exception"""
pass
-class ClientConfig:
+class ClientConfig(object):
+ """ Representation of a client config
+
+ Attributes:
+ config (dict): dictionary representing the client configuration
+
+ Args:
+ client_opts (list of str): client options to use
+ module (AnsibleModule):
+
+ Raises:
+ ClientConfigException:
+ """
def __init__(self, client_opts, module):
kubectl = module.params['kubectl_cmd']
- _, output, error = module.run_command(kubectl + ["config", "view", "-o", "json"] + client_opts, check_rc = True)
+ _, output, _ = module.run_command((kubectl +
+ ["config", "view", "-o", "json"] +
+ client_opts), check_rc=True)
self.config = json.loads(output)
if not (bool(self.config['clusters']) or
bool(self.config['contexts']) or
bool(self.config['current-context']) or
bool(self.config['users'])):
- raise ClientConfigException(msg="Client config missing required " \
- "values",
- output=output)
+ raise ClientConfigException(
+ "Client config missing required values: %s" % output
+ )
def current_context(self):
+ """ Gets the current context for the client config
+
+ Returns:
+ str: The current context as set in the config
+ """
return self.config['current-context']
def section_has_value(self, section_name, value):
+ """ Test if specified section contains a value
+
+ Args:
+ section_name (str): config section to test
+ value (str): value to test if present
+ Returns:
+ bool: True if successful, false otherwise
+ """
section = self.config[section_name]
if isinstance(section, dict):
return value in section
else:
val = next((item for item in section
- if item['name'] == value), None)
+ if item['name'] == value), None)
return val is not None
def has_context(self, context):
+ """ Test if specified context exists in config
+
+ Args:
+ context (str): value to test if present
+ Returns:
+ bool: True if successful, false otherwise
+ """
return self.section_has_value('contexts', context)
def has_user(self, user):
+ """ Test if specified user exists in config
+
+ Args:
+            user (str): value to test if present
+ Returns:
+ bool: True if successful, false otherwise
+ """
return self.section_has_value('users', user)
def has_cluster(self, cluster):
+ """ Test if specified cluster exists in config
+
+ Args:
+            cluster (str): value to test if present
+ Returns:
+ bool: True if successful, false otherwise
+ """
return self.section_has_value('clusters', cluster)
def get_value_for_context(self, context, attribute):
+ """ Get the value of attribute in context
+
+ Args:
+ context (str): context to search
+ attribute (str): attribute wanted
+ Returns:
+ str: The value for attribute in context
+ """
contexts = self.config['contexts']
if isinstance(contexts, dict):
return contexts[context][attribute]
else:
return next((c['context'][attribute] for c in contexts
- if c['name'] == context), None)
+ if c['name'] == context), None)
def get_user_for_context(self, context):
+ """ Get the user attribute in context
+
+ Args:
+ context (str): context to search
+ Returns:
+ str: The value for the attribute in context
+ """
return self.get_value_for_context(context, 'user')
def get_cluster_for_context(self, context):
+ """ Get the cluster attribute in context
+
+ Args:
+ context (str): context to search
+ Returns:
+ str: The value for the attribute in context
+ """
return self.get_value_for_context(context, 'cluster')
def get_namespace_for_context(self, context):
+ """ Get the namespace attribute in context
+
+ Args:
+ context (str): context to search
+ Returns:
+ str: The value for the attribute in context
+ """
return self.get_value_for_context(context, 'namespace')
-class Util:
+class Util(object):
+ """Utility methods"""
@staticmethod
def remove_empty_elements(mapping):
+ """ Recursively removes empty elements from a dict
+
+ Args:
+ mapping (dict): dict to remove empty attributes from
+ Returns:
+ dict: A copy of the dict with empty elements removed
+ """
if isinstance(mapping, dict):
- m = mapping.copy()
+ copy = mapping.copy()
for key, val in mapping.iteritems():
if not val:
- del m[key]
- return m
+ del copy[key]
+ return copy
else:
return mapping
-class NodeResources:
+class NodeResources(object):
+ """ Kubernetes Node Resources
+
+ Attributes:
+ resources (dict): A dictionary representing the node resources
+
+ Args:
+ version (str): kubernetes api version
+ cpu (str): string representation of the cpu resources for the node
+ memory (str): string representation of the memory resources for the
+ node
+ """
def __init__(self, version, cpu=None, memory=None):
if version == 'v1beta1':
self.resources = dict(capacity=dict())
@@ -167,10 +273,31 @@ class NodeResources:
self.resources['capacity']['memory'] = memory
def get_resources(self):
+ """ Get the dict representing the node resources
+
+ Returns:
+ dict: representation of the node resources with any empty
+ elements removed
+ """
return Util.remove_empty_elements(self.resources)
-class NodeSpec:
- def __init__(self, version, cpu=None, memory=None, cidr=None, externalID=None):
+class NodeSpec(object):
+ """ Kubernetes Node Spec
+
+ Attributes:
+ spec (dict): A dictionary representing the node resources
+
+ Args:
+ version (str): kubernetes api version
+ cpu (str): string representation of the cpu resources for the node
+ memory (str): string representation of the memory resources for the
+ node
+ cidr (str): string representation of the cidr block available for
+ the node
+ externalID (str): The external id of the node
+ """
+ def __init__(self, version, cpu=None, memory=None, cidr=None,
+ externalID=None):
if version == 'v1beta3':
self.spec = dict(podCIDR=cidr, externalID=externalID,
capacity=dict())
@@ -178,67 +305,128 @@ class NodeSpec:
self.spec['capacity']['memory'] = memory
def get_spec(self):
+ """ Get the dict representing the node spec
+
+ Returns:
+ dict: representation of the node spec with any empty elements
+ removed
+ """
return Util.remove_empty_elements(self.spec)
-class NodeStatus:
- def addAddresses(self, addressType, addresses):
- addressList = []
+class NodeStatus(object):
+ """ Kubernetes Node Status
+
+ Attributes:
+ status (dict): A dictionary representing the node status
+
+ Args:
+ version (str): kubernetes api version
+ externalIPs (list, optional): externalIPs for the node
+ internalIPs (list, optional): internalIPs for the node
+ hostnames (list, optional): hostnames for the node
+ """
+ def add_addresses(self, address_type, addresses):
+ """ Adds addresses of the specified type
+
+ Args:
+ address_type (str): address type
+ addresses (list): addresses to add
+ """
+ address_list = []
for address in addresses:
- addressList.append(dict(type=addressType, address=address))
- return addressList
+ address_list.append(dict(type=address_type, address=address))
+ return address_list
- def __init__(self, version, externalIPs = [], internalIPs = [],
- hostnames = []):
+ def __init__(self, version, externalIPs=None, internalIPs=None,
+ hostnames=None):
if version == 'v1beta3':
- self.status = dict(addresses = addAddresses('ExternalIP',
- externalIPs) +
- addAddresses('InternalIP',
- internalIPs) +
- addAddresses('Hostname',
- hostnames))
+ addresses = []
+ if externalIPs is not None:
+ addresses += self.add_addresses('ExternalIP', externalIPs)
+ if internalIPs is not None:
+ addresses += self.add_addresses('InternalIP', internalIPs)
+ if hostnames is not None:
+ addresses += self.add_addresses('Hostname', hostnames)
+
+ self.status = dict(addresses=addresses)
def get_status(self):
+ """ Get the dict representing the node status
+
+ Returns:
+ dict: representation of the node status with any empty elements
+ removed
+ """
return Util.remove_empty_elements(self.status)
-class Node:
- def __init__(self, module, client_opts, version='v1beta1', name=None,
- hostIP = None, hostnames=[], externalIPs=[], internalIPs=[],
- cpu=None, memory=None, labels=dict(), annotations=dict(),
- podCIDR=None, externalID=None):
+class Node(object):
+ """ Kubernetes Node
+
+ Attributes:
+        node (dict): A dictionary representing the node
+
+ Args:
+        module (AnsibleModule): the calling Ansible module
+ client_opts (list): client connection options
+ version (str, optional): kubernetes api version
+ node_name (str, optional): name for node
+ hostIP (str, optional): node host ip
+ hostnames (list, optional): hostnames for the node
+ externalIPs (list, optional): externalIPs for the node
+ internalIPs (list, optional): internalIPs for the node
+ cpu (str, optional): cpu resources for the node
+ memory (str, optional): memory resources for the node
+        labels (dict, optional): labels for the node
+        annotations (dict, optional): annotations for the node
+        podCIDR (str, optional): cidr block to use for pods
+ externalID (str, optional): external id of the node
+ """
+ def __init__(self, module, client_opts, version='v1beta1', node_name=None,
+ hostIP=None, hostnames=None, externalIPs=None,
+ internalIPs=None, cpu=None, memory=None, labels=None,
+ annotations=None, podCIDR=None, externalID=None):
self.module = module
self.client_opts = client_opts
if version == 'v1beta1':
- self.node = dict(id = name,
- kind = 'Node',
- apiVersion = version,
- hostIP = hostIP,
- resources = NodeResources(version, cpu, memory),
- cidr = podCIDR,
- labels = labels,
- annotations = annotations,
- externalID = externalID
- )
+ self.node = dict(id=node_name,
+ kind='Node',
+ apiVersion=version,
+ hostIP=hostIP,
+ resources=NodeResources(version, cpu, memory),
+ cidr=podCIDR,
+ labels=labels,
+ annotations=annotations,
+ externalID=externalID)
elif version == 'v1beta3':
- metadata = dict(name = name,
- labels = labels,
- annotations = annotations
- )
- self.node = dict(kind = 'Node',
- apiVersion = version,
- metadata = metadata,
- spec = NodeSpec(version, cpu, memory, podCIDR,
- externalID),
- status = NodeStatus(version, externalIPs,
- internalIPs, hostnames),
- )
+ metadata = dict(name=node_name,
+ labels=labels,
+ annotations=annotations)
+ self.node = dict(kind='Node',
+ apiVersion=version,
+ metadata=metadata,
+ spec=NodeSpec(version, cpu, memory, podCIDR,
+ externalID),
+ status=NodeStatus(version, externalIPs,
+ internalIPs, hostnames))
def get_name(self):
+ """ Get the name for the node
+
+ Returns:
+ str: node name
+ """
if self.node['apiVersion'] == 'v1beta1':
return self.node['id']
elif self.node['apiVersion'] == 'v1beta3':
return self.node['metadata']['name']
def get_node(self):
+ """ Get the dict representing the node
+
+ Returns:
+ dict: representation of the node with any empty elements
+ removed
+ """
node = self.node.copy()
if self.node['apiVersion'] == 'v1beta1':
node['resources'] = self.node['resources'].get_resources()
@@ -248,54 +436,82 @@ class Node:
return Util.remove_empty_elements(node)
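
get_node works on a copy so the helper objects stay intact, swapping in their
plain-dict forms before pruning (the v1beta3 branch elided by the hunk
presumably does the same for spec and status). For the v1beta1 default, the
payload handed to kubectl would look roughly like this, with illustrative
values:

    {'id': 'node-1', 'kind': 'Node', 'apiVersion': 'v1beta1',
     'hostIP': '10.0.0.5',
     'resources': {'capacity': {'cpu': '500m', 'memory': '1Gi'}},
     'labels': {'region': 'infra'}}
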
def exists(self):
+ """ Tests if the node already exists
+
+ Returns:
+ bool: True if node exists, otherwise False
+ """
kubectl = self.module.params['kubectl_cmd']
- _, output, error = self.module.run_command(kubectl + ["get", "nodes"] + self.client_opts, check_rc = True)
+ _, output, _ = self.module.run_command((kubectl + ["get", "nodes"] +
+ self.client_opts),
+ check_rc=True)
if re.search(self.module.params['name'], output, re.MULTILINE):
return True
return False
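
One caveat exists() leaves in place: the node name is dropped into the regex
unescaped, so a name containing metacharacters (the dots in an FQDN, say)
matches more liberally than intended. A stricter variant, assuming kubectl
prints one node name at the start of each line, might read:

    pattern = r'^%s\b' % re.escape(self.module.params['name'])
    if re.search(pattern, output, re.MULTILINE):
        return True
    return False
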
def create(self):
+ """ Creates the node
+
+ Returns:
+ bool: True if node creation successful
+ """
kubectl = self.module.params['kubectl_cmd']
cmd = kubectl + self.client_opts + ['create', '-f', '-']
- rc, output, error = self.module.run_command(cmd,
- data=self.module.jsonify(self.get_node()))
- if rc != 0:
+ exit_code, output, error = self.module.run_command(
+ cmd, data=self.module.jsonify(self.get_node())
+ )
+ if exit_code != 0:
if re.search("minion \"%s\" already exists" % self.get_name(),
error):
- self.module.exit_json(changed=False,
- msg="node definition already exists",
- node=self.get_node())
+ self.module.exit_json(msg="node definition already exists",
+ changed=False, node=self.get_node())
else:
- self.module.fail_json(msg="Node creation failed.", rc=rc,
- output=output, error=error,
- node=self.get_node())
+ self.module.fail_json(msg="Node creation failed.",
+ exit_code=exit_code,
+ output=output, error=error,
+ node=self.get_node())
else:
return True
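
create() feeds the JSON-serialized node to the client over stdin via
run_command's data argument. A rough standalone equivalent, with a made-up
kubeconfig path and node_dict standing in for self.get_node():

    import json
    import subprocess

    cmd = ['kubectl', '--kubeconfig=/root/.kube/.kubeconfig',
           'create', '-f', '-']
    proc = subprocess.Popen(cmd, stdin=subprocess.PIPE)
    proc.communicate(json.dumps(node_dict))
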
def main():
+ """ main """
module = AnsibleModule(
- argument_spec = dict(
- name = dict(required = True, type = 'str'),
- host_ip = dict(type = 'str'),
- hostnames = dict(type = 'list', default = []),
- external_ips = dict(type = 'list', default = []),
- internal_ips = dict(type = 'list', default = []),
- api_version = dict(type = 'str', default = 'v1beta1', # TODO: after kube rebase, we can default to v1beta3
- choices = ['v1beta1', 'v1beta3']),
- cpu = dict(type = 'str'),
- memory = dict(type = 'str'),
- labels = dict(type = 'dict', default = {}), # TODO: needs documented
- annotations = dict(type = 'dict', default = {}), # TODO: needs documented
- pod_cidr = dict(type = 'str'), # TODO: needs documented
- external_id = dict(type = 'str'), # TODO: needs documented
- client_config = dict(type = 'str'), # TODO: needs documented
- client_cluster = dict(type = 'str', default = 'master'), # TODO: needs documented
- client_context = dict(type = 'str', default = 'default'), # TODO: needs documented
- client_namespace = dict(type = 'str', default = 'default'), # TODO: needs documented
- client_user = dict(type = 'str', default = 'system:openshift-client'), # TODO: needs documented
- kubectl_cmd = dict(type = 'list', default = ['kubectl']) # TODO: needs documented
+ argument_spec=dict(
+ name=dict(required=True, type='str'),
+ host_ip=dict(type='str'),
+ hostnames=dict(type='list', default=[]),
+ external_ips=dict(type='list', default=[]),
+ internal_ips=dict(type='list', default=[]),
+ api_version=dict(type='str', default='v1beta1',
+ choices=['v1beta1', 'v1beta3']),
+ cpu=dict(type='str'),
+ memory=dict(type='str'),
+            # TODO: needs to be documented
+            labels=dict(type='dict', default={}),
+            # TODO: needs to be documented
+            annotations=dict(type='dict', default={}),
+            # TODO: needs to be documented
+            pod_cidr=dict(type='str'),
+            # TODO: needs to be documented
+            external_id=dict(type='str'),
+            # TODO: needs to be documented
+            client_config=dict(type='str'),
+            # TODO: needs to be documented
+            client_cluster=dict(type='str', default='master'),
+            # TODO: needs to be documented
+            client_context=dict(type='str', default='default'),
+            # TODO: needs to be documented
+            client_namespace=dict(type='str', default='default'),
+            # TODO: needs to be documented
+            client_user=dict(type='str', default='system:openshift-client'),
+            # TODO: needs to be documented
+            kubectl_cmd=dict(type='list', default=['kubectl']),
+            # TODO: needs to be documented
+            kubeconfig_flag=dict(type='str'),
+            # TODO: needs to be documented
+            default_client_config=dict(type='str')
),
- mutually_exclusive = [
+ mutually_exclusive=[
['host_ip', 'external_ips'],
['host_ip', 'internal_ips'],
['host_ip', 'hostnames'],
@@ -303,7 +519,10 @@ def main():
supports_check_mode=True
)
- user_has_client_config = os.path.exists(os.path.expanduser('~/.kube/.kubeconfig'))
+ client_config = '~/.kube/.kubeconfig'
+    # module.params always contains declared keys (value None when unset),
+    # so test the value rather than key membership
+    if module.params['default_client_config'] is not None:
+        client_config = module.params['default_client_config']
+ user_has_client_config = os.path.exists(os.path.expanduser(client_config))
if not (user_has_client_config or module.params['client_config']):
module.fail_json(msg="Could not locate client configuration, "
"client_config must be specified if "
@@ -311,12 +530,17 @@ def main():
client_opts = []
if module.params['client_config']:
- client_opts.append("--kubeconfig=%s" % module.params['client_config'])
+ kubeconfig_flag = '--kubeconfig'
+        # as above, check the value, not membership, so an unset
+        # kubeconfig_flag does not clobber the default with None
+        if module.params['kubeconfig_flag'] is not None:
+            kubeconfig_flag = module.params['kubeconfig_flag']
+ client_opts.append(kubeconfig_flag + '=' +
+ os.path.expanduser(module.params['client_config']))
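
When client_config is supplied, this branch reduces to a single option string;
a condensed sketch for an illustrative root user:

    import os

    kubeconfig_flag = '--kubeconfig'        # module default
    client_config = '~/.kube/.kubeconfig'   # made-up example value
    client_opts = [kubeconfig_flag + '=' + os.path.expanduser(client_config)]
    # -> ['--kubeconfig=/root/.kube/.kubeconfig']
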
try:
config = ClientConfig(client_opts, module)
- except ClientConfigException as e:
- module.fail_json(msg="Failed to get client configuration", exception=e)
+ except ClientConfigException as ex:
+ module.fail_json(msg="Failed to get client configuration",
+ exception=str(ex))
client_context = module.params['client_context']
if config.has_context(client_context):
@@ -369,7 +593,8 @@ def main():
module.fail_json(msg="Unknown error creating node",
node=node.get_node())
-
+# ignore pylint errors related to the module_utils import
+# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
diff --git a/roles/openshift_register_nodes/tasks/main.yml b/roles/openshift_register_nodes/tasks/main.yml
index 85bead16d..d4d72d126 100644
--- a/roles/openshift_register_nodes/tasks/main.yml
+++ b/roles/openshift_register_nodes/tasks/main.yml
@@ -39,7 +39,8 @@
- name: Register unregistered nodes
kubernetes_register_node:
- kubectl_cmd: ['openshift', 'kube']
+ kubectl_cmd: ['osc']
+ default_client_config: '~/.config/openshift/.config'
name: "{{ item.openshift.common.hostname }}"
api_version: "{{ openshift_kube_api_version }}"
cpu: "{{ item.openshift.node.resources_cpu | default(None) }}"