-rw-r--r--  .coveragerc | 5
-rw-r--r--  .github/ISSUE_TEMPLATE.md | 31
-rw-r--r--  .gitignore | 2
-rw-r--r--  .pylintrc (renamed from git/.pylintrc) | 173
-rw-r--r--  .travis.yml | 4
-rw-r--r--  .yamllint (renamed from git/.yamllint) | 0
-rw-r--r--  CONTRIBUTING.md | 49
-rwxr-xr-x  git/parent.py | 97
-rwxr-xr-x  git/pylint.sh | 51
-rwxr-xr-x  git/yaml_validation.py | 73
-rwxr-xr-x  inventory/gce/hosts/gce.py | 252
-rwxr-xr-x  inventory/libvirt/hosts/libvirt_generic.py | 10
-rw-r--r--  openshift-ansible.spec | 1
-rw-r--r--  playbooks/adhoc/uninstall.yml | 274
-rw-r--r--  playbooks/common/openshift-cluster/openshift_hosted.yml | 21
-rw-r--r--  playbooks/common/openshift-master/config.yml | 31
-rw-r--r--  playbooks/common/openshift-node/config.yml | 48
-rw-r--r--  requirements.txt | 4
-rw-r--r--  roles/openshift_certificate_expiry/library/openshift_cert_expiry.py | 25
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py | 33
-rw-r--r--  roles/openshift_hosted/meta/main.yml | 21
-rw-r--r--  roles/openshift_master/meta/main.yml | 31
-rw-r--r--  roles/openshift_node/meta/main.yml | 24
-rw-r--r--  setup.cfg | 27
-rw-r--r--  setup.py | 191
-rw-r--r--  test-requirements.txt | 11
-rw-r--r--  tox.ini | 19
l---------  utils/.pylintrc | 1
-rw-r--r--  utils/Makefile | 30
-rw-r--r--  utils/README.md | 41
-rw-r--r--  utils/setup.cfg | 4
-rw-r--r--  utils/test-requirements.txt | 2
-rw-r--r--  utils/test/openshift_ansible_tests.py | 71
-rw-r--r--  utils/test/test_utils.py | 1
-rw-r--r--  utils/tox.ini | 5
35 files changed, 1042 insertions(+), 621 deletions(-)
diff --git a/.coveragerc b/.coveragerc
new file mode 100644
index 000000000..e1d918755
--- /dev/null
+++ b/.coveragerc
@@ -0,0 +1,5 @@
+[run]
+omit=
+ */lib/python*/site-packages/*
+ */lib/python*/*
+ /usr/*
diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md
index 627fa13eb..326e75c7e 100644
--- a/.github/ISSUE_TEMPLATE.md
+++ b/.github/ISSUE_TEMPLATE.md
@@ -1,21 +1,34 @@
-[provide a description of the issue]
+#### Description
+Please provide a brief description of your issue.
##### Version
-[if you're operating from a git clone provide the output of `git describe`]
-[if you're running from playbooks installed via RPM or atomic-openshift-utils `rpm -q atomic-openshift-utils openshift-ansible`]
-[Your version of ansible, `ansible --version`]
-
+If you're operating from a git clone, provide the output of `git describe`. If
+you're running from playbooks installed via RPM or atomic-openshift-utils,
+provide the output of `rpm -q atomic-openshift-utils openshift-ansible`. Please
+also provide your version of ansible, `ansible --version`, and place the output
+in the code block below.
+```
+Please place output here
+```
##### Steps To Reproduce
1. [step 1]
2. [step 2]
+
##### Current Result
+```
+Example command and output or error messages
+```
##### Expected Result
+```
+Example command and output or error messages
+```
##### Additional Information
-[The exact command you ran]
-[Your operating system and version, ie: RHEL 7.2, Fedora 23]
-[Your inventory file]
-[visit https://docs.openshift.org/latest/welcome/index.html]
+```
Your operating system and version, ie: RHEL 7.2, Fedora 23
+Your inventory file
Sample code, etc
+```
diff --git a/.gitignore b/.gitignore
index ac249d5eb..9af271235 100644
--- a/.gitignore
+++ b/.gitignore
@@ -25,3 +25,5 @@ ansible.cfg
.tox
.coverage
*.egg-info
+.eggs
+cover
diff --git a/git/.pylintrc b/.pylintrc
index 411330fe7..a32bd3d68 100644
--- a/git/.pylintrc
+++ b/.pylintrc
@@ -1,5 +1,4 @@
[MASTER]
-
# Specify a configuration file.
#rcfile=
@@ -7,12 +6,9 @@
# pygtk.require().
#init-hook=
-# Profiled execution.
-#profile=no
-
# Add files or directories to the blacklist. They should be base names, not
# paths.
-ignore=CVS
+ignore=CVS,setup.py
# Pickle collected data for later comparisons.
persistent=no
@@ -21,14 +17,6 @@ persistent=no
# usually to register additional checkers.
load-plugins=
-# Deprecated. It was used to include message's id in output. Use --msg-template
-# instead.
-#include-ids=no
-
-# Deprecated. It was used to include symbolic ids of messages in output. Use
-# --msg-template instead.
-#symbols=no
-
# Use multiple processes to speed up Pylint.
jobs=1
@@ -58,7 +46,8 @@ confidence=
# Enable the message, report, category or checker with the given id(s). You can
# either give multiple identifier separated by comma (,) or put this option
-# multiple time. See also the "--disable" option for examples.
+# multiple time (only on the command line, not in the configuration file where
+# it should appear only once). See also the "--disable" option for examples.
#enable=
# Disable the message, report, category or checker with the given id(s). You
@@ -70,8 +59,7 @@ confidence=
# --enable=similarities". If you want to run only the classes checker, but have
# no Warning level messages displayed, use"--disable=all --enable=classes
# --disable=W"
-# w0511 - fixme - disabled because TODOs are acceptable
-disable=E1608,W1627,E1601,E1603,E1602,E1605,E1604,E1607,E1606,W1621,W1620,W1623,W1622,W1625,W1624,W1609,W1608,W1607,W1606,W1605,W1604,W1603,W1602,W1601,W1639,W1640,I0021,W1638,I0020,W1618,W1619,W1630,W1626,W1637,W1634,W1635,W1610,W1611,W1612,W1613,W1614,W1615,W1616,W1617,W1632,W1633,W0704,W1628,W1629,W1636,W0511,R0801,locally-disabled,file-ignored
+disable=import-star-module-level,old-octal-literal,oct-method,print-statement,unpacking-in-except,parameter-unpacking,backtick,old-raise-syntax,old-ne-operator,long-suffix,dict-view-method,dict-iter-method,metaclass-assignment,next-method-called,raising-string,indexing-exception,raw_input-builtin,long-builtin,file-builtin,execfile-builtin,coerce-builtin,cmp-builtin,buffer-builtin,basestring-builtin,apply-builtin,filter-builtin-not-iterating,using-cmp-argument,useless-suppression,range-builtin-not-iterating,suppressed-message,no-absolute-import,old-division,cmp-method,reload-builtin,zip-builtin-not-iterating,intern-builtin,unichr-builtin,reduce-builtin,standarderror-builtin,unicode-builtin,xrange-builtin,coerce-method,delslice-method,getslice-method,setslice-method,input-builtin,round-builtin,hex-method,nonzero-method,map-builtin-not-iterating
[REPORTS]
@@ -96,20 +84,24 @@ reports=no
# (RP0004).
evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
-# Add a comment according to your evaluation note. This is used by the global
-# evaluation report (RP0004).
-#comment=no
-
# Template used to display messages. This is a python new-style format string
# used to format the message information. See doc for all details
#msg-template=
-[LOGGING]
+[SIMILARITIES]
-# Logging modules to check that the string format arguments are in logging
-# function parameter format
-logging-modules=logging
+# Minimum lines number of a similarity.
+min-similarity-lines=4
+
+# Ignore comments when computing similarities.
+ignore-comments=yes
+
+# Ignore docstrings when computing similarities.
+ignore-docstrings=yes
+
+# Ignore imports when computing similarities.
+ignore-imports=yes
[BASIC]
@@ -192,44 +184,23 @@ method-name-hint=[a-z_][a-z0-9_]{2,30}$
# Regular expression which should only match function or class names that do
# not require a docstring.
-no-docstring-rgx=__.*__
+no-docstring-rgx=^_
# Minimum line length for functions/classes that require docstrings, shorter
# ones are exempt.
docstring-min-length=-1
-[SIMILARITIES]
-
-# Minimum lines number of a similarity.
-min-similarity-lines=0
-
-# Ignore comments when computing similarities.
-ignore-comments=yes
-
-# Ignore docstrings when computing similarities.
-ignore-docstrings=yes
-
-# Ignore imports when computing similarities.
-ignore-imports=yes
-
-
-[VARIABLES]
+[ELIF]
-# Tells whether we should check for unused import in __init__ files.
-init-import=no
+# Maximum number of nested blocks for function / method body
+max-nested-blocks=5
-# A regular expression matching the name of dummy variables (i.e. expectedly
-# not used).
-dummy-variables-rgx=_$|dummy
-# List of additional names supposed to be defined in builtins. Remember that
-# you should avoid to define new builtins when possible.
-additional-builtins=
+[MISCELLANEOUS]
-# List of strings which can identify a callback function by name. A callback
-# name must start or end with one of those strings.
-callbacks=cb_,_cb
+# List of note tags to take in consideration, separated by a comma.
+notes=FIXME,XXX,TODO
[TYPECHECK]
@@ -240,27 +211,30 @@ ignore-mixin-members=yes
# List of module names for which member attributes should not be checked
# (useful for modules/projects where namespaces are manipulated during runtime
-# and thus existing member attributes cannot be deduced by static analysis
+# and thus existing member attributes cannot be deduced by static analysis. It
+# supports qualified module names, as well as Unix pattern matching.
ignored-modules=
# List of classes names for which member attributes should not be checked
-# (useful for classes with attributes dynamically set).
-ignored-classes=SQLObject
-
-# When zope mode is activated, add a predefined set of Zope acquired attributes
-# to generated-members.
-#zope=no
+# (useful for classes with attributes dynamically set). This supports can work
+# with qualified names.
+ignored-classes=
# List of members which are set dynamically and missed by pylint inference
-# system, and so shouldn't trigger E0201 when accessed. Python regular
+# system, and so shouldn't trigger E1101 when accessed. Python regular
# expressions are accepted.
-generated-members=REQUEST,acl_users,aq_parent
+generated-members=
[SPELLING]
-# Spelling dictionary name. Available dictionaries: none. To make it working
-# install python-enchant package.
+# Spelling dictionary name. Available dictionaries: en_ZW (myspell), en_NG
+# (myspell), en_NA (myspell), en_NZ (myspell), en_PH (myspell), en_AG
+# (myspell), en_BW (myspell), en_IE (myspell), en_ZM (myspell), en_DK
+# (myspell), en_CA (myspell), en_GH (myspell), en_IN (myspell), en_BZ
+# (myspell), en_MW (myspell), en_TT (myspell), en_JM (myspell), en_GB
+# (myspell), en_ZA (myspell), en_SG (myspell), en_AU (myspell), en_US
+# (myspell), en_BS (myspell), en_HK (myspell).
spelling-dict=
# List of comma separated words that should not be checked.
@@ -274,12 +248,6 @@ spelling-private-dict-file=
spelling-store-unknown-words=no
-[MISCELLANEOUS]
-
-# List of note tags to take in consideration, separated by a comma.
-notes=FIXME,XXX,TODO
-
-
[FORMAT]
# Maximum number of characters on a single line.
@@ -292,23 +260,67 @@ ignore-long-lines=^\s*(# )?<?https?://\S+>?$
# else.
single-line-if-stmt=no
-# List of optional constructs for which whitespace checking is disabled
+# List of optional constructs for which whitespace checking is disabled. `dict-
+# separator` is used to allow tabulation in dicts, etc.: {1 : 1,\n222: 2}.
+# `trailing-comma` allows a space between comma and closing bracket: (a, ).
+# `empty-line` allows space-only lines.
no-space-check=trailing-comma,dict-separator
# Maximum number of lines in a module
max-module-lines=1000
-# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1
+# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1
# tab).
indent-string=' '
-# Number of spaces of indent required inside a hanging or continued line.
+# Number of spaces of indent required inside a hanging or continued line.
indent-after-paren=4
# Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
expected-line-ending-format=
+[VARIABLES]
+
+# Tells whether we should check for unused import in __init__ files.
+init-import=no
+
+# A regular expression matching the name of dummy variables (i.e. expectedly
+# not used).
+dummy-variables-rgx=_$|dummy
+
+# List of additional names supposed to be defined in builtins. Remember that
+# you should avoid to define new builtins when possible.
+additional-builtins=
+
+# List of strings which can identify a callback function by name. A callback
+# name must start or end with one of those strings.
+callbacks=cb_,_cb
+
+
+[LOGGING]
+
+# Logging modules to check that the string format arguments are in logging
+# function parameter format
+logging-modules=logging
+
+
+[CLASSES]
+
+# List of method names used to declare (i.e. assign) instance attributes.
+defining-attr-methods=__init__,__new__,setUp
+
+# List of valid names for the first argument in a class method.
+valid-classmethod-first-arg=cls
+
+# List of valid names for the first argument in a metaclass class method.
+valid-metaclass-classmethod-first-arg=mcs
+
+# List of member names, which should be excluded from the protected access
+# warning.
+exclude-protected=_asdict,_fields,_replace,_source,_make
+
+
[DESIGN]
# Maximum number of arguments for function / method
@@ -342,21 +354,8 @@ min-public-methods=2
# Maximum number of public methods for a class (see R0904).
max-public-methods=20
-
-[CLASSES]
-
-# List of method names used to declare (i.e. assign) instance attributes.
-defining-attr-methods=__init__,__new__,setUp
-
-# List of valid names for the first argument in a class method.
-valid-classmethod-first-arg=cls
-
-# List of valid names for the first argument in a metaclass class method.
-valid-metaclass-classmethod-first-arg=mcs
-
-# List of member names, which should be excluded from the protected access
-# warning.
-exclude-protected=_asdict,_fields,_replace,_source,_make
+# Maximum number of boolean expressions in a if statement
+max-bool-expr=5
[IMPORTS]
diff --git a/.travis.yml b/.travis.yml
index 0e3a75df7..f0a228c23 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -11,8 +11,10 @@ python:
install:
- pip install -r requirements.txt
+ - pip install tox-travis
script:
# TODO(rhcarvalho): check syntax of other important entrypoint playbooks
- ansible-playbook --syntax-check playbooks/byo/config.yml
- - cd utils && make ci
+ - tox
+ - cd utils && tox
diff --git a/git/.yamllint b/.yamllint
index 573321a94..573321a94 100644
--- a/git/.yamllint
+++ b/.yamllint
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 1145da495..83c844e28 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -66,30 +66,55 @@ These are plugins used in playbooks and roles:
└── test Contains tests.
```
-### Others
-
-```
-.
-└── git Contains some helper scripts for repository maintenance.
-```
-
## Building RPMs
See the [RPM build instructions](BUILD.md).
## Running tests
-We use [Nose](http://readthedocs.org/docs/nose/) as a test runner. Make sure it
-is installed along with other test dependencies:
+This section covers how to run tests for the root of this repo; running tests
+for the oo-install wrapper is described in [utils/README.md](utils/README.md).
+
+We use [tox](http://readthedocs.org/docs/tox/) to manage virtualenvs and run
+tests. Alternatively, tests can be run using
+[detox](https://pypi.python.org/pypi/detox/), which allows for running tests in
+parallel.
+
```
-pip install -r utils/test-requirements.txt
+pip install tox detox
```
-Run the tests with:
+List the test environments available:
+```
+tox -l
+```
+
+Run all of the tests with:
+```
+tox
+```
+
+Run all of the tests in parallel with detox:
+```
+detox
+```
+
+Running a particular test environment (python 2.7 flake8 tests in this case):
+```
+tox -e py27-ansible22-flake8
+```
+
+Running a particular test environment in a clean virtualenv (python 3.5 pylint
+tests in this case):
+```
+tox -r -e py35-ansible22-pylint
+```
+If you want to enter the virtualenv created by tox to do additional
+testing/debugging (py27-ansible22-flake8 env in this case):
```
-nosetests
+source .tox/py27-ansible22-flake8/bin/activate
```
## Submitting contributions
diff --git a/git/parent.py b/git/parent.py
deleted file mode 100755
index 92f57df3e..000000000
--- a/git/parent.py
+++ /dev/null
@@ -1,97 +0,0 @@
-#!/usr/bin/env python
-# flake8: noqa
-# pylint: skip-file
-'''
- Script to determine if this commit has also
- been merged through the stage branch
-'''
-#
-# Usage:
-# parent_check.py <branch> <commit_id>
-#
-#
-import sys
-import subprocess
-
-def run_cli_cmd(cmd, in_stdout=None, in_stderr=None):
- '''Run a command and return its output'''
- if not in_stderr:
- proc = subprocess.Popen(cmd, bufsize=-1, stderr=subprocess.PIPE, stdout=subprocess.PIPE, shell=False)
- else:
- proc = subprocess.check_output(cmd, bufsize=-1, stdout=in_stdout, stderr=in_stderr, shell=False)
- stdout, stderr = proc.communicate()
- if proc.returncode != 0:
- return {"rc": proc.returncode, "error": stderr}
- else:
- return {"rc": proc.returncode, "result": stdout}
-
-def main():
- '''Check to ensure that the commit that is currently
- being submitted is also in the stage branch.
-
- if it is, succeed
- else, fail
- '''
- branch = 'prod'
-
- if sys.argv[1] != branch:
- sys.exit(0)
-
- # git co stg
- results = run_cli_cmd(['/usr/bin/git', 'checkout', 'stg'])
-
- # git pull latest
- results = run_cli_cmd(['/usr/bin/git', 'pull'])
-
- # setup on the <prod> branch in git
- results = run_cli_cmd(['/usr/bin/git', 'checkout', 'prod'])
-
- results = run_cli_cmd(['/usr/bin/git', 'pull'])
- # merge the passed in commit into my current <branch>
-
- commit_id = sys.argv[2]
- results = run_cli_cmd(['/usr/bin/git', 'merge', commit_id])
-
- # get the differences from stg and <branch>
- results = run_cli_cmd(['/usr/bin/git', 'rev-list', '--left-right', 'stg...prod'])
-
- # exit here with error code if the result coming back is an error
- if results['rc'] != 0:
- print results['error']
- sys.exit(results['rc'])
-
- count = 0
- # Each 'result' is a commit
- # Walk through each commit and see if it is in stg
- for commit in results['result'].split('\n'):
-
- # continue if it is already in stg
- if not commit or commit.startswith('<'):
- continue
-
- # remove the first char '>'
- commit = commit[1:]
-
- # check if any remote branches contain $commit
- results = run_cli_cmd(['/usr/bin/git', 'branch', '-q', '-r', '--contains', commit], in_stderr=None)
-
- # if this comes back empty, nothing contains it, we can skip it as
- # we have probably created the merge commit here locally
- if results['rc'] == 0 and len(results['result']) == 0:
- continue
-
- # The results generally contain origin/pr/246/merge and origin/pr/246/head
- # this is the pull request which would contain the commit in question.
- #
- # If the results do not contain origin/stg then stage does not contain
- # the commit in question. Therefore we need to alert!
- if 'origin/stg' not in results['result']:
- print "\nFAILED: (These commits are not in stage.)\n"
- print "\t%s" % commit
- count += 1
-
- # Exit with count of commits in #{branch} but not stg
- sys.exit(count)
-
-if __name__ == '__main__':
- main()
diff --git a/git/pylint.sh b/git/pylint.sh
deleted file mode 100755
index 3acf9cc8c..000000000
--- a/git/pylint.sh
+++ /dev/null
@@ -1,51 +0,0 @@
-#!/usr/bin/env bash
-set -eu
-
-ANSIBLE_UPSTREAM_FILES=(
- 'inventory/aws/hosts/ec2.py'
- 'inventory/gce/hosts/gce.py'
- 'inventory/libvirt/hosts/libvirt_generic.py'
- 'inventory/openstack/hosts/nova.py'
- 'lookup_plugins/sequence.py'
- 'playbooks/gce/openshift-cluster/library/gce.py'
- )
-
-OLDREV=$1
-NEWREV=$2
-#TRG_BRANCH=$3
-
-PYTHON=$(which python)
-
-set +e
-PY_DIFF=$(/usr/bin/git diff --name-only $OLDREV $NEWREV --diff-filter=ACM | grep ".py$")
-set -e
-
-FILES_TO_TEST=""
-
-for PY_FILE in $PY_DIFF; do
- IGNORE_FILE=false
- for UPSTREAM_FILE in "${ANSIBLE_UPSTREAM_FILES[@]}"; do
- if [ "${PY_FILE}" == "${UPSTREAM_FILE}" ]; then
- IGNORE_FILE=true
- break
- fi
- done
-
- if [ "${IGNORE_FILE}" == true ]; then
- echo "Skipping file ${PY_FILE} as an upstream Ansible file..."
- continue
- fi
-
- if [ -e "${PY_FILE}" ]; then
- FILES_TO_TEST="${FILES_TO_TEST} ${PY_FILE}"
- fi
-done
-
-export PYTHONPATH=${WORKSPACE}/utils/src/:${WORKSPACE}/utils/test/
-
-if [ "${FILES_TO_TEST}" != "" ]; then
- echo "Testing files: ${FILES_TO_TEST}"
- exec ${PYTHON} -m pylint --rcfile ${WORKSPACE}/git/.pylintrc ${FILES_TO_TEST}
-else
- exit 0
-fi
diff --git a/git/yaml_validation.py b/git/yaml_validation.py
deleted file mode 100755
index 6672876bb..000000000
--- a/git/yaml_validation.py
+++ /dev/null
@@ -1,73 +0,0 @@
-#!/usr/bin/env python
-# flake8: noqa
-#
-# python yaml validator for a git commit
-#
-'''
-python yaml validator for a git commit
-'''
-import shutil
-import sys
-import os
-import tempfile
-import subprocess
-import yaml
-
-def get_changes(oldrev, newrev, tempdir):
- '''Get a list of git changes from oldrev to newrev'''
- proc = subprocess.Popen(['/usr/bin/git', 'diff', '--name-only', oldrev,
- newrev, '--diff-filter=ACM'], stdout=subprocess.PIPE)
- stdout, _ = proc.communicate()
- files = stdout.split('\n')
-
- # No file changes
- if not files:
- return []
-
- cmd = '/usr/bin/git archive %s %s | /bin/tar x -C %s' % (newrev, " ".join(files), tempdir)
- proc = subprocess.Popen(cmd, shell=True)
- _, _ = proc.communicate()
-
- rfiles = []
- for dirpath, _, fnames in os.walk(tempdir):
- for fname in fnames:
- rfiles.append(os.path.join(dirpath, fname))
-
- return rfiles
-
-def main():
- '''
- Perform yaml validation
- '''
- results = []
- try:
- tmpdir = tempfile.mkdtemp(prefix='jenkins-git-')
- old, new, _ = sys.argv[1:]
-
- for file_mod in get_changes(old, new, tmpdir):
-
- print "+++++++ Received: %s" % file_mod
-
- # if the file extensions is not yml or yaml, move along.
- if not file_mod.endswith('.yml') and not file_mod.endswith('.yaml'):
- continue
-
- # We use symlinks in our repositories, ignore them.
- if os.path.islink(file_mod):
- continue
-
- try:
- yaml.load(open(file_mod))
- results.append(True)
-
- except yaml.scanner.ScannerError as yerr:
- print yerr
- results.append(False)
- finally:
- shutil.rmtree(tmpdir)
-
- if not all(results):
- sys.exit(1)
-
-if __name__ == "__main__":
- main()
diff --git a/inventory/gce/hosts/gce.py b/inventory/gce/hosts/gce.py
index cce3c5f35..2be46a58c 100755
--- a/inventory/gce/hosts/gce.py
+++ b/inventory/gce/hosts/gce.py
@@ -70,7 +70,8 @@ Examples:
$ contrib/inventory/gce.py --host my_instance
Author: Eric Johnson <erjohnso@google.com>
-Version: 0.0.1
+Contributors: Matt Hite <mhite@hotmail.com>, Tom Melendez <supertom@google.com>
+Version: 0.0.3
'''
__requires__ = ['pycrypto>=2.6']
@@ -84,13 +85,19 @@ except ImportError:
pass
USER_AGENT_PRODUCT="Ansible-gce_inventory_plugin"
-USER_AGENT_VERSION="v1"
+USER_AGENT_VERSION="v2"
import sys
import os
import argparse
+
+from time import time
+
import ConfigParser
+import logging
+logging.getLogger('libcloud.common.google').addHandler(logging.NullHandler())
+
try:
import json
except ImportError:
@@ -101,31 +108,103 @@ try:
from libcloud.compute.providers import get_driver
_ = Provider.GCE
except:
- print("GCE inventory script requires libcloud >= 0.13")
- sys.exit(1)
+ sys.exit("GCE inventory script requires libcloud >= 0.13")
+
+
+class CloudInventoryCache(object):
+ def __init__(self, cache_name='ansible-cloud-cache', cache_path='/tmp',
+ cache_max_age=300):
+ cache_dir = os.path.expanduser(cache_path)
+ if not os.path.exists(cache_dir):
+ os.makedirs(cache_dir)
+ self.cache_path_cache = os.path.join(cache_dir, cache_name)
+
+ self.cache_max_age = cache_max_age
+
+ def is_valid(self, max_age=None):
+ ''' Determines if the cache files have expired, or if it is still valid '''
+
+ if max_age is None:
+ max_age = self.cache_max_age
+
+ if os.path.isfile(self.cache_path_cache):
+ mod_time = os.path.getmtime(self.cache_path_cache)
+ current_time = time()
+ if (mod_time + max_age) > current_time:
+ return True
+
+ return False
+
+ def get_all_data_from_cache(self, filename=''):
+ ''' Reads the JSON inventory from the cache file. Returns Python dictionary. '''
+
+ data = ''
+ if not filename:
+ filename = self.cache_path_cache
+ with open(filename, 'r') as cache:
+ data = cache.read()
+ return json.loads(data)
+
+ def write_to_cache(self, data, filename=''):
+ ''' Writes data to file as JSON. Returns True. '''
+ if not filename:
+ filename = self.cache_path_cache
+ json_data = json.dumps(data)
+ with open(filename, 'w') as cache:
+ cache.write(json_data)
+ return True
class GceInventory(object):
def __init__(self):
+ # Cache object
+ self.cache = None
+ # dictionary containing inventory read from disk
+ self.inventory = {}
+
# Read settings and parse CLI arguments
self.parse_cli_args()
+ self.config = self.get_config()
self.driver = self.get_gce_driver()
+ self.ip_type = self.get_inventory_options()
+ if self.ip_type:
+ self.ip_type = self.ip_type.lower()
+
+ # Cache management
+ start_inventory_time = time()
+ cache_used = False
+ if self.args.refresh_cache or not self.cache.is_valid():
+ self.do_api_calls_update_cache()
+ else:
+ self.load_inventory_from_cache()
+ cache_used = True
+ self.inventory['_meta']['stats'] = {'use_cache': True}
+ self.inventory['_meta']['stats'] = {
+ 'inventory_load_time': time() - start_inventory_time,
+ 'cache_used': cache_used
+ }
# Just display data for specific host
if self.args.host:
- print(self.json_format_dict(self.node_to_dict(
- self.get_instance(self.args.host)),
- pretty=self.args.pretty))
- sys.exit(0)
-
- # Otherwise, assume user wants all instances grouped
- print(self.json_format_dict(self.group_instances(),
- pretty=self.args.pretty))
+ print(self.json_format_dict(
+ self.inventory['_meta']['hostvars'][self.args.host],
+ pretty=self.args.pretty))
+ else:
+ # Otherwise, assume user wants all instances grouped
+ zones = self.parse_env_zones()
+ print(self.json_format_dict(self.inventory,
+ pretty=self.args.pretty))
sys.exit(0)
- def get_gce_driver(self):
- """Determine the GCE authorization settings and return a
- libcloud driver.
+ def get_config(self):
+ """
+ Reads the settings from the gce.ini file.
+
+ Populates a SafeConfigParser object with defaults and
+ attempts to read an .ini-style configuration from the filename
+ specified in GCE_INI_PATH. If the environment variable is
+ not present, the filename defaults to gce.ini in the current
+ working directory.
"""
gce_ini_default_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "gce.ini")
@@ -140,14 +219,57 @@ class GceInventory(object):
'gce_service_account_pem_file_path': '',
'gce_project_id': '',
'libcloud_secrets': '',
+ 'inventory_ip_type': '',
+ 'cache_path': '~/.ansible/tmp',
+ 'cache_max_age': '300'
})
if 'gce' not in config.sections():
config.add_section('gce')
+ if 'inventory' not in config.sections():
+ config.add_section('inventory')
+ if 'cache' not in config.sections():
+ config.add_section('cache')
+
config.read(gce_ini_path)
+ #########
+ # Section added for processing ini settings
+ #########
+
+ # Set the instance_states filter based on config file options
+ self.instance_states = []
+ if config.has_option('gce', 'instance_states'):
+ states = config.get('gce', 'instance_states')
+ # Ignore if instance_states is an empty string.
+ if states:
+ self.instance_states = states.split(',')
+
+ # Caching
+ cache_path = config.get('cache', 'cache_path')
+ cache_max_age = config.getint('cache', 'cache_max_age')
+ # TODO(supertom): support project-specific caches
+ cache_name = 'ansible-gce.cache'
+ self.cache = CloudInventoryCache(cache_path=cache_path,
+ cache_max_age=cache_max_age,
+ cache_name=cache_name)
+ return config
+
+ def get_inventory_options(self):
+ """Determine inventory options. Environment variables always
+ take precedence over configuration files."""
+ ip_type = self.config.get('inventory', 'inventory_ip_type')
+ # If the appropriate environment variables are set, they override
+ # other configuration
+ ip_type = os.environ.get('INVENTORY_IP_TYPE', ip_type)
+ return ip_type
+
+ def get_gce_driver(self):
+ """Determine the GCE authorization settings and return a
+ libcloud driver.
+ """
# Attempt to get GCE params from a configuration file, if one
# exists.
- secrets_path = config.get('gce', 'libcloud_secrets')
+ secrets_path = self.config.get('gce', 'libcloud_secrets')
secrets_found = False
try:
import secrets
@@ -161,8 +283,7 @@ class GceInventory(object):
if not secrets_path.endswith('secrets.py'):
err = "Must specify libcloud secrets file as "
err += "/absolute/path/to/secrets.py"
- print(err)
- sys.exit(1)
+ sys.exit(err)
sys.path.append(os.path.dirname(secrets_path))
try:
import secrets
@@ -173,10 +294,10 @@ class GceInventory(object):
pass
if not secrets_found:
args = [
- config.get('gce','gce_service_account_email_address'),
- config.get('gce','gce_service_account_pem_file_path')
+ self.config.get('gce','gce_service_account_email_address'),
+ self.config.get('gce','gce_service_account_pem_file_path')
]
- kwargs = {'project': config.get('gce', 'gce_project_id')}
+ kwargs = {'project': self.config.get('gce', 'gce_project_id')}
# If the appropriate environment variables are set, they override
# other configuration; process those into our args and kwargs.
@@ -191,6 +312,14 @@ class GceInventory(object):
)
return gce
+ def parse_env_zones(self):
+ '''returns a list of comma separated zones parsed from the GCE_ZONE environment variable.
+ If provided, this will be used to filter the results of the grouped_instances call'''
+ import csv
+ reader = csv.reader([os.environ.get('GCE_ZONE',"")], skipinitialspace=True)
+ zones = [r for r in reader]
+ return [z for z in zones[0]]
+
def parse_cli_args(self):
''' Command line argument processing '''
@@ -202,6 +331,9 @@ class GceInventory(object):
help='Get all information about an instance')
parser.add_argument('--pretty', action='store_true', default=False,
help='Pretty format (default: False)')
+ parser.add_argument(
+ '--refresh-cache', action='store_true', default=False,
+ help='Force refresh of cache by making API requests (default: False - use cache files)')
self.args = parser.parse_args()
@@ -211,11 +343,17 @@ class GceInventory(object):
if inst is None:
return {}
- if inst.extra['metadata'].has_key('items'):
+ if 'items' in inst.extra['metadata']:
for entry in inst.extra['metadata']['items']:
md[entry['key']] = entry['value']
net = inst.extra['networkInterfaces'][0]['network'].split('/')[-1]
+ # default to external IP unless the user has specified they prefer internal
+ if self.ip_type == 'internal':
+ ssh_host = inst.private_ips[0]
+ else:
+ ssh_host = inst.public_ips[0] if len(inst.public_ips) >= 1 else inst.private_ips[0]
+
return {
'gce_uuid': inst.uuid,
'gce_id': inst.id,
@@ -231,29 +369,67 @@ class GceInventory(object):
'gce_metadata': md,
'gce_network': net,
# Hosts don't have a public name, so we add an IP
- 'ansible_ssh_host': inst.public_ips[0] if len(inst.public_ips) >= 1 else inst.private_ips[0]
+ 'ansible_ssh_host': ssh_host
}
- def get_instance(self, instance_name):
- '''Gets details about a specific instance '''
+ def load_inventory_from_cache(self):
+ ''' Loads inventory from JSON on disk. '''
+
try:
- return self.driver.ex_get_node(instance_name)
+ self.inventory = self.cache.get_all_data_from_cache()
+ hosts = self.inventory['_meta']['hostvars']
except Exception as e:
- return None
-
- def group_instances(self):
+ print(
+ "Invalid inventory file %s. Please rebuild with --refresh-cache option."
+ % (self.cache.cache_path_cache))
+ raise
+
+ def do_api_calls_update_cache(self):
+ ''' Do API calls and save data in cache. '''
+ zones = self.parse_env_zones()
+ data = self.group_instances(zones)
+ self.cache.write_to_cache(data)
+ self.inventory = data
+
+ def list_nodes(self):
+ all_nodes = []
+ params, more_results = {'maxResults': 500}, True
+ while more_results:
+ self.driver.connection.gce_params=params
+ all_nodes.extend(self.driver.list_nodes())
+ more_results = 'pageToken' in params
+ return all_nodes
+
+ def group_instances(self, zones=None):
'''Group all instances'''
groups = {}
meta = {}
meta["hostvars"] = {}
- for node in self.driver.list_nodes():
+ for node in self.list_nodes():
+
+ # This check filters on the desired instance states defined in the
+ # config file with the instance_states config option.
+ #
+ # If the instance_states list is _empty_ then _ALL_ states are returned.
+ #
+ # If the instance_states list is _populated_ then check the current
+ # state against the instance_states list
+ if self.instance_states and not node.extra['status'] in self.instance_states:
+ continue
+
name = node.name
meta["hostvars"][name] = self.node_to_dict(node)
zone = node.extra['zone'].name
- if groups.has_key(zone): groups[zone].append(name)
+
+ # To avoid making multiple requests per zone
+ # we list all nodes and then filter the results
+ if zones and zone not in zones:
+ continue
+
+ if zone in groups: groups[zone].append(name)
else: groups[zone] = [name]
tags = node.extra['tags']
@@ -262,25 +438,25 @@ class GceInventory(object):
tag = t[6:]
else:
tag = 'tag_%s' % t
- if groups.has_key(tag): groups[tag].append(name)
+ if tag in groups: groups[tag].append(name)
else: groups[tag] = [name]
net = node.extra['networkInterfaces'][0]['network'].split('/')[-1]
net = 'network_%s' % net
- if groups.has_key(net): groups[net].append(name)
+ if net in groups: groups[net].append(name)
else: groups[net] = [name]
machine_type = node.size
- if groups.has_key(machine_type): groups[machine_type].append(name)
+ if machine_type in groups: groups[machine_type].append(name)
else: groups[machine_type] = [name]
image = node.image and node.image or 'persistent_disk'
- if groups.has_key(image): groups[image].append(name)
+ if image in groups: groups[image].append(name)
else: groups[image] = [name]
status = node.extra['status']
stat = 'status_%s' % status.lower()
- if groups.has_key(stat): groups[stat].append(name)
+ if stat in groups: groups[stat].append(name)
else: groups[stat] = [name]
groups["_meta"] = meta
@@ -296,6 +472,6 @@ class GceInventory(object):
else:
return json.dumps(data)
-
# Run the script
-GceInventory()
+if __name__ == '__main__':
+ GceInventory()
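
The caching flow added to `gce.py` above follows the usual dynamic-inventory pattern: serve JSON from a timestamped cache file unless it has expired or `--refresh-cache` was passed. A minimal sketch of that decision logic, reusing the `CloudInventoryCache` method names from the diff (`build_inventory` is a hypothetical stand-in for the API-backed `group_instances()` call):

```python
# Sketch of the cache-or-refresh decision implemented in GceInventory.__init__.
def get_inventory(cache, refresh_cache, build_inventory):
    """cache: a CloudInventoryCache as defined in the diff above.
    refresh_cache: True when --refresh-cache was passed on the CLI.
    build_inventory: callable returning the full inventory dict
    (group_instances() in gce.py)."""
    if refresh_cache or not cache.is_valid():
        data = build_inventory()        # expensive cloud API calls
        cache.write_to_cache(data)      # persist JSON for subsequent runs
        return data
    return cache.get_all_data_from_cache()  # cheap read from disk
```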
diff --git a/inventory/libvirt/hosts/libvirt_generic.py b/inventory/libvirt/hosts/libvirt_generic.py
index ac2f0430a..d63e07b64 100755
--- a/inventory/libvirt/hosts/libvirt_generic.py
+++ b/inventory/libvirt/hosts/libvirt_generic.py
@@ -61,11 +61,11 @@ class LibvirtInventory(object):
self.parse_cli_args()
if self.args.host:
- print _json_format_dict(self.get_host_info(), self.args.pretty)
+ print(_json_format_dict(self.get_host_info(), self.args.pretty))
elif self.args.list:
- print _json_format_dict(self.get_inventory(), self.args.pretty)
+ print(_json_format_dict(self.get_inventory(), self.args.pretty))
else: # default action with no options
- print _json_format_dict(self.get_inventory(), self.args.pretty)
+ print(_json_format_dict(self.get_inventory(), self.args.pretty))
def read_settings(self):
''' Reads the settings from the libvirt.ini file '''
@@ -115,12 +115,12 @@ class LibvirtInventory(object):
conn = libvirt.openReadOnly(self.libvirt_uri)
if conn is None:
- print "Failed to open connection to %s" % self.libvirt_uri
+ print("Failed to open connection to %s" % self.libvirt_uri)
sys.exit(1)
domains = conn.listAllDomains()
if domains is None:
- print "Failed to list domains for connection %s" % self.libvirt_uri
+ print("Failed to list domains for connection %s" % self.libvirt_uri)
sys.exit(1)
for domain in domains:
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index 665ede1cb..955772486 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -15,6 +15,7 @@ BuildArch: noarch
Requires: ansible >= 2.2.0.0-1
Requires: python2
+Requires: python-six
Requires: openshift-ansible-docs = %{version}-%{release}
%description
diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml
index b9966e715..f0cfa7f55 100644
--- a/playbooks/adhoc/uninstall.yml
+++ b/playbooks/adhoc/uninstall.yml
@@ -75,6 +75,10 @@
- hosts: nodes
become: yes
+ vars:
+ node_dirs:
+ - "/etc/origin"
+ - "/var/lib/origin"
tasks:
- name: unmask services
command: systemctl unmask "{{ item }}"
@@ -83,63 +87,66 @@
with_items:
- firewalld
- - name: Remove packages
- package: name={{ item }} state=absent
- when: not is_atomic | bool
- with_items:
- - atomic-enterprise
- - atomic-enterprise-node
- - atomic-enterprise-sdn-ovs
- - atomic-openshift
- - atomic-openshift-clients
- - atomic-openshift-excluder
- - atomic-openshift-docker-excluder
- - atomic-openshift-node
- - atomic-openshift-sdn-ovs
- - cockpit-bridge
- - cockpit-docker
- - cockpit-shell
- - cockpit-ws
- - kubernetes-client
- - openshift
- - openshift-node
- - openshift-sdn
- - openshift-sdn-ovs
- - openvswitch
- - origin
- - origin-excluder
- - origin-docker-excluder
- - origin-clients
- - origin-node
- - origin-sdn-ovs
- - tuned-profiles-atomic-enterprise-node
- - tuned-profiles-atomic-openshift-node
- - tuned-profiles-openshift-node
- - tuned-profiles-origin-node
-
- - name: Remove flannel package
- package: name=flannel state=absent
- when: openshift_use_flannel | default(false) | bool and not is_atomic | bool
-
- - shell: systemctl reset-failed
- changed_when: False
-
- - shell: systemctl daemon-reload
- changed_when: False
-
- - name: Remove br0 interface
- shell: ovs-vsctl del-br br0
- changed_when: False
- failed_when: False
-
- - name: Remove linux interfaces
- shell: ip link del "{{ item }}"
- changed_when: False
- failed_when: False
- with_items:
- - lbr0
- - vlinuxbr
- - vovsbr
+ - block:
+ - block:
+ - name: Remove packages
+ package: name={{ item }} state=absent
+ with_items:
+ - atomic-enterprise
+ - atomic-enterprise-node
+ - atomic-enterprise-sdn-ovs
+ - atomic-openshift
+ - atomic-openshift-clients
+ - atomic-openshift-excluder
+ - atomic-openshift-docker-excluder
+ - atomic-openshift-node
+ - atomic-openshift-sdn-ovs
+ - cockpit-bridge
+ - cockpit-docker
+ - cockpit-shell
+ - cockpit-ws
+ - kubernetes-client
+ - openshift
+ - openshift-node
+ - openshift-sdn
+ - openshift-sdn-ovs
+ - openvswitch
+ - origin
+ - origin-excluder
+ - origin-docker-excluder
+ - origin-clients
+ - origin-node
+ - origin-sdn-ovs
+ - tuned-profiles-atomic-enterprise-node
+ - tuned-profiles-atomic-openshift-node
+ - tuned-profiles-openshift-node
+ - tuned-profiles-origin-node
+
+ - name: Remove flannel package
+ package: name=flannel state=absent
+ when: openshift_use_flannel | default(false) | bool
+ when: "{{ not is_atomic | bool }}"
+
+ - shell: systemctl reset-failed
+ changed_when: False
+
+ - shell: systemctl daemon-reload
+ changed_when: False
+
+ - name: Remove br0 interface
+ shell: ovs-vsctl del-br br0
+ changed_when: False
+ failed_when: False
+
+ - name: Remove linux interfaces
+ shell: ip link del "{{ item }}"
+ changed_when: False
+ failed_when: False
+ with_items:
+ - lbr0
+ - vlinuxbr
+ - vovsbr
+ when: "{{ openshift_remove_all | default(true) | bool }}"
- shell: find /var/lib/origin/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true
changed_when: False
@@ -176,28 +183,57 @@
failed_when: False
with_items: "{{ exited_containers_to_delete.results }}"
- - shell: docker images | egrep {{ item }} | awk '{ print $3 }'
- changed_when: False
- failed_when: False
- register: images_to_delete
+ - block:
+ - block:
+ - shell: docker images | egrep {{ item }} | awk '{ print $3 }'
+ changed_when: False
+ failed_when: False
+ register: images_to_delete
+ with_items:
+ - registry\.access\..*redhat\.com/openshift3
+ - registry\.access\..*redhat\.com/aep3
+ - registry\.qe\.openshift\.com/.*
+ - registry\.access\..*redhat\.com/rhel7/etcd
+ - docker.io/openshift
+
+ - shell: "docker rmi -f {{ item.stdout_lines | join(' ') }}"
+ changed_when: False
+ failed_when: False
+ with_items: "{{ images_to_delete.results }}"
+ when: "{{ openshift_uninstall_images | default(True) | bool }}"
+
+ - name: remove sdn drop files
+ file:
+ path: /run/openshift-sdn
+ state: absent
+
+ - name: Remove files owned by RPMs
+ file: path={{ item }} state=absent
+ with_items:
+ - /etc/sysconfig/openshift-node
+ - /etc/sysconfig/openvswitch
+ - /run/openshift-sdn
+ when: "{{ openshift_remove_all | default(True) | bool }}"
+
+ - find: path={{ item }} file_type=file
+ register: files
with_items:
- - registry\.access\..*redhat\.com/openshift3
- - registry\.access\..*redhat\.com/aep3
- - registry\.qe\.openshift\.com/.*
- - registry\.access\..*redhat\.com/rhel7/etcd
- - docker.io/openshift
- when: openshift_uninstall_images | default(True) | bool
-
- - shell: "docker rmi -f {{ item.stdout_lines | join(' ') }}"
- changed_when: False
- failed_when: False
- with_items: "{{ images_to_delete.results }}"
- when: openshift_uninstall_images | default(True) | bool
+ - "{{ node_dirs }}"
+
+ - find: path={{ item }} file_type=directory
+ register: directories
+ with_items:
+ - "{{ node_dirs }}"
- - name: Remove sdn drop files
- file:
- path: /run/openshift-sdn
- state: absent
+ - file: path={{ item.1.path }} state=absent
+ with_subelements:
+ - "{{ files.results | default([]) }}"
+ - files
+
+ - file: path={{ item.1.path }} state=absent
+ with_subelements:
+ - "{{ directories.results | default([]) }}"
+ - files
- name: Remove remaining files
file: path={{ item }} state=absent
@@ -209,13 +245,10 @@
- /etc/NetworkManager/dispatcher.d/99-origin-dns.sh
- /etc/openshift
- /etc/openshift-sdn
- - /etc/origin
- /etc/sysconfig/atomic-enterprise-node
- /etc/sysconfig/atomic-openshift-node
- /etc/sysconfig/atomic-openshift-node-dep
- - /etc/sysconfig/openshift-node
- /etc/sysconfig/openshift-node-dep
- - /etc/sysconfig/openvswitch
- /etc/sysconfig/origin-node
- /etc/sysconfig/origin-node
- /etc/sysconfig/origin-node-dep
@@ -227,10 +260,8 @@
- /etc/systemd/system/origin-node-dep.service
- /etc/systemd/system/origin-node.service
- /etc/systemd/system/origin-node.service.wants
- - /run/openshift-sdn
- /var/lib/atomic-enterprise
- /var/lib/openshift
- - /var/lib/origin
- name: restart docker
service: name=docker state=restarted
@@ -238,9 +269,12 @@
- name: restart NetworkManager
service: name=NetworkManager state=restarted
-
- hosts: masters
become: yes
+ vars:
+ master_dirs:
+ - "/etc/origin"
+ - "/var/lib/origin"
tasks:
- name: unmask services
command: systemctl unmask "{{ item }}"
@@ -252,7 +286,7 @@
- name: Remove packages
package: name={{ item }} state=absent
- when: not is_atomic | bool
+ when: not is_atomic | bool and openshift_remove_all | default(True) | bool
with_items:
- atomic-enterprise
- atomic-enterprise-master
@@ -283,6 +317,33 @@
- shell: systemctl daemon-reload
changed_when: False
+ - name: Remove files owned by RPMs
+ file: path={{ item }} state=absent
+ when: openshift_remove_all | default(True) | bool
+ with_items:
+ - /etc/sysconfig/atomic-openshift-master
+ - /etc/sysconfig/openvswitch
+
+ - find: path={{ item }} file_type=file
+ register: files
+ with_items:
+ - "{{ master_dirs }}"
+
+ - find: path={{ item }} file_type=directory
+ register: directories
+ with_items:
+ - "{{ master_dirs }}"
+
+ - file: path={{ item.1.path }} state=absent
+ with_subelements:
+ - "{{ files.results | default([]) }}"
+ - files
+
+ - file: path={{ item.1.path }} state=absent
+ with_subelements:
+ - "{{ directories.results | default([]) }}"
+ - files
+
- name: Remove remaining files
file: path={{ item }} state=absent
with_items:
@@ -292,7 +353,6 @@
- /etc/corosync
- /etc/openshift
- /etc/openshift-sdn
- - /etc/origin
- /etc/systemd/system/atomic-openshift-master.service
- /etc/systemd/system/atomic-openshift-master-api.service
- /etc/systemd/system/atomic-openshift-master-controllers.service
@@ -303,14 +363,12 @@
- /etc/sysconfig/atomic-enterprise-master
- /etc/sysconfig/atomic-enterprise-master-api
- /etc/sysconfig/atomic-enterprise-master-controllers
- - /etc/sysconfig/atomic-openshift-master
- /etc/sysconfig/atomic-openshift-master-api
- /etc/sysconfig/atomic-openshift-master-controllers
- /etc/sysconfig/origin-master
- /etc/sysconfig/origin-master-api
- /etc/sysconfig/origin-master-controllers
- /etc/sysconfig/openshift-master
- - /etc/sysconfig/openvswitch
- /etc/sysconfig/origin-master
- /etc/sysconfig/origin-master-api
- /etc/sysconfig/origin-master-controllers
@@ -318,7 +376,6 @@
- /usr/share/openshift/examples
- /var/lib/atomic-enterprise
- /var/lib/openshift
- - /var/lib/origin
- /var/lib/pacemaker
- /var/lib/pcsd
- /usr/lib/systemd/system/atomic-openshift-master-api.service
@@ -339,6 +396,10 @@
- hosts: etcd
become: yes
+ vars:
+ etcd_dirs:
+ - "/etc/etcd"
+ - "/var/lib/etcd"
tasks:
- name: unmask services
command: systemctl unmask "{{ item }}"
@@ -358,7 +419,7 @@
- name: Remove packages
package: name={{ item }} state=absent
- when: not is_atomic | bool
+ when: not is_atomic | bool and openshift_remove_all | default(True) | bool
with_items:
- etcd
- etcd3
@@ -369,13 +430,25 @@
- shell: systemctl daemon-reload
changed_when: False
- - name: Remove remaining files
- file: path={{ item }} state=absent
+ - find: path={{ item }} file_type=file
+ register: files
with_items:
- - /etc/ansible/facts.d/openshift.fact
- - /etc/etcd
- - /etc/systemd/system/etcd_container.service
- - /etc/profile.d/etcdctl.sh
+ - "{{ etcd_dirs }}"
+
+ - find: path={{ item }} file_type=directory
+ register: directories
+ with_items:
+ - "{{ etcd_dirs }}"
+
+ - file: path={{ item.1.path }} state=absent
+ with_subelements:
+ - "{{ files.results | default([]) }}"
+ - files
+
+ - file: path={{ item.1.path }} state=absent
+ with_subelements:
+ - "{{ directories.results | default([]) }}"
+ - files
# Intentionally using rm command over file module because if someone had mounted a filesystem
# at /var/lib/etcd then the contents were not removed correctly
@@ -385,6 +458,13 @@
warn: no
failed_when: false
+ - name: Remove remaining files
+ file: path={{ item }} state=absent
+ with_items:
+ - /etc/ansible/facts.d/openshift.fact
+ - /etc/systemd/system/etcd_container.service
+ - /etc/profile.d/etcdctl.sh
+
- hosts: lb
become: yes
tasks:
@@ -397,7 +477,7 @@
- name: Remove packages
package: name={{ item }} state=absent
- when: not is_atomic | bool
+ when: not is_atomic | bool and openshift_remove_all | default(True) | bool
with_items:
- haproxy
@@ -411,4 +491,4 @@
file: path={{ item }} state=absent
with_items:
- /etc/ansible/facts.d/openshift.fact
- - /var/lib/haproxy
+ - /var/lib/haproxy/stats
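
The repeated `find` + `with_subelements` task pairs above implement "empty the directory, keep the directory": files and subdirectories under each root are enumerated and deleted individually, so a root such as `/var/lib/etcd` survives even when it is a mount point (the comment in the etcd play calls this out). A rough Python equivalent of that intent, assuming the `node_dirs` defaults from the play:

```python
import os
import shutil

def empty_dirs(roots=('/etc/origin', '/var/lib/origin')):
    """Delete the contents of each root but leave the root itself in place,
    mirroring the find/file task pairs in the uninstall playbook above."""
    for root in roots:
        if not os.path.isdir(root):
            continue
        for entry in os.listdir(root):
            path = os.path.join(root, entry)
            if os.path.isdir(path) and not os.path.islink(path):
                shutil.rmtree(path, ignore_errors=True)  # subdirectories
            else:
                os.unlink(path)                          # files and symlinks
```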
diff --git a/playbooks/common/openshift-cluster/openshift_hosted.yml b/playbooks/common/openshift-cluster/openshift_hosted.yml
index cd2f2e6aa..7839b85e8 100644
--- a/playbooks/common/openshift-cluster/openshift_hosted.yml
+++ b/playbooks/common/openshift-cluster/openshift_hosted.yml
@@ -26,27 +26,6 @@
logging_elasticsearch_cluster_size: "{{ openshift_hosted_logging_elasticsearch_cluster_size | default(1) }}"
logging_elasticsearch_ops_cluster_size: "{{ openshift_hosted_logging_elasticsearch_ops_cluster_size | default(1) }}"
roles:
- - role: openshift_cli
- - role: openshift_hosted_facts
- - role: openshift_projects
- # TODO: Move standard project definitions to openshift_hosted/vars/main.yml
- # Vars are not accessible in meta/main.yml in ansible-1.9.x
- openshift_projects: "{{ openshift_additional_projects | default({}) | oo_merge_dicts({'default':{'default_node_selector':''},'openshift-infra':{'default_node_selector':''},'logging':{'default_node_selector':''}}) }}"
- - role: openshift_serviceaccounts
- openshift_serviceaccounts_names:
- - router
- openshift_serviceaccounts_namespace: default
- openshift_serviceaccounts_sccs:
- - hostnetwork
- when: openshift.common.version_gte_3_2_or_1_2
- - role: openshift_serviceaccounts
- openshift_serviceaccounts_names:
- - router
- - registry
- openshift_serviceaccounts_namespace: default
- openshift_serviceaccounts_sccs:
- - privileged
- when: not openshift.common.version_gte_3_2_or_1_2
- role: openshift_hosted
- role: openshift_metrics
when: openshift_hosted_metrics_deploy | default(false) | bool
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
index 8058d3377..21f3c80a1 100644
--- a/playbooks/common/openshift-master/config.yml
+++ b/playbooks/common/openshift-master/config.yml
@@ -133,9 +133,7 @@
| oo_collect('openshift.common.hostname') | default([]) | join (',')
}}"
roles:
- - role: openshift_master_facts
- - role: openshift_hosted_facts
- - role: openshift_master_certificates
+ - role: openshift_master
openshift_ca_host: "{{ groups.oo_first_master.0 }}"
openshift_master_etcd_hosts: "{{ hostvars
| oo_select_keys(groups['oo_etcd_to_config'] | default([]))
@@ -145,35 +143,12 @@
| oo_select_keys(groups['oo_masters_to_config'] | default([]))
| oo_collect('openshift.common.all_hostnames')
| oo_flatten | unique }}"
- - role: openshift_etcd_client_certificates
+ openshift_master_hosts: "{{ groups.oo_masters_to_config }}"
etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
etcd_cert_subdir: "openshift-master-{{ openshift.common.hostname }}"
etcd_cert_config_dir: "{{ openshift.common.config_base }}/master"
etcd_cert_prefix: "master.etcd-"
- when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config
- - role: openshift_clock
- - role: openshift_cloud_provider
- - role: openshift_builddefaults
- - role: os_firewall
- os_firewall_allow:
- - service: api server https
- port: "{{ openshift.master.api_port }}/tcp"
- - service: api controllers https
- port: "{{ openshift.master.controllers_port }}/tcp"
- - service: skydns tcp
- port: "{{ openshift.master.dns_port }}/tcp"
- - service: skydns udp
- port: "{{ openshift.master.dns_port }}/udp"
- - role: os_firewall
- os_firewall_allow:
- - service: etcd embedded
- port: 4001/tcp
- when: groups.oo_etcd_to_config | default([]) | length == 0
- - role: openshift_master
- openshift_master_hosts: "{{ groups.oo_masters_to_config }}"
- - role: nickhammond.logrotate
- - role: nuage_master
- when: openshift.common.use_nuage | bool
+
post_tasks:
- name: Create group for deployment type
group_by: key=oo_masters_deployment_type_{{ openshift.common.deployment_type }}
diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml
index e28da5713..b36c0eedf 100644
--- a/playbooks/common/openshift-node/config.yml
+++ b/playbooks/common/openshift-node/config.yml
@@ -60,30 +60,8 @@
when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
openshift_generate_no_proxy_hosts | default(True) | bool }}"
roles:
- - role: openshift_common
- - role: openshift_clock
- - role: openshift_docker
- - role: openshift_node_certificates
- openshift_ca_host: "{{ groups.oo_first_master.0 }}"
- - role: openshift_cloud_provider
- - role: openshift_node_dnsmasq
- when: openshift.common.use_dnsmasq | bool
- - role: os_firewall
- os_firewall_allow:
- - service: Kubernetes kubelet
- port: 10250/tcp
- - service: http
- port: 80/tcp
- - service: https
- port: 443/tcp
- - service: Openshift kubelet ReadOnlyPort
- port: 10255/tcp
- - service: Openshift kubelet ReadOnlyPort udp
- port: 10255/udp
- - service: OpenShift OVS sdn
- port: 4789/udp
- when: openshift.node.use_openshift_sdn | bool
- role: openshift_node
+ openshift_ca_host: "{{ groups.oo_first_master.0 }}"
- name: Configure nodes
hosts: oo_nodes_to_config:!oo_containerized_master_nodes
@@ -99,30 +77,8 @@
when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
openshift_generate_no_proxy_hosts | default(True) | bool }}"
roles:
- - role: openshift_common
- - role: openshift_clock
- - role: openshift_docker
- - role: openshift_node_certificates
- openshift_ca_host: "{{ groups.oo_first_master.0 }}"
- - role: openshift_cloud_provider
- - role: openshift_node_dnsmasq
- when: openshift.common.use_dnsmasq | bool
- - role: os_firewall
- os_firewall_allow:
- - service: Kubernetes kubelet
- port: 10250/tcp
- - service: http
- port: 80/tcp
- - service: https
- port: 443/tcp
- - service: Openshift kubelet ReadOnlyPort
- port: 10255/tcp
- - service: Openshift kubelet ReadOnlyPort udp
- port: 10255/udp
- - service: OpenShift OVS sdn
- port: 4789/udp
- when: openshift.node.use_openshift_sdn | bool
- role: openshift_node
+ openshift_ca_host: "{{ groups.oo_first_master.0 }}"
- name: Additional node config
hosts: oo_nodes_to_config
diff --git a/requirements.txt b/requirements.txt
index e55ef5f0b..8f47033f8 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,2 +1,4 @@
-ansible>=2.1
+ansible>=2.2
+six
pyOpenSSL
+PyYAML
diff --git a/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py b/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py
index 7161b5277..a474b36b0 100644
--- a/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py
+++ b/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py
@@ -4,17 +4,13 @@
"""For details on this module see DOCUMENTATION (below)"""
-# router/registry cert grabbing
-import subprocess
-# etcd config file
-import ConfigParser
-# Expiration parsing
import datetime
-# File path stuff
import os
-# Config file parsing
+import subprocess
+
+from six.moves import configparser
+
import yaml
-# Certificate loading
import OpenSSL.crypto
DOCUMENTATION = '''
@@ -260,7 +256,10 @@ Return:
# This is our module MAIN function after all, so there's bound to be a
# lot of code bundled up into one block
#
-# pylint: disable=too-many-locals,too-many-locals,too-many-statements,too-many-branches
+# Reason: These checks are disabled because the issue was introduced
+# during a period where the pylint checks weren't enabled for this file
+# Status: temporarily disabled pending future refactoring
+# pylint: disable=too-many-locals,too-many-statements,too-many-branches
def main():
"""This module examines certificates (in various forms) which compose
an OpenShift Container Platform cluster
@@ -479,13 +478,17 @@ an OpenShift Container Platform cluster
etcd_cert_params.append('dne')
try:
with open('/etc/etcd/etcd.conf', 'r') as fp:
- etcd_config = ConfigParser.ConfigParser()
+ etcd_config = configparser.ConfigParser()
+ # Reason: This check is disabled because the issue was introduced
+ # during a period where the pylint checks weren't enabled for this file
+ # Status: temporarily disabled pending future refactoring
+ # pylint: disable=deprecated-method
etcd_config.readfp(FakeSecHead(fp))
for param in etcd_cert_params:
try:
etcd_certs_to_check.add(etcd_config.get('ETCD', param))
- except ConfigParser.NoOptionError:
+ except configparser.NoOptionError:
# That parameter does not exist, oh well...
pass
except IOError:
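
The switch to `six.moves` above is the portable spelling of the py2/py3 config-parser import: the same name resolves to `ConfigParser` on Python 2 and `configparser` on Python 3. A minimal sketch of the pattern; the sample INI content here is made up for illustration:

```python
import io
from six.moves import configparser  # ConfigParser on py2, configparser on py3

parser = configparser.ConfigParser()
# readfp() exists on both major versions (Python 3 also offers read_file()).
parser.readfp(io.StringIO(u'[ETCD]\nca_file = /etc/etcd/ca.crt\n'))
print(parser.get('ETCD', 'ca_file'))  # -> /etc/etcd/ca.crt
```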
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index d7e3596fd..6baf9d016 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -7,13 +7,6 @@
"""Ansible module for retrieving and setting openshift related facts"""
-try:
- # python2
- import ConfigParser
-except ImportError:
- # python3
- import configparser as ConfigParser
-
# pylint: disable=no-name-in-module, import-error, wrong-import-order
import copy
import errno
@@ -26,8 +19,8 @@ import struct
import socket
from distutils.util import strtobool
from distutils.version import LooseVersion
-from six import string_types
-from six import text_type
+from six import string_types, text_type
+from six.moves import configparser
# ignore pylint errors related to the module_utils import
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
@@ -776,7 +769,7 @@ def set_etcd_facts_if_unset(facts):
# Add a fake section for parsing:
ini_str = text_type('[root]\n' + open('/etc/etcd/etcd.conf', 'r').read(), 'utf-8')
ini_fp = io.StringIO(ini_str)
- config = ConfigParser.RawConfigParser()
+ config = configparser.RawConfigParser()
config.readfp(ini_fp)
etcd_data_dir = config.get('root', 'ETCD_DATA_DIR')
if etcd_data_dir.startswith('"') and etcd_data_dir.endswith('"'):
@@ -1256,6 +1249,13 @@ def is_service_running(service):
return service_running
+def rpm_rebuilddb():
+ """
+ Runs rpm --rebuilddb to ensure the db is in good shape.
+ """
+ module.run_command(['/usr/bin/rpm', '--rebuilddb']) # noqa: F405
+
+
def get_version_output(binary, version_cmd):
""" runs and returns the version output for a command """
cmd = []
@@ -1292,7 +1292,7 @@ def get_hosted_registry_insecure():
try:
ini_str = text_type('[root]\n' + open('/etc/sysconfig/docker', 'r').read(), 'utf-8')
ini_fp = io.StringIO(ini_str)
- config = ConfigParser.RawConfigParser()
+ config = configparser.RawConfigParser()
config.readfp(ini_fp)
options = config.get('root', 'OPTIONS')
if 'insecure-registry' in options:
@@ -1561,15 +1561,15 @@ def get_local_facts_from_file(filename):
local_facts = dict()
try:
# Handle conversion of INI style facts file to json style
- ini_facts = ConfigParser.SafeConfigParser()
+ ini_facts = configparser.SafeConfigParser()
ini_facts.read(filename)
for section in ini_facts.sections():
local_facts[section] = dict()
for key, value in ini_facts.items(section):
local_facts[section][key] = value
- except (ConfigParser.MissingSectionHeaderError,
- ConfigParser.ParsingError):
+ except (configparser.MissingSectionHeaderError,
+ configparser.ParsingError):
try:
with open(filename, 'r') as facts_file:
local_facts = json.load(facts_file)
@@ -1966,6 +1966,11 @@ class OpenShiftFacts(object):
if 'docker' in roles:
docker = dict(disable_push_dockerhub=False,
options='--log-driver=json-file --log-opt max-size=50m')
+            # NOTE: This is a workaround for a dnf output race condition that
+            # can occur in some situations. See
+            # https://bugzilla.redhat.com/show_bug.cgi?id=918184
+ if self.system_facts['ansible_pkg_mgr'] == 'dnf':
+ rpm_rebuilddb()
+
version_info = get_docker_version_info()
if version_info is not None:
docker['api_version'] = version_info['api_version']
diff --git a/roles/openshift_hosted/meta/main.yml b/roles/openshift_hosted/meta/main.yml
index 74c50ae1d..ca5e88b15 100644
--- a/roles/openshift_hosted/meta/main.yml
+++ b/roles/openshift_hosted/meta/main.yml
@@ -11,4 +11,23 @@ galaxy_info:
- 7
categories:
- cloud
-dependencies: []
+dependencies:
+- role: openshift_cli
+- role: openshift_hosted_facts
+- role: openshift_projects
+ openshift_projects: "{{ openshift_additional_projects | default({}) | oo_merge_dicts({'default':{'default_node_selector':''},'openshift-infra':{'default_node_selector':''},'logging':{'default_node_selector':''}}) }}"
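+# On 3.2/1.2 and later the router only needs the hostnetwork SCC; earlier
+# releases grant the privileged SCC to both the router and the registry.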
+- role: openshift_serviceaccounts
+ openshift_serviceaccounts_names:
+ - router
+ openshift_serviceaccounts_namespace: default
+ openshift_serviceaccounts_sccs:
+ - hostnetwork
+ when: openshift.common.version_gte_3_2_or_1_2
+- role: openshift_serviceaccounts
+ openshift_serviceaccounts_names:
+ - router
+ - registry
+ openshift_serviceaccounts_namespace: default
+ openshift_serviceaccounts_sccs:
+ - privileged
+ when: not openshift.common.version_gte_3_2_or_1_2
diff --git a/roles/openshift_master/meta/main.yml b/roles/openshift_master/meta/main.yml
index 7457e4378..3a595b2d1 100644
--- a/roles/openshift_master/meta/main.yml
+++ b/roles/openshift_master/meta/main.yml
@@ -11,4 +11,33 @@ galaxy_info:
- 7
categories:
- cloud
-dependencies: []
+dependencies:
+- role: openshift_master_facts
+- role: openshift_hosted_facts
+- role: openshift_master_certificates
+- role: openshift_etcd_client_certificates
+ etcd_cert_subdir: "openshift-master-{{ openshift.common.hostname }}"
+ etcd_cert_config_dir: "{{ openshift.common.config_base }}/master"
+ etcd_cert_prefix: "master.etcd-"
+ when: groups.oo_etcd_to_config | default([]) | length != 0
+- role: openshift_clock
+- role: openshift_cloud_provider
+- role: openshift_builddefaults
+- role: os_firewall
+ os_firewall_allow:
+ - service: api server https
+ port: "{{ openshift.master.api_port }}/tcp"
+ - service: api controllers https
+ port: "{{ openshift.master.controllers_port }}/tcp"
+ - service: skydns tcp
+ port: "{{ openshift.master.dns_port }}/tcp"
+ - service: skydns udp
+ port: "{{ openshift.master.dns_port }}/udp"
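+# When no dedicated etcd hosts are configured, the master runs embedded etcd
+# and needs its client port open as well.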
+- role: os_firewall
+ os_firewall_allow:
+ - service: etcd embedded
+ port: 4001/tcp
+ when: groups.oo_etcd_to_config | default([]) | length == 0
+- role: nickhammond.logrotate
+- role: nuage_master
+ when: openshift.common.use_nuage | bool
diff --git a/roles/openshift_node/meta/main.yml b/roles/openshift_node/meta/main.yml
index c39269f33..56dee2958 100644
--- a/roles/openshift_node/meta/main.yml
+++ b/roles/openshift_node/meta/main.yml
@@ -11,4 +11,26 @@ galaxy_info:
- 7
categories:
- cloud
-dependencies: []
+dependencies:
+- role: openshift_common
+- role: openshift_clock
+- role: openshift_docker
+- role: openshift_node_certificates
+- role: openshift_cloud_provider
+- role: openshift_node_dnsmasq
+ when: openshift.common.use_dnsmasq | bool
+- role: os_firewall
+ os_firewall_allow:
+ - service: Kubernetes kubelet
+ port: 10250/tcp
+ - service: http
+ port: 80/tcp
+ - service: https
+ port: 443/tcp
+  - service: OpenShift kubelet ReadOnlyPort
+    port: 10255/tcp
+  - service: OpenShift kubelet ReadOnlyPort udp
+    port: 10255/udp
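+  # 4789/udp is the standard VXLAN port, used here by the OVS SDN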
+ - service: OpenShift OVS sdn
+ port: 4789/udp
+ when: openshift.node.use_openshift_sdn | bool
diff --git a/setup.cfg b/setup.cfg
new file mode 100644
index 000000000..d55df9d37
--- /dev/null
+++ b/setup.cfg
@@ -0,0 +1,27 @@
+[bdist_wheel]
+# This flag says that the code is written to work on both Python 2 and Python
+# 3. If at all possible, it is good practice to do this. If you cannot, you
+# will need to generate wheels for each Python version that you support.
+universal=1
+
+[nosetests]
+tests=roles/openshift_master_facts/test/, test/
+verbosity=2
+with-coverage=1
+cover-html=1
+cover-inclusive=1
+cover-min-percentage=70
+cover-erase=1
+detailed-errors=1
+cover-branches=1
+
+[yamllint]
+excludes=.tox,utils,files
+
+[lint]
+lint_disable=fixme,locally-disabled,file-ignored,duplicate-code
+
+[flake8]
+exclude=.tox/*,setup.py,utils/*,inventory/*
+max-line-length=120
+ignore=E501,T003
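+# Note: E501 is in the ignore list, so the line-length limit above is not
+# actually enforced by flake8.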
diff --git a/setup.py b/setup.py
new file mode 100644
index 000000000..e598c3502
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,191 @@
+"""A setuptools based setup module.
+
+"""
+from __future__ import print_function
+
+import os
+import fnmatch
+import re
+
+import yaml
+
+# Always prefer setuptools over distutils
+from setuptools import setup, Command
+from setuptools_lint.setuptools_command import PylintCommand
+from six import string_types
+from yamllint.config import YamlLintConfig
+from yamllint.cli import Format
+from yamllint import linter
+
+
+def find_files(base_dir, exclude_dirs, include_dirs, file_regex):
+ ''' find files matching file_regex '''
+ found = []
+ exclude_regex = ''
+ include_regex = ''
+
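+    # r'$.' can never match (end-of-string followed by another character), so
+    # an empty pattern list translates to "match nothing".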
+ if exclude_dirs is not None:
+ exclude_regex = r'|'.join([fnmatch.translate(x) for x in exclude_dirs]) or r'$.'
+
+ if include_dirs is not None:
+ include_regex = r'|'.join([fnmatch.translate(x) for x in include_dirs]) or r'$.'
+
+ for root, dirs, files in os.walk(base_dir):
+ if exclude_dirs is not None:
+ # filter out excludes for dirs
+ dirs[:] = [d for d in dirs if not re.match(exclude_regex, d)]
+
+ if include_dirs is not None:
+ # filter for includes for dirs
+ dirs[:] = [d for d in dirs if re.match(include_regex, d)]
+
+ matches = [os.path.join(root, f) for f in files if re.search(file_regex, f) is not None]
+ found.extend(matches)
+
+ return found
+
+
+class OpenShiftAnsibleYamlLint(Command):
+ ''' Command to run yamllint '''
+ description = "Run yamllint tests"
+ user_options = [
+ ('excludes=', 'e', 'directories to exclude'),
+ ('config-file=', 'c', 'config file to use'),
+ ('format=', 'f', 'format to use (standard, parsable)'),
+ ]
+
+ def initialize_options(self):
+ ''' initialize_options '''
+ # Reason: Defining these attributes as a part of initialize_options is
+ # consistent with upstream usage
+ # Status: permanently disabled
+ # pylint: disable=attribute-defined-outside-init
+ self.excludes = None
+ self.config_file = None
+ self.format = None
+
+ def finalize_options(self):
+ ''' finalize_options '''
+ # Reason: These attributes are defined in initialize_options and this
+        # usage is consistent with upstream usage
+ # Status: permanently disabled
+ # pylint: disable=attribute-defined-outside-init
+ if isinstance(self.excludes, string_types):
+ self.excludes = self.excludes.split(',')
+ if self.format is None:
+ self.format = 'standard'
+ assert (self.format in ['standard', 'parsable']), (
+ 'unknown format {0}.'.format(self.format))
+ if self.config_file is None:
+ self.config_file = '.yamllint'
+ assert os.path.isfile(self.config_file), (
+ 'yamllint config file {0} does not exist.'.format(self.config_file))
+
+ def run(self):
+ ''' run command '''
+ if self.excludes is not None:
+ print("Excludes:\n{0}".format(yaml.dump(self.excludes, default_flow_style=False)))
+
+ config = YamlLintConfig(file=self.config_file)
+
+ has_errors = False
+ has_warnings = False
+
+ if self.format == 'parsable':
+ format_method = Format.parsable
+ else:
+ format_method = Format.standard_color
+
+ for yaml_file in find_files(os.getcwd(), self.excludes, None, r'\.ya?ml$'):
+ first = True
+ with open(yaml_file, 'r') as contents:
+ for problem in linter.run(contents, config):
+ if first and self.format != 'parsable':
+ print('\n{0}:'.format(os.path.relpath(yaml_file)))
+ first = False
+
+ print(format_method(problem, yaml_file))
+ if problem.level == linter.PROBLEM_LEVELS['error']:
+ has_errors = True
+ elif problem.level == linter.PROBLEM_LEVELS['warning']:
+ has_warnings = True
+
+ assert not has_errors, 'yamllint errors found'
+ assert not has_warnings, 'yamllint warnings found'
+
+
+class OpenShiftAnsiblePylint(PylintCommand):
+ ''' Class to override the default behavior of PylintCommand '''
+
+ # Reason: This method needs to be an instance method to conform to the
+ # overridden method's signature
+ # Status: permanently disabled
+ # pylint: disable=no-self-use
+ def find_all_modules(self):
+ ''' find all python files to test '''
+ exclude_dirs = ['.tox', 'utils', 'test', 'tests', 'git']
+ modules = []
+ for match in find_files(os.getcwd(), exclude_dirs, None, r'\.py$'):
+ package = os.path.basename(match).replace('.py', '')
+ modules.append(('openshift_ansible', package, match))
+ return modules
+
+ def get_finalized_command(self, cmd):
+ ''' override get_finalized_command to ensure we use our
+ find_all_modules method '''
+ if cmd == 'build_py':
+ return self
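+        # Any other command falls through and implicitly returns None; only
+        # 'build_py' is expected to be requested while linting.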
+
+ # Reason: This method needs to be an instance method to conform to the
+ # overridden method's signature
+ # Status: permanently disabled
+ # pylint: disable=no-self-use
+ def with_project_on_sys_path(self, func, func_args, func_kwargs):
+ ''' override behavior, since we don't need to build '''
+ return func(*func_args, **func_kwargs)
+
+
+class UnsupportedCommand(Command):
+ ''' Basic Command to override unsupported commands '''
+ user_options = []
+
+ # Reason: This method needs to be an instance method to conform to the
+ # overridden method's signature
+ # Status: permanently disabled
+ # pylint: disable=no-self-use
+ def initialize_options(self):
+ ''' initialize_options '''
+ pass
+
+ # Reason: This method needs to be an instance method to conform to the
+ # overridden method's signature
+ # Status: permanently disabled
+ # pylint: disable=no-self-use
+ def finalize_options(self):
+        ''' finalize_options '''
+ pass
+
+ # Reason: This method needs to be an instance method to conform to the
+ # overridden method's signature
+ # Status: permanently disabled
+ # pylint: disable=no-self-use
+ def run(self):
+ ''' run command '''
+ print("Unsupported command for openshift-ansible")
+
+
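+# The repository is not distributed as a Python package: the usual packaging
+# commands are stubbed out above, and this setup.py exists only to drive the
+# custom lint and yamllint commands.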
+setup(
+ name='openshift-ansible',
+ license="Apache 2.0",
+ cmdclass={
+ 'install': UnsupportedCommand,
+ 'develop': UnsupportedCommand,
+ 'build': UnsupportedCommand,
+ 'build_py': UnsupportedCommand,
+ 'build_ext': UnsupportedCommand,
+ 'egg_info': UnsupportedCommand,
+ 'sdist': UnsupportedCommand,
+ 'lint': OpenShiftAnsiblePylint,
+ 'yamllint': OpenShiftAnsibleYamlLint,
+ },
+ packages=[],
+)
diff --git a/test-requirements.txt b/test-requirements.txt
new file mode 100644
index 000000000..2ee1e657d
--- /dev/null
+++ b/test-requirements.txt
@@ -0,0 +1,11 @@
+six
+pyOpenSSL
+flake8
+flake8-mutable
+flake8-print
+pylint
+setuptools-lint
+PyYAML
+yamllint
+nose
+coverage
diff --git a/tox.ini b/tox.ini
new file mode 100644
index 000000000..c0e7732c3
--- /dev/null
+++ b/tox.ini
@@ -0,0 +1,19 @@
+[tox]
+minversion=2.3.1
+envlist =
+ py{27,35}-ansible22-{pylint,unit,flake8}
+ yamllint
+skipsdist=True
+skip_missing_interpreters=True
+
+[testenv]
+deps =
+ -rtest-requirements.txt
+ py35-flake8: flake8-bugbear
+ ansible22: ansible~=2.2
+
+commands =
+ flake8: flake8
+ pylint: python setup.py lint
+ yamllint: python setup.py yamllint
+ unit: nosetests
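+
+# Examples:
+#   tox -l                        # list the generated environments
+#   tox -e py27-ansible22-unit    # run a single environment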
diff --git a/utils/.pylintrc b/utils/.pylintrc
new file mode 120000
index 000000000..30b33b524
--- /dev/null
+++ b/utils/.pylintrc
@@ -0,0 +1 @@
+../.pylintrc
\ No newline at end of file
diff --git a/utils/Makefile b/utils/Makefile
index 2a37b922c..038c31fcf 100644
--- a/utils/Makefile
+++ b/utils/Makefile
@@ -46,7 +46,7 @@ clean:
@find . -type f \( -name "*~" -or -name "#*" \) -delete
@rm -fR build dist rpm-build MANIFEST htmlcov .coverage cover ooinstall.egg-info oo-install
@rm -fR $(VENV)
-
+ @rm -fR .tox
# To force a rebuild of the docs run 'touch' on any *.in file under
# docs/man/man1/
@@ -84,41 +84,27 @@ ci-unittests: $(VENV)
@echo "#############################################"
@echo "# Running Unit Tests in virtualenv"
@echo "#############################################"
- . $(VENV)/bin/activate && tox -e py27-unit
- . $(VENV)/bin/activate && tox -e py35-unit
+ . $(VENV)/bin/activate && detox -e py27-unit,py35-unit
@echo "VIEW CODE COVERAGE REPORT WITH 'xdg-open cover/index.html' or run 'make viewcover'"
ci-pylint: $(VENV)
@echo "#############################################"
@echo "# Running PyLint Tests in virtualenv"
@echo "#############################################"
- . $(VENV)/bin/activate && python -m pylint --rcfile ../git/.pylintrc $(PYFILES)
-
-ci-yamllint: $(VENV)
- @echo "#############################################"
- @echo "# Running yamllint Tests in virtualenv"
- @echo "#############################################"
- @. $(VENV)/bin/activate && yamllint -c ../git/.yamllint $(YAMLFILES)
-
-ci-list-deps: $(VENV)
- @echo "#############################################"
- @echo "# Listing all pip deps"
- @echo "#############################################"
- . $(VENV)/bin/activate && pip freeze
+ . $(VENV)/bin/activate && detox -e py27-pylint,py35-pylint
ci-flake8: $(VENV)
@echo "#############################################"
@echo "# Running Flake8 Compliance Tests in virtualenv"
@echo "#############################################"
- . $(VENV)/bin/activate && tox -e py27-flake8
- . $(VENV)/bin/activate && tox -e py35-flake8
+ . $(VENV)/bin/activate && detox -e py27-flake8,py35-flake8
-ci-tox:
- . $(VENV)/bin/activate && tox
+ci-tox: $(VENV)
+ . $(VENV)/bin/activate && detox
-ci: ci-list-deps ci-tox ci-pylint ci-yamllint
+ci: ci-tox
@echo
@echo "##################################################################################"
@echo "VIEW CODE COVERAGE REPORT WITH 'xdg-open cover/index.html' or run 'make viewcover'"
@echo "To clean your test environment run 'make clean'"
- @echo "Other targets you may run with 'make': 'ci-pylint', 'ci-tox', 'ci-unittests', 'ci-flake8', 'ci-yamllint'"
+ @echo "Other targets you may run with 'make': 'ci-pylint', 'ci-tox', 'ci-unittests', 'ci-flake8'"
diff --git a/utils/README.md b/utils/README.md
index 2abf2705e..c37ab41e6 100644
--- a/utils/README.md
+++ b/utils/README.md
@@ -6,6 +6,47 @@ Run the command:
to run an array of unittests locally.
+Underneath the covers, we use [tox](http://readthedocs.org/docs/tox/) to manage virtualenvs and run
+tests. Alternatively, tests can be run using [detox](https://pypi.python.org/pypi/detox/), which
+runs the test environments in parallel.
+
+
+```
+pip install tox detox
+```
+
+List the test environments available:
+```
+tox -l
+```
+
+Run all of the tests with:
+```
+tox
+```
+
+Run all of the tests in parallel with detox:
+```
+detox
+```
+
+Run a particular test environment (python 2.7 flake8 tests in this case):
+```
+tox -e py27-ansible22-flake8
+```
+
+Run a particular test environment in a clean virtualenv (python 3.5 pylint
+tests in this case):
+```
+tox -r -e py35-ansible22-pylint
+```
+
+If you want to enter the virtualenv created by tox to do additional
+testing/debugging (py27-flake8 env in this case):
+```
+source .tox/py27-ansible22-flake8/bin/activate
+```
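+
+Once activated, the commands tox would run for you can be invoked directly,
+for example:
+```
+python setup.py nosetests
+deactivate
+```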
+
You will get errors if the log files already exist and can not be
written to by the current user (`/tmp/ansible.log` and
`/tmp/installer.txt`). *We're working on it.*
diff --git a/utils/setup.cfg b/utils/setup.cfg
index ea07eea9f..862dffd7b 100644
--- a/utils/setup.cfg
+++ b/utils/setup.cfg
@@ -5,7 +5,6 @@
universal=1
[nosetests]
-tests=../,../roles/openshift_master_facts/test/,test/
verbosity=2
with-coverage=1
cover-html=1
@@ -19,3 +18,6 @@ cover-branches=1
max-line-length=120
exclude=test/*,setup.py,oo-installenv
ignore=E501
+
+[lint]
+lint_disable=fixme,locally-disabled,file-ignored,duplicate-code
diff --git a/utils/test-requirements.txt b/utils/test-requirements.txt
index e5c5360c3..f6a7bde10 100644
--- a/utils/test-requirements.txt
+++ b/utils/test-requirements.txt
@@ -1,6 +1,7 @@
ansible
configparser
pylint
+setuptools-lint
nose
coverage
mock
@@ -11,3 +12,4 @@ backports.functools_lru_cache
pyOpenSSL
yamllint
tox
+detox
diff --git a/utils/test/openshift_ansible_tests.py b/utils/test/openshift_ansible_tests.py
new file mode 100644
index 000000000..5847fe37b
--- /dev/null
+++ b/utils/test/openshift_ansible_tests.py
@@ -0,0 +1,71 @@
+import os
+import unittest
+import tempfile
+import shutil
+
+from six.moves import configparser
+
+from ooinstall import openshift_ansible
+from ooinstall.oo_config import Host, OOConfig
+
+
+BASE_CONFIG = """
+---
+variant: openshift-enterprise
+variant_version: 3.3
+version: v2
+deployment:
+ ansible_ssh_user: cloud-user
+ hosts: []
+ roles:
+ master:
+ node:
+"""
+
+
+class TestOpenShiftAnsible(unittest.TestCase):
+
+ def setUp(self):
+ self.tempfiles = []
+ self.work_dir = tempfile.mkdtemp(prefix='openshift_ansible_tests')
+ self.configfile = os.path.join(self.work_dir, 'ooinstall.config')
+ with open(self.configfile, 'w') as config_file:
+ config_file.write(BASE_CONFIG)
+ self.inventory = os.path.join(self.work_dir, 'hosts')
+ config = OOConfig(self.configfile)
+ config.settings['ansible_inventory_path'] = self.inventory
+ openshift_ansible.set_config(config)
+
+ def tearDown(self):
+ shutil.rmtree(self.work_dir)
+
+ def generate_hosts(self, num_hosts, name_prefix, roles=None, new_host=False):
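+        ''' Create num_hosts Host objects named <name_prefix>1..<name_prefix>N. '''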
+ hosts = []
+ for num in range(1, num_hosts + 1):
+ hosts.append(Host(connect_to=name_prefix + str(num),
+ roles=roles, new_host=new_host))
+ return hosts
+
+ def test_generate_inventory_new_nodes(self):
+        hosts = self.generate_hosts(1, 'master', roles=['master', 'etcd'])
+ hosts.extend(self.generate_hosts(1, 'node', roles=['node']))
+ hosts.extend(self.generate_hosts(1, 'new_node', roles=['node'], new_host=True))
+ openshift_ansible.generate_inventory(hosts)
+ inventory = configparser.ConfigParser(allow_no_value=True)
+ inventory.read(self.inventory)
+ self.assertTrue(inventory.has_section('new_nodes'))
+ self.assertTrue(inventory.has_option('new_nodes', 'new_node1'))
+
+ def test_write_inventory_vars_role_vars(self):
+ with open(self.inventory, 'w') as inv:
+            openshift_ansible.CFG.deployment.roles['master'].variables = {'color': 'blue'}
+            openshift_ansible.CFG.deployment.roles['node'].variables = {'color': 'green'}
+ openshift_ansible.write_inventory_vars(inv, None)
+
+ inventory = configparser.ConfigParser(allow_no_value=True)
+ inventory.read(self.inventory)
+ self.assertTrue(inventory.has_section('masters:vars'))
+        self.assertEqual('blue', inventory.get('masters:vars', 'color'))
+        self.assertTrue(inventory.has_section('nodes:vars'))
+        self.assertEqual('green', inventory.get('nodes:vars', 'color'))
diff --git a/utils/test/test_utils.py b/utils/test/test_utils.py
index b18f85692..cbce64f7e 100644
--- a/utils/test/test_utils.py
+++ b/utils/test/test_utils.py
@@ -38,7 +38,6 @@ class TestUtils(unittest.TestCase):
with mock.patch('ooinstall.utils.installer_log') as _il:
debug_env(self.debug_all_params)
- print(_il.debug.call_args_list)
# Debug was called for each item we expect
self.assertEqual(
diff --git a/utils/tox.ini b/utils/tox.ini
index 747d79dfe..1308f7505 100644
--- a/utils/tox.ini
+++ b/utils/tox.ini
@@ -1,7 +1,7 @@
[tox]
minversion=2.3.1
envlist =
- py{27,35}-{flake8,unit}
+ py{27,35}-{flake8,unit,pylint}
skipsdist=True
skip_missing_interpreters=True
@@ -10,8 +10,7 @@ usedevelop=True
deps =
-rtest-requirements.txt
py35-flake8: flake8-bugbear
-
commands =
- flake8: flake8 --config=setup.cfg ../ --exclude="../utils,.tox,../inventory"
flake8: python setup.py flake8
unit: python setup.py nosetests
+ pylint: python setup.py lint