-rw-r--r--  .coveragerc | 5
-rw-r--r--  .dockerignore | 8
-rw-r--r--  .flake8 | 5
-rw-r--r--  .pylintrc | 2
-rw-r--r--  .travis.yml | 30
-rw-r--r--  CONTRIBUTING.md | 9
-rw-r--r--  Dockerfile | 47
-rw-r--r--  Dockerfile.rhel7 | 26
-rw-r--r--  README.md | 4
-rw-r--r--  README_CONTAINER_IMAGE.md | 41
-rw-r--r--  inventory/byo/hosts.origin.example | 46
-rw-r--r--  inventory/byo/hosts.ose.example | 44
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml | 2
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml | 2
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml | 2
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml | 2
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml | 2
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml | 2
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml | 6
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml | 6
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/config.yml | 3
-rw-r--r--  playbooks/common/openshift-cluster/disable_excluder.yml | 14
-rw-r--r--  playbooks/common/openshift-cluster/initialize_openshift_version.yml | 3
-rw-r--r--  playbooks/common/openshift-cluster/redeploy-certificates/router.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/reset_excluder.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/disable_excluder.yml | 21
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/validate_excluder.yml | 22
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml | 24
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_5/validator.yml | 18
-rw-r--r--  pytest.ini | 16
-rw-r--r--  requirements.txt | 7
-rw-r--r--  roles/docker/templates/custom.conf.j2 | 2
-rw-r--r--  roles/lib_openshift/library/oc_objectvalidator.py (renamed from roles/lib_openshift/library/oc_sdnvalidator.py) | 80
-rw-r--r--  roles/lib_openshift/library/oc_project.py | 1671
-rw-r--r--  roles/lib_openshift/src/ansible/oc_objectvalidator.py (renamed from roles/lib_openshift/src/ansible/oc_sdnvalidator.py) | 4
-rw-r--r--  roles/lib_openshift/src/ansible/oc_project.py | 33
-rw-r--r--  roles/lib_openshift/src/class/oc_objectvalidator.py | 86
-rw-r--r--  roles/lib_openshift/src/class/oc_project.py | 185
-rw-r--r--  roles/lib_openshift/src/class/oc_sdnvalidator.py | 58
-rw-r--r--  roles/lib_openshift/src/doc/objectvalidator (renamed from roles/lib_openshift/src/doc/sdnvalidator) | 14
-rw-r--r--  roles/lib_openshift/src/doc/project | 81
-rw-r--r--  roles/lib_openshift/src/lib/project.py | 85
-rw-r--r--  roles/lib_openshift/src/sources.yml | 19
-rwxr-xr-x  roles/lib_openshift/src/test/integration/oc_project.yml | 83
-rwxr-xr-x  roles/lib_openshift/src/test/unit/oc_sdnvalidator.py | 481
-rwxr-xr-x  roles/lib_openshift/src/test/unit/test_oadm_manage_node.py | 20
-rwxr-xr-x  roles/lib_openshift/src/test/unit/test_oc_env.py | 20
-rwxr-xr-x  roles/lib_openshift/src/test/unit/test_oc_label.py | 20
-rwxr-xr-x  roles/lib_openshift/src/test/unit/test_oc_objectvalidator.py | 903
-rwxr-xr-x  roles/lib_openshift/src/test/unit/test_oc_process.py | 20
-rwxr-xr-x  roles/lib_openshift/src/test/unit/test_oc_project.py | 110
-rwxr-xr-x  roles/lib_openshift/src/test/unit/test_oc_route.py | 20
-rwxr-xr-x  roles/lib_openshift/src/test/unit/test_oc_scale.py | 20
-rwxr-xr-x  roles/lib_openshift/src/test/unit/test_oc_secret.py | 20
-rwxr-xr-x  roles/lib_openshift/src/test/unit/test_oc_service.py | 20
-rwxr-xr-x  roles/lib_openshift/src/test/unit/test_oc_serviceaccount.py | 20
-rwxr-xr-x  roles/lib_openshift/src/test/unit/test_oc_serviceaccount_secret.py | 20
-rwxr-xr-x  roles/lib_openshift/src/test/unit/test_oc_version.py | 20
-rwxr-xr-x  roles/lib_utils/src/test/unit/test_repoquery.py | 20
-rwxr-xr-x  roles/lib_utils/src/test/unit/test_yedit.py | 14
-rw-r--r--  roles/openshift_excluder/README.md | 17
-rw-r--r--  roles/openshift_excluder/defaults/main.yml | 4
-rw-r--r--  roles/openshift_excluder/tasks/adjust.yml | 23
-rw-r--r--  roles/openshift_excluder/tasks/disable.yml | 26
-rw-r--r--  roles/openshift_excluder/tasks/enable.yml | 21
-rw-r--r--  roles/openshift_excluder/tasks/exclude.yml | 27
-rw-r--r--  roles/openshift_excluder/tasks/init.yml | 12
-rw-r--r--  roles/openshift_excluder/tasks/install.yml | 29
-rw-r--r--  roles/openshift_excluder/tasks/reset.yml | 12
-rw-r--r--  roles/openshift_excluder/tasks/status.yml | 103
-rw-r--r--  roles/openshift_excluder/tasks/unexclude.yml | 23
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py | 9
-rw-r--r--  roles/openshift_facts/tasks/main.yml | 9
-rw-r--r--  roles/openshift_facts/vars/main.yml | 5
-rw-r--r--  roles/openshift_hosted/defaults/main.yml | 28
-rw-r--r--  roles/openshift_hosted/tasks/router/router.yml | 87
-rw-r--r--  roles/openshift_logging/meta/main.yaml | 1
-rw-r--r--  roles/openshift_master_facts/test/conftest.py | 2
-rw-r--r--  roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py | 15
-rw-r--r--  roles/openshift_master_facts/test/openshift_master_facts_default_priorities_tests.py | 15
-rw-r--r--  roles/openshift_node/templates/openshift.docker.node.service | 2
-rw-r--r--  roles/openshift_node_upgrade/templates/openshift.docker.node.service | 2
-rw-r--r--  setup.cfg | 27
-rw-r--r--  test-requirements.txt | 4
-rw-r--r--  tox.ini | 11
-rw-r--r--  utils/.coveragerc | 18
l---------  utils/.pylintrc | 1
-rw-r--r--  utils/Makefile | 110
-rw-r--r--  utils/README.md | 61
-rw-r--r--  utils/setup.cfg | 27
-rw-r--r--  utils/setup.py | 11
-rw-r--r--  utils/test-requirements.txt | 15
-rw-r--r--  utils/test/cli_installer_tests.py | 102
-rw-r--r--  utils/test/fixture.py | 13
-rw-r--r--  utils/test/oo_config_tests.py | 39
-rw-r--r--  utils/test/openshift_ansible_tests.py | 26
-rw-r--r--  utils/test/test_utils.py | 8
-rw-r--r--  utils/tox.ini | 19
99 files changed, 4036 insertions, 1502 deletions
diff --git a/.coveragerc b/.coveragerc
index 6f33e0bfe..00f46b61b 100644
--- a/.coveragerc
+++ b/.coveragerc
@@ -4,16 +4,17 @@ omit =
*/lib/python*/site-packages/*
*/lib/python*/*
/usr/*
- setup.py
+ */setup.py
# TODO(rhcarvalho): this is used to ignore test files from coverage report.
# We can make this less generic when we stick with a single test pattern in
# the repo.
*/conftest.py
*/test_*.py
*/*_tests.py
+ */test/*
[report]
-fail_under = 26
+fail_under = 29
[html]
directory = cover
diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 000000000..968811df5
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1,8 @@
+.*
+bin
+docs
+test
+utils
+**/*.md
+*.spec
+setup*
diff --git a/.flake8 b/.flake8
new file mode 100644
index 000000000..99ae3c2f0
--- /dev/null
+++ b/.flake8
@@ -0,0 +1,5 @@
+[flake8]
+# TODO: cleanup flake8 issues with utils/test/*
+exclude=.tox,inventory,utils/test
+max_line_length = 120
+ignore = E501,T003
diff --git a/.pylintrc b/.pylintrc
index fd6c6d0bd..e85987de3 100644
--- a/.pylintrc
+++ b/.pylintrc
@@ -60,7 +60,7 @@ confidence=
# --enable=similarities". If you want to run only the classes checker, but have
# no Warning level messages displayed, use"--disable=all --enable=classes
# --disable=W"
-#disable=
+disable=fixme,locally-disabled,file-ignored,duplicate-code
[REPORTS]
diff --git a/.travis.yml b/.travis.yml
index f0a228c23..0698b0280 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -4,17 +4,37 @@ sudo: false
cache:
- pip
+before_cache:
+ - rm ~/.cache/pip/log/debug.log
+
language: python
python:
- "2.7"
- "3.5"
install:
- - pip install -r requirements.txt
- - pip install tox-travis
+ - pip install tox-travis coveralls
script:
- # TODO(rhcarvalho): check syntax of other important entrypoint playbooks
- - ansible-playbook --syntax-check playbooks/byo/config.yml
- tox
- - cd utils && tox
+
+after_success:
+ - coveralls
+
+notifications:
+ email:
+ recipients:
+ - jdetiber@redhat.com
+ - sdodson@redhat.com
+ on_success: change
+ on_failure: always
+ irc:
+ channels:
+ - chat.freenode.net#openshift-dev
+ on_success: change
+ on_failure: always
+ template:
+ - "%{repository}#%{build_number} (%{branch} - %{commit} : %{author}): %{message}"
+ - "Change view : %{compare_url}"
+ - "Build details : %{build_url}"
+ - "sdodson jdetiber: ^"
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 502ef6aa5..12f3efc09 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -72,9 +72,6 @@ See the [RPM build instructions](BUILD.md).
## Running tests
-This section covers how to run tests for the root of this repo, running tests
-for the oo-install wrapper is described in [utils/README.md](utils/README.md).
-
We use [tox](http://readthedocs.org/docs/tox/) to manage virtualenvs and run
tests. Alternatively, tests can be run using
[detox](https://pypi.python.org/pypi/detox/) which allows for running tests in
@@ -120,19 +117,19 @@ detox
Running a particular test environment (python 2.7 flake8 tests in this case):
```
-tox -e py27-ansible22-flake8
+tox -e py27-flake8
```
Running a particular test environment in a clean virtualenv (python 3.5 pylint
tests in this case):
```
-tox -r -e py35-ansible22-pylint
+tox -r -e py35-pylint
```
If you want to enter the virtualenv created by tox to do additional
testing/debugging (py27-flake8 env in this case):
```
-source .tox/py27-ansible22-flake8/bin/activate
+source .tox/py27-flake8/bin/activate
```
## Submitting contributions
diff --git a/Dockerfile b/Dockerfile
index f3d45837a..c6593491d 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,26 +1,33 @@
-FROM rhel7
+# Using playbook2image as a base
+# See https://github.com/aweiteka/playbook2image for details on the image
+# including documentation for the settings/env vars referenced below
+FROM docker.io/aweiteka/playbook2image:latest
-MAINTAINER Troy Dawson <tdawson@redhat.com>
+MAINTAINER OpenShift Team <dev@lists.openshift.redhat.com>
-LABEL Name="openshift3/installer"
-LABEL Vendor="Red Hat" License=GPLv2+
-LABEL Version="v3.1.1.901"
-LABEL Release="6"
-LABEL BZComponent="aos3-installation-docker"
-LABEL Architecture="x86_64"
-LABEL io.k8s.description="Ansible code and playbooks for installing Openshift Container Platform." \
- io.k8s.display-name="Openshift Installer" \
- io.openshift.tags="openshift,installer"
+LABEL name="openshift-ansible" \
+ summary="OpenShift's installation and configuration tool" \
+ description="A containerized openshift-ansible image to let you run playbooks to install, upgrade, maintain and check an OpenShift cluster" \
+ url="https://github.com/openshift/openshift-ansible" \
+ io.k8s.display-name="openshift-ansible" \
+ io.k8s.description="A containerized openshift-ansible image to let you run playbooks to install, upgrade, maintain and check an OpenShift cluster" \
+ io.openshift.expose-services="" \
+ io.openshift.tags="openshift,install,upgrade,ansible"
-RUN INSTALL_PKGS="atomic-openshift-utils" && \
- yum install -y --enablerepo=rhel-7-server-ose-3.2-rpms $INSTALL_PKGS && \
- rpm -V $INSTALL_PKGS && \
- yum clean all
+# The playbook to be run is specified via the PLAYBOOK_FILE env var.
+# This sets a default of openshift_facts.yml as it's an informative playbook
+# that can help test that everything is set properly (inventory, sshkeys)
+ENV PLAYBOOK_FILE=playbooks/byo/openshift_facts.yml \
+ OPTS="-v" \
+ INSTALL_OC=true
-# Expect user to mount a workdir for container output (installer.cfg, hosts inventory, ansible log)
-VOLUME /var/lib/openshift-installer/
-WORKDIR /var/lib/openshift-installer/
+# playbook2image's assemble script expects the source to be available in
+# /tmp/src (as per the source-to-image specs) so we import it there
+ADD . /tmp/src
-RUN mkdir -p /var/lib/openshift-installer/
+# Running the 'assemble' script provided by playbook2image will install
+# dependencies specified in requirements.txt and install the 'oc' client
+# as per the INSTALL_OC environment setting above
+RUN /usr/libexec/s2i/assemble
-ENTRYPOINT ["/usr/bin/atomic-openshift-installer", "-c", "/var/lib/openshift-installer/installer.cfg", "--ansible-log-path", "/var/lib/openshift-installer/ansible.log"]
+CMD [ "/usr/libexec/s2i/run" ]
diff --git a/Dockerfile.rhel7 b/Dockerfile.rhel7
new file mode 100644
index 000000000..f3d45837a
--- /dev/null
+++ b/Dockerfile.rhel7
@@ -0,0 +1,26 @@
+FROM rhel7
+
+MAINTAINER Troy Dawson <tdawson@redhat.com>
+
+LABEL Name="openshift3/installer"
+LABEL Vendor="Red Hat" License=GPLv2+
+LABEL Version="v3.1.1.901"
+LABEL Release="6"
+LABEL BZComponent="aos3-installation-docker"
+LABEL Architecture="x86_64"
+LABEL io.k8s.description="Ansible code and playbooks for installing Openshift Container Platform." \
+ io.k8s.display-name="Openshift Installer" \
+ io.openshift.tags="openshift,installer"
+
+RUN INSTALL_PKGS="atomic-openshift-utils" && \
+ yum install -y --enablerepo=rhel-7-server-ose-3.2-rpms $INSTALL_PKGS && \
+ rpm -V $INSTALL_PKGS && \
+ yum clean all
+
+# Expect user to mount a workdir for container output (installer.cfg, hosts inventory, ansible log)
+VOLUME /var/lib/openshift-installer/
+WORKDIR /var/lib/openshift-installer/
+
+RUN mkdir -p /var/lib/openshift-installer/
+
+ENTRYPOINT ["/usr/bin/atomic-openshift-installer", "-c", "/var/lib/openshift-installer/installer.cfg", "--ansible-log-path", "/var/lib/openshift-installer/ansible.log"]
diff --git a/README.md b/README.md
index c3c022e59..3ec6555e8 100644
--- a/README.md
+++ b/README.md
@@ -1,5 +1,6 @@
[![Join the chat at https://gitter.im/openshift/openshift-ansible](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/openshift/openshift-ansible)
[![Build Status](https://travis-ci.org/openshift/openshift-ansible.svg?branch=master)](https://travis-ci.org/openshift/openshift-ansible)
+[![Coverage Status](https://coveralls.io/repos/github/openshift/openshift-ansible/badge.svg?branch=master)](https://coveralls.io/github/openshift/openshift-ansible?branch=master)
# OpenShift Ansible
@@ -74,6 +75,9 @@ you are not running a stable release.
- [OpenShift Enterprise](https://docs.openshift.com/enterprise/latest/install_config/install/advanced_install.html)
- [OpenShift Origin](https://docs.openshift.org/latest/install_config/install/advanced_install.html)
+## Containerized OpenShift Ansible
+
+See [README_CONTAINER_IMAGE.md](README_CONTAINER_IMAGE.md) for information on how to package openshift-ansible as a container image.
## Installer Hooks
diff --git a/README_CONTAINER_IMAGE.md b/README_CONTAINER_IMAGE.md
new file mode 100644
index 000000000..f62fc2ab9
--- /dev/null
+++ b/README_CONTAINER_IMAGE.md
@@ -0,0 +1,41 @@
+# Containerized openshift-ansible to run playbooks
+
+The [Dockerfile](Dockerfile) in this repository uses the [playbook2image](https://github.com/aweiteka/playbook2image) source-to-image base image to containerize `openshift-ansible`. The resulting image can run any of the provided playbooks.
+
+**Note**: at this time there are known issues that prevent running this image for installation/upgrade purposes from within a host that is also an installation target: if the playbook you run attempts to manage and restart the docker daemon (as install/upgrade playbooks do), it will kill the container itself mid-operation.
+
+## Build
+
+To build a container image of `openshift-ansible`:
+
+1. Using standalone **Docker**:
+
+ cd openshift-ansible
+ docker build -t openshift-ansible .
+
+1. Using an **OpenShift** build:
+
+ oc new-build docker.io/aweiteka/playbook2image~https://github.com/openshift/openshift-ansible
+ oc describe imagestream openshift-ansible
+
+## Usage
+
+The base image provides several options to control the behaviour of the containers. For more details on these options see the [playbook2image](https://github.com/aweiteka/playbook2image) documentation.
+
+At the very least, when running a container using an image built this way you must specify:
+
+1. The **playbook** to run. This is set using the `PLAYBOOK_FILE` environment variable.
+1. An **inventory** file. This can be mounted inside the container as a volume and specified with the `INVENTORY_FILE` environment variable. Alternatively you can serve the inventory file from a web server and use the `INVENTORY_URL` environment variable to fetch it.
+1. **ssh keys** so that Ansible can reach your hosts. These should be mounted as a volume under `/opt/app-root/src/.ssh`
+
+Here is an example of how to run a containerized `openshift-ansible` playbook that will check the expiration dates of OpenShift's internal certificates using the [`openshift_certificate_expiry` role](../../roles/openshift_certificate_expiry). The inventory and ssh keys are mounted as volumes (the latter requires setting the uid in the container and SELinux label in the key file via `:Z` so they can be accessed) and the `PLAYBOOK_FILE` environment variable is set to point to an example certificate check playbook that is already part of the image:
+
+ docker run -u `id -u` \
+ -v $HOME/.ssh/id_rsa:/opt/app-root/src/.ssh/id_rsa:Z \
+ -v /etc/ansible/hosts:/tmp/inventory \
+ -e INVENTORY_FILE=/tmp/inventory \
+ -e OPTS="-v" \
+ -e PLAYBOOK_FILE=playbooks/certificate_expiry/default.yaml \
+ openshift-ansible
+
+The [playbook2image examples](https://github.com/aweiteka/playbook2image/tree/master/examples) provide additional information on how to use a built image.
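Beyond the volume-mounted inventory shown above, the playbook2image base also documents an `INVENTORY_URL` variable for fetching the inventory over HTTP. A hedged variant of the example (the URL below is a placeholder, and `PLAYBOOK_FILE` points at the image's default informative playbook set in the Dockerfile):

    docker run -u `id -u` \
        -v $HOME/.ssh/id_rsa:/opt/app-root/src/.ssh/id_rsa:Z \
        -e INVENTORY_URL=https://example.com/inventory \
        -e PLAYBOOK_FILE=playbooks/byo/openshift_facts.yml \
        -e OPTS="-v" \
        openshift-ansible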
diff --git a/inventory/byo/hosts.origin.example b/inventory/byo/hosts.origin.example
index 7741730ad..0ddca6576 100644
--- a/inventory/byo/hosts.origin.example
+++ b/inventory/byo/hosts.origin.example
@@ -300,7 +300,51 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#
# Disable management of the OpenShift Router
#openshift_hosted_manage_router=false
-
+#
+# Router sharding support has been added and can be achieved by supplying the correct
+# data to the inventory. The variable to house the data is openshift_hosted_routers
+# and is in the form of a list. If no data is passed then a default router will be
+# created. There are multiple combinations of router sharding. The one described
+# below supports routers on separate nodes.
+#openshift_hosted_routers:
+#- name: router1
+# stats_port: 1936
+# ports:
+# - 80:80
+# - 443:443
+# replicas: 1
+# namespace: default
+# serviceaccount: router
+# selector: type=router1
+# images: "openshift3/ose-${component}:${version}"
+# edits: []
+# certificates:
+# certfile: /path/to/certificate/abc.crt
+# keyfile: /path/to/certificate/abc.key
+# cafile: /path/to/certificate/ca.crt
+#- name: router2
+# stats_port: 1936
+# ports:
+# - 80:80
+# - 443:443
+# replicas: 1
+# namespace: default
+# serviceaccount: router
+# selector: type=router2
+# images: "openshift3/ose-${component}:${version}"
+# certificates:
+# certfile: /path/to/certificate/xyz.crt
+# keyfile: /path/to/certificate/xyz.key
+# cafile: /path/to/certificate/ca.crt
+# edits:
+# # ROUTE_LABELS sets the router to listen for routes
+# # tagged with the provided values
+# - key: spec.template.spec.containers[0].env
+# value:
+# name: ROUTE_LABELS
+# value: "route=external"
+# action: append
+#
# OpenShift Registry Console Options
# Override the console image prefix for enterprise deployments, not used in origin
# default is "registry.access.redhat.com/openshift3/" and the image appended is "registry-console"
diff --git a/inventory/byo/hosts.ose.example b/inventory/byo/hosts.ose.example
index 3da9be081..7f80a9639 100644
--- a/inventory/byo/hosts.ose.example
+++ b/inventory/byo/hosts.ose.example
@@ -300,6 +300,50 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#
# Disable management of the OpenShift Router
#openshift_hosted_manage_router=false
+#
+# Router sharding support has been added and can be achieved by supplying the correct
+# data to the inventory. The variable to house the data is openshift_hosted_routers
+# and is in the form of a list. If no data is passed then a default router will be
+# created. There are multiple combinations of router sharding. The one described
+# below supports routers on separate nodes.
+#openshift_hosted_routers:
+#- name: router1
+# stats_port: 1936
+# ports:
+# - 80:80
+# - 443:443
+# replicas: 1
+# namespace: default
+# serviceaccount: router
+# selector: type=router1
+# images: "openshift3/ose-${component}:${version}"
+# edits: []
+# certificates:
+# certfile: /path/to/certificate/abc.crt
+# keyfile: /path/to/certificate/abc.key
+# cafile: /path/to/certificate/ca.crt
+#- name: router2
+# stats_port: 1936
+# ports:
+# - 80:80
+# - 443:443
+# replicas: 1
+# namespace: default
+# serviceaccount: router
+# selector: type=router2
+# images: "openshift3/ose-${component}:${version}"
+# certificates:
+# certfile: /path/to/certificate/xyz.crt
+# keyfile: /path/to/certificate/xyz.key
+# cafile: /path/to/certificate/ca.crt
+# edits:
+# # ROUTE_LABELS sets the router to listen for routes
+# # tagged with the provided values
+# - key: spec.template.spec.containers[0].env
+# value:
+# name: ROUTE_LABELS
+# value: "route=external"
+# action: append
# OpenShift Registry Console Options
# Override the console image prefix for enterprise deployments, not used in origin
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml
index b1510e062..d268850d8 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml
@@ -46,7 +46,7 @@
tags:
- pre_upgrade
-- include: ../../../../common/openshift-cluster/disable_excluder.yml
+- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml
tags:
- pre_upgrade
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
index b61d9e58a..d11e51640 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
@@ -54,7 +54,7 @@
tags:
- pre_upgrade
-- include: ../../../../common/openshift-cluster/disable_excluder.yml
+- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml
tags:
- pre_upgrade
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
index f0b2a2c75..5a0f143ac 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
@@ -47,7 +47,7 @@
tags:
- pre_upgrade
-- include: ../../../../common/openshift-cluster/disable_excluder.yml
+- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml
tags:
- pre_upgrade
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml
index 82a1d0935..25d8cd2ba 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml
@@ -46,7 +46,7 @@
tags:
- pre_upgrade
-- include: ../../../../common/openshift-cluster/disable_excluder.yml
+- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml
tags:
- pre_upgrade
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
index 7ae1b3e6e..d52f3c111 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
@@ -54,7 +54,7 @@
tags:
- pre_upgrade
-- include: ../../../../common/openshift-cluster/disable_excluder.yml
+- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml
tags:
- pre_upgrade
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml
index ec63ea60e..07c734a40 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml
@@ -47,7 +47,7 @@
tags:
- pre_upgrade
-- include: ../../../../common/openshift-cluster/disable_excluder.yml
+- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml
tags:
- pre_upgrade
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml
index 69cabcd33..e4db65b02 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml
@@ -46,7 +46,7 @@
tags:
- pre_upgrade
-- include: ../../../../common/openshift-cluster/disable_excluder.yml
+- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml
tags:
- pre_upgrade
@@ -82,6 +82,10 @@
tags:
- pre_upgrade
+- include: ../../../../common/openshift-cluster/upgrades/v3_5/validator.yml
+ tags:
+ - pre_upgrade
+
- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml
tags:
- pre_upgrade
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
index 719057d2b..a2f1cd2b1 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
@@ -54,7 +54,7 @@
tags:
- pre_upgrade
-- include: ../../../../common/openshift-cluster/disable_excluder.yml
+- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml
tags:
- pre_upgrade
@@ -90,6 +90,10 @@
tags:
- pre_upgrade
+- include: ../../../../common/openshift-cluster/upgrades/v3_5/validator.yml
+ tags:
+ - pre_upgrade
+
- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml
tags:
- pre_upgrade
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml
index 259be6f8e..f858de3d5 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml
@@ -47,7 +47,7 @@
tags:
- pre_upgrade
-- include: ../../../../common/openshift-cluster/disable_excluder.yml
+- include: ../../../../common/openshift-cluster/upgrades/disable_excluder.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml
index 113b401f9..82f711f40 100644
--- a/playbooks/common/openshift-cluster/config.yml
+++ b/playbooks/common/openshift-cluster/config.yml
@@ -27,6 +27,9 @@
when: openshift_docker_selinux_enabled is not defined
- include: disable_excluder.yml
+ vars:
+    # the excluders need to be disabled no matter what the status check says
+ with_status_check: false
tags:
- always
diff --git a/playbooks/common/openshift-cluster/disable_excluder.yml b/playbooks/common/openshift-cluster/disable_excluder.yml
index eb146bab8..b2e025cb8 100644
--- a/playbooks/common/openshift-cluster/disable_excluder.yml
+++ b/playbooks/common/openshift-cluster/disable_excluder.yml
@@ -3,9 +3,15 @@
hosts: l_oo_all_hosts
gather_facts: no
tasks:
+
+ # During installation the excluders are installed with present state.
+ # So no pre-validation check here as the excluders are either to be installed (present = latest)
+ # or they are not going to be updated if already installed
+
+ # disable excluders based on their status
- include_role:
name: openshift_excluder
- tasks_from: status
- - include_role:
- name: openshift_excluder
- tasks_from: unexclude
+ tasks_from: disable
+ vars:
+ openshift_excluder_package_state: present
+ docker_excluder_package_state: present
diff --git a/playbooks/common/openshift-cluster/initialize_openshift_version.yml b/playbooks/common/openshift-cluster/initialize_openshift_version.yml
index 6b40176e1..7f37c606f 100644
--- a/playbooks/common/openshift-cluster/initialize_openshift_version.yml
+++ b/playbooks/common/openshift-cluster/initialize_openshift_version.yml
@@ -19,6 +19,9 @@
when: "not openshift.common.is_atomic | bool and 'Plugin \"search-disabled-repos\" requires API 2.7. Supported API is 2.6.' in yum_ver_test.stdout"
- include: disable_excluder.yml
+ vars:
+    # the excluders need to be disabled no matter what the status check says
+ with_status_check: false
tags:
- always
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/router.yml b/playbooks/common/openshift-cluster/redeploy-certificates/router.yml
index 35eedd5ee..a7b614341 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/router.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/router.yml
@@ -51,7 +51,7 @@
name: router-certs
namespace: default
state: absent
- run_once: true
+ run_once: true
- name: Remove router service annotations
command: >
diff --git a/playbooks/common/openshift-cluster/reset_excluder.yml b/playbooks/common/openshift-cluster/reset_excluder.yml
index fe86f4c23..7c544ee32 100644
--- a/playbooks/common/openshift-cluster/reset_excluder.yml
+++ b/playbooks/common/openshift-cluster/reset_excluder.yml
@@ -5,4 +5,4 @@
tasks:
- include_role:
name: openshift_excluder
- tasks_from: reset
+ tasks_from: enable
diff --git a/playbooks/common/openshift-cluster/upgrades/disable_excluder.yml b/playbooks/common/openshift-cluster/upgrades/disable_excluder.yml
new file mode 100644
index 000000000..2a85dc92e
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/disable_excluder.yml
@@ -0,0 +1,21 @@
+---
+- name: Record excluder state and disable
+ hosts: l_oo_all_hosts
+ gather_facts: no
+ tasks:
+ - include: pre/validate_excluder.yml
+ vars:
+ #repoquery_cmd: repoquery_cmd
+ #openshift_upgrade_target: openshift_upgrade_target
+ excluder: "{{ item }}"
+ with_items:
+ - "{{ openshift.common.service_type }}-docker-excluder"
+ - "{{ openshift.common.service_type }}-excluder"
+
+ # disable excluders based on their status
+ - include_role:
+ name: openshift_excluder
+ tasks_from: disable
+ vars:
+ openshift_excluder_package_state: latest
+ docker_excluder_package_state: latest
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/validate_excluder.yml b/playbooks/common/openshift-cluster/upgrades/pre/validate_excluder.yml
new file mode 100644
index 000000000..5078638b7
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/pre/validate_excluder.yml
@@ -0,0 +1,22 @@
+---
+# input variables:
+# - repoquery_cmd
+# - excluder
+# - openshift_upgrade_target
+- name: Get available excluder version
+ command: >
+ {{ repoquery_cmd }} --qf '%{version}' "{{ excluder }}"
+ register: excluder_version
+ failed_when: false
+ changed_when: false
+
+- name: Docker excluder version detected
+ debug:
+ msg: "{{ excluder }}: {{ excluder_version.stdout }}"
+
+- name: Check that the available {{ excluder }} version is at most the upgrade target version
+ fail:
+ msg: "Available {{ excluder }} version {{ excluder_version.stdout }} is higher than the upgrade target version {{ openshift_upgrade_target }}"
+ when:
+ - "{{ excluder_version.stdout != '' }}"
+ - "{{ excluder_version.stdout.split('.')[0:2] | join('.') | version_compare(openshift_upgrade_target, '>', strict=True) }}"
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index fd01a6625..babb7191d 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -173,7 +173,11 @@
- name: Reconcile Cluster Roles
command: >
{{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
- policy reconcile-cluster-roles --additive-only=true --confirm
+ policy reconcile-cluster-roles --additive-only=true --confirm -o name
+ register: reconcile_cluster_role_result
+ changed_when:
+ - reconcile_cluster_role_result.stdout != ''
+ - reconcile_cluster_role_result.rc == 0
run_once: true
- name: Reconcile Cluster Role Bindings
@@ -184,19 +188,31 @@
--exclude-groups=system:authenticated:oauth
--exclude-groups=system:unauthenticated
--exclude-users=system:anonymous
- --additive-only=true --confirm
+ --additive-only=true --confirm -o name
when: origin_reconcile_bindings | bool or ent_reconcile_bindings | bool
+ register: reconcile_bindings_result
+ changed_when:
+ - reconcile_bindings_result.stdout != ''
+ - reconcile_bindings_result.rc == 0
run_once: true
- name: Reconcile Jenkins Pipeline Role Bindings
command: >
- {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig policy reconcile-cluster-role-bindings system:build-strategy-jenkinspipeline --confirm
+ {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig policy reconcile-cluster-role-bindings system:build-strategy-jenkinspipeline --confirm -o name
run_once: true
+ register: reconcile_jenkins_role_binding_result
+ changed_when:
+ - reconcile_jenkins_role_binding_result.stdout != ''
+ - reconcile_jenkins_role_binding_result.rc == 0
when: openshift.common.version_gte_3_4_or_1_4 | bool
- name: Reconcile Security Context Constraints
command: >
- {{ openshift.common.client_binary }} adm policy reconcile-sccs --confirm --additive-only=true
+ {{ openshift.common.client_binary }} adm policy reconcile-sccs --confirm --additive-only=true -o name
+ register: reconcile_scc_result
+ changed_when:
+ - reconcile_scc_result.stdout != ''
+ - reconcile_scc_result.rc == 0
run_once: true
- set_fact:
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/validator.yml
new file mode 100644
index 000000000..13fd917c5
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_5/validator.yml
@@ -0,0 +1,18 @@
+---
+###############################################################################
+# Pre-upgrade checks for known data problems. If this playbook fails, you should
+# contact support. If you are not a supported customer, contact users@lists.openshift.com
+#
+# oc_objectvalidator provides these two checks
+# 1 - SDN Data issues, never seen in the wild but known possible due to code audits
+# https://github.com/openshift/origin/issues/12697
+# 2 - Namespace protections, https://bugzilla.redhat.com/show_bug.cgi?id=1428934
+#
+###############################################################################
+- name: Verify 3.5 specific upgrade checks
+ hosts: oo_first_master
+ roles:
+ - { role: lib_openshift }
+ tasks:
+ - name: Check for invalid namespaces and SDN errors
+ oc_objectvalidator:
diff --git a/pytest.ini b/pytest.ini
new file mode 100644
index 000000000..502fd1f46
--- /dev/null
+++ b/pytest.ini
@@ -0,0 +1,16 @@
+[pytest]
+norecursedirs =
+ .*
+ __pycache__
+ cover
+ docs
+python_files =
+ # TODO(rhcarvalho): rename test files to follow a single pattern. "test*.py"
+ # is Python unittest's default, while pytest discovers both "test_*.py" and
+ # "*_test.py" by default.
+ test_*.py
+ *_tests.py
+addopts =
+ --cov=.
+ --cov-report=term
+ --cov-report=html
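For context on the pytest.ini added above: pytest appends `addopts` to every invocation, so running the suite with coverage reduces to a bare `pytest` call. A rough equivalent, assuming pytest and pytest-cov are installed (e.g. via test-requirements.txt):

    pip install pytest pytest-cov
    pytest   # picks up addopts: --cov=. --cov-report=term --cov-report=html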
diff --git a/requirements.txt b/requirements.txt
index 5a6a161cb..241313b6f 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,5 +1,6 @@
ansible>=2.2
-six
+click
pyOpenSSL
-PyYAML
-ruamel.yaml
+# We need to disable ruamel.yaml for now because of test failures
+#ruamel.yaml
+six
diff --git a/roles/docker/templates/custom.conf.j2 b/roles/docker/templates/custom.conf.j2
index 53ed56abc..9b47cb6ab 100644
--- a/roles/docker/templates/custom.conf.j2
+++ b/roles/docker/templates/custom.conf.j2
@@ -1,5 +1,5 @@
# {{ ansible_managed }}
[Unit]
-Requires=iptables.service
+Wants=iptables.service
After=iptables.service
diff --git a/roles/lib_openshift/library/oc_sdnvalidator.py b/roles/lib_openshift/library/oc_objectvalidator.py
index bc7487b95..5a966fa93 100644
--- a/roles/lib_openshift/library/oc_sdnvalidator.py
+++ b/roles/lib_openshift/library/oc_objectvalidator.py
@@ -50,14 +50,14 @@ from ansible.module_utils.basic import AnsibleModule
# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
-# -*- -*- -*- Begin included fragment: doc/sdnvalidator -*- -*- -*-
+# -*- -*- -*- Begin included fragment: doc/objectvalidator -*- -*- -*-
DOCUMENTATION = '''
---
-module: oc_sdnvalidator
-short_description: Validate SDN objects
+module: oc_objectvalidator
+short_description: Validate OpenShift objects
description:
- - Validate SDN objects
+ - Validate OpenShift objects
options:
kubeconfig:
description:
@@ -71,13 +71,13 @@ extends_documentation_fragment: []
'''
EXAMPLES = '''
-oc_version:
-- name: get oc sdnvalidator
- sdnvalidator:
- register: oc_sdnvalidator
+oc_objectvalidator:
+- name: run oc_objectvalidator
+ oc_objectvalidator:
+ register: oc_objectvalidator
'''
-# -*- -*- -*- End included fragment: doc/sdnvalidator -*- -*- -*-
+# -*- -*- -*- End included fragment: doc/objectvalidator -*- -*- -*-
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
# pylint: disable=undefined-variable,missing-docstring
@@ -1307,25 +1307,25 @@ class OpenShiftCLIConfig(object):
# -*- -*- -*- End included fragment: lib/base.py -*- -*- -*-
-# -*- -*- -*- Begin included fragment: class/oc_sdnvalidator.py -*- -*- -*-
+# -*- -*- -*- Begin included fragment: class/oc_objectvalidator.py -*- -*- -*-
# pylint: disable=too-many-instance-attributes
-class OCSDNValidator(OpenShiftCLI):
+class OCObjectValidator(OpenShiftCLI):
''' Class to wrap the oc command line tools '''
def __init__(self, kubeconfig):
- ''' Constructor for OCSDNValidator '''
- # namespace has no meaning for SDN validation, hardcode to 'default'
- super(OCSDNValidator, self).__init__('default', kubeconfig)
+ ''' Constructor for OCObjectValidator '''
+ # namespace has no meaning for object validation, hardcode to 'default'
+ super(OCObjectValidator, self).__init__('default', kubeconfig)
- def get(self, kind, invalid_filter):
- ''' return SDN information '''
+ def get_invalid(self, kind, invalid_filter):
+ ''' return invalid object information '''
rval = self._get(kind)
if rval['returncode'] != 0:
return False, rval, []
- return True, rval, filter(invalid_filter, rval['results'][0]['items'])
+ return True, rval, list(filter(invalid_filter, rval['results'][0]['items'])) # wrap filter with list for py3
# pylint: disable=too-many-return-statements
@staticmethod
@@ -1335,10 +1335,24 @@ class OCSDNValidator(OpenShiftCLI):
params comes from the ansible portion of this module
'''
- sdnvalidator = OCSDNValidator(params['kubeconfig'])
+ objectvalidator = OCObjectValidator(params['kubeconfig'])
all_invalid = {}
failed = False
+ def _is_invalid_namespace(namespace):
+ # check if it uses a reserved name
+ name = namespace['metadata']['name']
+ if not any((name == 'kube',
+ name == 'openshift',
+ name.startswith('kube-'),
+ name.startswith('openshift-'),)):
+ return False
+
+ # determine if the namespace was created by a user
+ if 'annotations' not in namespace['metadata']:
+ return False
+ return 'openshift.io/requester' in namespace['metadata']['annotations']
+
checks = (
(
'hostsubnet',
@@ -1350,10 +1364,15 @@ class OCSDNValidator(OpenShiftCLI):
lambda x: x['metadata']['name'] != x['netname'],
u'netnamespaces where metadata.name != netname',
),
+ (
+ 'namespace',
+ _is_invalid_namespace,
+ u'namespaces that use reserved names and were not created by infrastructure components',
+ ),
)
for resource, invalid_filter, invalid_msg in checks:
- success, rval, invalid = sdnvalidator.get(resource, invalid_filter)
+ success, rval, invalid = objectvalidator.get_invalid(resource, invalid_filter)
if not success:
return {'failed': True, 'msg': 'Failed to GET {}.'.format(resource), 'state': 'list', 'results': rval}
if invalid:
@@ -1361,17 +1380,26 @@ class OCSDNValidator(OpenShiftCLI):
all_invalid[invalid_msg] = invalid
if failed:
- return {'failed': True, 'msg': 'All SDN objects are not valid.', 'state': 'list', 'results': all_invalid}
+ return {
+ 'failed': True,
+ 'msg': (
+ "All objects are not valid. If you are a supported customer please contact "
+ "Red Hat Support providing the complete output above. If you are not a customer "
+ "please contact users@lists.openshift.redhat.com for assistance."
+ ),
+ 'state': 'list',
+ 'results': all_invalid
+ }
- return {'msg': 'All SDN objects are valid.'}
+ return {'msg': 'All objects are valid.'}
-# -*- -*- -*- End included fragment: class/oc_sdnvalidator.py -*- -*- -*-
+# -*- -*- -*- End included fragment: class/oc_objectvalidator.py -*- -*- -*-
-# -*- -*- -*- Begin included fragment: ansible/oc_sdnvalidator.py -*- -*- -*-
+# -*- -*- -*- Begin included fragment: ansible/oc_objectvalidator.py -*- -*- -*-
def main():
'''
- ansible oc module for validating OpenShift SDN objects
+ ansible oc module for validating OpenShift objects
'''
module = AnsibleModule(
@@ -1382,7 +1410,7 @@ def main():
)
- rval = OCSDNValidator.run_ansible(module.params)
+ rval = OCObjectValidator.run_ansible(module.params)
if 'failed' in rval:
module.fail_json(**rval)
@@ -1391,4 +1419,4 @@ def main():
if __name__ == '__main__':
main()
-# -*- -*- -*- End included fragment: ansible/oc_sdnvalidator.py -*- -*- -*-
+# -*- -*- -*- End included fragment: ansible/oc_objectvalidator.py -*- -*- -*-
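The namespace check added to oc_objectvalidator above only flags reserved-name namespaces (`kube`, `openshift`, or their `-` prefixed variants) that carry the `openshift.io/requester` annotation, meaning they were created by a user rather than by infrastructure components. A standalone sketch of that filter, with hypothetical sample objects:

    def is_invalid_namespace(namespace):
        # only reserved names can be invalid; anything else passes
        name = namespace['metadata']['name']
        if not any((name == 'kube',
                    name == 'openshift',
                    name.startswith('kube-'),
                    name.startswith('openshift-'))):
            return False
        # infrastructure-created namespaces lack the requester annotation
        annotations = namespace['metadata'].get('annotations', {})
        return 'openshift.io/requester' in annotations

    infra_ns = {'metadata': {'name': 'openshift-infra'}}
    user_ns = {'metadata': {'name': 'openshift-test',
                            'annotations': {'openshift.io/requester': 'alice'}}}
    assert not is_invalid_namespace(infra_ns)
    assert is_invalid_namespace(user_ns)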
diff --git a/roles/lib_openshift/library/oc_project.py b/roles/lib_openshift/library/oc_project.py
new file mode 100644
index 000000000..812c67de5
--- /dev/null
+++ b/roles/lib_openshift/library/oc_project.py
@@ -0,0 +1,1671 @@
+#!/usr/bin/env python
+# pylint: disable=missing-docstring
+# flake8: noqa: T001
+# ___ ___ _ _ ___ ___ _ _____ ___ ___
+# / __| __| \| | __| _ \ /_\_ _| __| \
+# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
+# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
+# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
+# | |) | (_) | | .` | (_) || | | _|| |) | | | |
+# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
+#
+# Copyright 2016 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# -*- -*- -*- Begin included fragment: lib/import.py -*- -*- -*-
+'''
+ OpenShiftCLI class that wraps the oc commands in a subprocess
+'''
+# pylint: disable=too-many-lines
+
+from __future__ import print_function
+import atexit
+import copy
+import json
+import os
+import re
+import shutil
+import subprocess
+import tempfile
+# pylint: disable=import-error
+try:
+ import ruamel.yaml as yaml
+except ImportError:
+ import yaml
+
+from ansible.module_utils.basic import AnsibleModule
+
+# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: doc/project -*- -*- -*-
+
+DOCUMENTATION = '''
+---
+module: oc_project
+short_description: Module to manage openshift projects
+description:
+ - Manage openshift projects programmatically.
+options:
+ state:
+ description:
+    - If present, the project will be created if it doesn't exist or updated if different. If absent, the project will be removed if present. If list, information about the project will be gathered and returned as part of the Ansible call results.
+ required: false
+ default: present
+ choices: ["present", "absent", "list"]
+ aliases: []
+ kubeconfig:
+ description:
+ - The path for the kubeconfig file to use for authentication
+ required: false
+ default: /etc/origin/master/admin.kubeconfig
+ aliases: []
+ debug:
+ description:
+ - Turn on debug output.
+ required: false
+ default: False
+ aliases: []
+ name:
+ description:
+ - Name of the object that is being queried.
+ required: false
+ default: None
+ aliases: []
+ display_name:
+ description:
+ - The display name attribute for a project
+ required: false
+ default: None
+ aliases: []
+ description:
+ description:
+ - The description attribute for a project
+ required: false
+ default: None
+ aliases: []
+ admin:
+ description:
+ - The project admin username
+ required: false
+ default: false
+ aliases: []
+ admin_role:
+ description:
+    - The role granted to the project admin user
+ required: false
+ default: 'admin'
+ aliases: []
+ node_selector:
+ description:
+ - The node selector for this project.
+ - This allows certain pods in this project to run on certain nodes.
+ required: false
+ default: None
+ aliases: []
+author:
+- "Kenny Woodson <kwoodson@redhat.com>"
+extends_documentation_fragment: []
+'''
+
+EXAMPLES = '''
+- name: create project
+ oc_project:
+ state: present
+ name: openshift-ops
+ display_name: operations team project
+ node_selector:
+ - top=secret
+ - noncustomer=True
+'''
+
+# -*- -*- -*- End included fragment: doc/project -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
+# pylint: disable=undefined-variable,missing-docstring
+# noqa: E301,E302
+
+
+class YeditException(Exception):
+ ''' Exception class for Yedit '''
+ pass
+
+
+# pylint: disable=too-many-public-methods
+class Yedit(object):
+ ''' Class to modify yaml files '''
+ re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ com_sep = set(['.', '#', '|', ':'])
+
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ filename=None,
+ content=None,
+ content_type='yaml',
+ separator='.',
+ backup=False):
+ self.content = content
+ self._separator = separator
+ self.filename = filename
+ self.__yaml_dict = content
+ self.content_type = content_type
+ self.backup = backup
+ self.load(content_type=self.content_type)
+ if self.__yaml_dict is None:
+ self.__yaml_dict = {}
+
+ @property
+ def separator(self):
+ ''' getter method for yaml_dict '''
+ return self._separator
+
+ @separator.setter
+ def separator(self):
+ ''' getter method for yaml_dict '''
+ return self._separator
+
+ @property
+ def yaml_dict(self):
+ ''' getter method for yaml_dict '''
+ return self.__yaml_dict
+
+ @yaml_dict.setter
+ def yaml_dict(self, value):
+ ''' setter method for yaml_dict '''
+ self.__yaml_dict = value
+
+ @staticmethod
+ def parse_key(key, sep='.'):
+ '''parse the key allowing the appropriate separator'''
+ common_separators = list(Yedit.com_sep - set([sep]))
+ return re.findall(Yedit.re_key % ''.join(common_separators), key)
+
+ @staticmethod
+ def valid_key(key, sep='.'):
+ '''validate the incoming key'''
+ common_separators = list(Yedit.com_sep - set([sep]))
+ if not re.match(Yedit.re_valid_key % ''.join(common_separators), key):
+ return False
+
+ return True
+
+ @staticmethod
+ def remove_entry(data, key, sep='.'):
+ ''' remove data at location key '''
+ if key == '' and isinstance(data, dict):
+ data.clear()
+ return True
+ elif key == '' and isinstance(data, list):
+ del data[:]
+ return True
+
+ if not (key and Yedit.valid_key(key, sep)) and \
+ isinstance(data, (list, dict)):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes[:-1]:
+ if dict_key and isinstance(data, dict):
+ data = data.get(dict_key, None)
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ # process last index for remove
+ # expected list entry
+ if key_indexes[-1][0]:
+ if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
+ del data[int(key_indexes[-1][0])]
+ return True
+
+ # expected dict entry
+ elif key_indexes[-1][1]:
+ if isinstance(data, dict):
+ del data[key_indexes[-1][1]]
+ return True
+
+ @staticmethod
+ def add_entry(data, key, item=None, sep='.'):
+ ''' Get an item from a dictionary with key notation a.b.c
+ d = {'a': {'b': 'c'}}}
+ key = a#b
+ return c
+ '''
+ if key == '':
+ pass
+ elif (not (key and Yedit.valid_key(key, sep)) and
+ isinstance(data, (list, dict))):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes[:-1]:
+ if dict_key:
+ if isinstance(data, dict) and dict_key in data and data[dict_key]: # noqa: E501
+ data = data[dict_key]
+ continue
+
+ elif data and not isinstance(data, dict):
+ raise YeditException("Unexpected item type found while going through key " +
+ "path: {} (at key: {})".format(key, dict_key))
+
+ data[dict_key] = {}
+ data = data[dict_key]
+
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ raise YeditException("Unexpected item type found while going through key path: {}".format(key))
+
+ if key == '':
+ data = item
+
+ # process last index for add
+ # expected list entry
+ elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
+ data[int(key_indexes[-1][0])] = item
+
+ # expected dict entry
+ elif key_indexes[-1][1] and isinstance(data, dict):
+ data[key_indexes[-1][1]] = item
+
+ # didn't add/update to an existing list, nor add/update key to a dict
+ # so we must have been provided some syntax like a.b.c[<int>] = "data" for a
+ # non-existent array
+ else:
+ raise YeditException("Error adding to object at path: {}".format(key))
+
+ return data
+
+ @staticmethod
+ def get_entry(data, key, sep='.'):
+ ''' Get an item from a dictionary with key notation a.b.c
+ d = {'a': {'b': 'c'}}}
+ key = a.b
+ return c
+ '''
+ if key == '':
+ pass
+ elif (not (key and Yedit.valid_key(key, sep)) and
+ isinstance(data, (list, dict))):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes:
+ if dict_key and isinstance(data, dict):
+ data = data.get(dict_key, None)
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ return data
+
+ @staticmethod
+ def _write(filename, contents):
+ ''' Actually write the file contents to disk. This helps with mocking. '''
+
+ tmp_filename = filename + '.yedit'
+
+ with open(tmp_filename, 'w') as yfd:
+ yfd.write(contents)
+
+ os.rename(tmp_filename, filename)
+
+ def write(self):
+ ''' write to file '''
+ if not self.filename:
+ raise YeditException('Please specify a filename.')
+
+ if self.backup and self.file_exists():
+ shutil.copy(self.filename, self.filename + '.orig')
+
+ # Try to set format attributes if supported
+ try:
+ self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ # Try to use RoundTripDumper if supported.
+ try:
+ Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
+ except AttributeError:
+ Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False))
+
+ return (True, self.yaml_dict)
+
+ def read(self):
+ ''' read from file '''
+ # check if it exists
+ if self.filename is None or not self.file_exists():
+ return None
+
+ contents = None
+ with open(self.filename) as yfd:
+ contents = yfd.read()
+
+ return contents
+
+ def file_exists(self):
+ ''' return whether file exists '''
+ if os.path.exists(self.filename):
+ return True
+
+ return False
+
+ def load(self, content_type='yaml'):
+ ''' return yaml file '''
+ contents = self.read()
+
+ if not contents and not self.content:
+ return None
+
+ if self.content:
+ if isinstance(self.content, dict):
+ self.yaml_dict = self.content
+ return self.yaml_dict
+ elif isinstance(self.content, str):
+ contents = self.content
+
+ # check if it is yaml
+ try:
+ if content_type == 'yaml' and contents:
+ # Try to set format attributes if supported
+ try:
+ self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ # Try to use RoundTripLoader if supported.
+ try:
+ self.yaml_dict = yaml.safe_load(contents, yaml.RoundTripLoader)
+ except AttributeError:
+ self.yaml_dict = yaml.safe_load(contents)
+
+ # Try to set format attributes if supported
+ try:
+ self.yaml_dict.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ elif content_type == 'json' and contents:
+ self.yaml_dict = json.loads(contents)
+ except yaml.YAMLError as err:
+ # Error loading yaml or json
+ raise YeditException('Problem with loading yaml file. %s' % err)
+
+ return self.yaml_dict
+
+ def get(self, key):
+ ''' get a specified key'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
+ except KeyError:
+ entry = None
+
+ return entry
+
+ def pop(self, path, key_or_item):
+ ''' remove a key, value pair from a dict or an item for a list'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ return (False, self.yaml_dict)
+
+ if isinstance(entry, dict):
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
+ if key_or_item in entry:
+ entry.pop(key_or_item)
+ return (True, self.yaml_dict)
+ return (False, self.yaml_dict)
+
+ elif isinstance(entry, list):
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
+ ind = None
+ try:
+ ind = entry.index(key_or_item)
+ except ValueError:
+ return (False, self.yaml_dict)
+
+ entry.pop(ind)
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ def delete(self, path):
+ ''' remove path from a dict'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ return (False, self.yaml_dict)
+
+ result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
+ if not result:
+ return (False, self.yaml_dict)
+
+ return (True, self.yaml_dict)
+
+ def exists(self, path, value):
+ ''' check if value exists at path'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if isinstance(entry, list):
+ if value in entry:
+ return True
+ return False
+
+ elif isinstance(entry, dict):
+ if isinstance(value, dict):
+ rval = False
+ for key, val in value.items():
+ if entry[key] != val:
+ rval = False
+ break
+ else:
+ rval = True
+ return rval
+
+ return value in entry
+
+ return entry == value
+
+ def append(self, path, value):
+ '''append value to a list'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ self.put(path, [])
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ if not isinstance(entry, list):
+ return (False, self.yaml_dict)
+
+ # AUDIT:maybe-no-member makes sense due to loading data from
+ # a serialized format.
+ # pylint: disable=maybe-no-member
+ entry.append(value)
+ return (True, self.yaml_dict)
+
+ # pylint: disable=too-many-arguments
+ def update(self, path, value, index=None, curr_value=None):
+ ''' put path, value into a dict '''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if isinstance(entry, dict):
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
+ if not isinstance(value, dict):
+ raise YeditException('Cannot replace key, value entry in ' +
+ 'dict with non-dict type. value=[%s] [%s]' % (value, type(value))) # noqa: E501
+
+ entry.update(value)
+ return (True, self.yaml_dict)
+
+ elif isinstance(entry, list):
+ # AUDIT:maybe-no-member makes sense due to fuzzy types
+ # pylint: disable=maybe-no-member
+ ind = None
+ if curr_value:
+ try:
+ ind = entry.index(curr_value)
+ except ValueError:
+ return (False, self.yaml_dict)
+
+ elif index is not None:
+ ind = index
+
+ if ind is not None and entry[ind] != value:
+ entry[ind] = value
+ return (True, self.yaml_dict)
+
+ # see if it exists in the list
+ try:
+ ind = entry.index(value)
+ except ValueError:
+ # doesn't exist, append it
+ entry.append(value)
+ return (True, self.yaml_dict)
+
+ # already exists, return
+ if ind is not None:
+ return (False, self.yaml_dict)
+ return (False, self.yaml_dict)
+
+ def put(self, path, value):
+ ''' put path, value into a dict '''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry == value:
+ return (False, self.yaml_dict)
+
+ # deepcopy didn't work
+ # Try to use ruamel.yaml and fallback to pyyaml
+ try:
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ except AttributeError:
+ tmp_copy = copy.deepcopy(self.yaml_dict)
+
+ # set the format attributes if available
+ try:
+ tmp_copy.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ result = Yedit.add_entry(tmp_copy, path, value, self.separator)
+ if not result:
+ return (False, self.yaml_dict)
+
+ self.yaml_dict = tmp_copy
+
+ return (True, self.yaml_dict)
+
+ def create(self, path, value):
+ ''' create a yaml file '''
+ if not self.file_exists():
+ # deepcopy didn't work
+ # Try to use ruamel.yaml and fallback to pyyaml
+ try:
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ except AttributeError:
+ tmp_copy = copy.deepcopy(self.yaml_dict)
+
+ # set the format attributes if available
+ try:
+ tmp_copy.fa.set_block_style()
+ except AttributeError:
+ pass
+
+ result = Yedit.add_entry(tmp_copy, path, value, self.separator)
+ if result:
+ self.yaml_dict = tmp_copy
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ @staticmethod
+ def get_curr_value(invalue, val_type):
+ '''return the current value'''
+ if invalue is None:
+ return None
+
+ curr_value = invalue
+ if val_type == 'yaml':
+ curr_value = yaml.load(invalue)
+ elif val_type == 'json':
+ curr_value = json.loads(invalue)
+
+ return curr_value
+
+ @staticmethod
+ def parse_value(inc_value, vtype=''):
+ '''determine value type passed'''
+ true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
+ 'on', 'On', 'ON', ]
+ false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
+ 'off', 'Off', 'OFF']
+
+ # It came in as a string but you didn't specify value_type as string
+ # we will convert to bool if it matches any of the above cases
+ if isinstance(inc_value, str) and 'bool' in vtype:
+ if inc_value not in true_bools and inc_value not in false_bools:
+ raise YeditException('Not a boolean type. str=[%s] vtype=[%s]'
+ % (inc_value, vtype))
+ elif isinstance(inc_value, bool) and 'str' in vtype:
+ inc_value = str(inc_value)
+
+ # If vtype is not str then go ahead and attempt to yaml load it.
+ if isinstance(inc_value, str) and 'str' not in vtype:
+ try:
+ inc_value = yaml.load(inc_value)
+ except Exception:
+ raise YeditException('Could not determine type of incoming ' +
+ 'value. value=[%s] vtype=[%s]'
+ % (type(inc_value), vtype))
+
+ return inc_value
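+    # Illustrative sketch (not part of the original module): given the rules above,
+    #   Yedit.parse_value('yes', 'bool')   # validated against the boolean strings, then yaml-loaded
+    #   Yedit.parse_value('True', 'str')   # kept as the literal string 'True'
+    #   Yedit.parse_value('3')             # no vtype given, yaml-loaded into the int 3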
+
+ # pylint: disable=too-many-return-statements,too-many-branches
+ @staticmethod
+ def run_ansible(module):
+ '''perform the idempotent crud operations'''
+ yamlfile = Yedit(filename=module.params['src'],
+ backup=module.params['backup'],
+ separator=module.params['separator'])
+
+ if module.params['src']:
+ rval = yamlfile.load()
+
+ if yamlfile.yaml_dict is None and \
+ module.params['state'] != 'present':
+ return {'failed': True,
+                        'msg': 'Error opening file [%s]. Verify that the '
+                               'file exists, that it has correct'
+                               ' permissions, and is valid yaml.'
+                               % module.params['src']}
+
+ if module.params['state'] == 'list':
+ if module.params['content']:
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
+ yamlfile.yaml_dict = content
+
+ if module.params['key']:
+ rval = yamlfile.get(module.params['key']) or {}
+
+ return {'changed': False, 'result': rval, 'state': "list"}
+
+ elif module.params['state'] == 'absent':
+ if module.params['content']:
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
+ yamlfile.yaml_dict = content
+
+ if module.params['update']:
+ rval = yamlfile.pop(module.params['key'],
+ module.params['value'])
+ else:
+ rval = yamlfile.delete(module.params['key'])
+
+ if rval[0] and module.params['src']:
+ yamlfile.write()
+
+ return {'changed': rval[0], 'result': rval[1], 'state': "absent"}
+
+ elif module.params['state'] == 'present':
+ # check if content is different than what is in the file
+ if module.params['content']:
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
+
+ # We had no edits to make and the contents are the same
+ if yamlfile.yaml_dict == content and \
+ module.params['value'] is None:
+ return {'changed': False,
+ 'result': yamlfile.yaml_dict,
+ 'state': "present"}
+
+ yamlfile.yaml_dict = content
+
+ # we were passed a value; parse it
+ if module.params['value']:
+ value = Yedit.parse_value(module.params['value'],
+ module.params['value_type'])
+ key = module.params['key']
+ if module.params['update']:
+ # pylint: disable=line-too-long
+ curr_value = Yedit.get_curr_value(Yedit.parse_value(module.params['curr_value']), # noqa: E501
+ module.params['curr_value_format']) # noqa: E501
+
+ rval = yamlfile.update(key, value, module.params['index'], curr_value) # noqa: E501
+
+ elif module.params['append']:
+ rval = yamlfile.append(key, value)
+ else:
+ rval = yamlfile.put(key, value)
+
+ if rval[0] and module.params['src']:
+ yamlfile.write()
+
+ return {'changed': rval[0],
+ 'result': rval[1], 'state': "present"}
+
+ # no edits to make
+ if module.params['src']:
+ # pylint: disable=redefined-variable-type
+ rval = yamlfile.write()
+ return {'changed': rval[0],
+ 'result': rval[1],
+ 'state': "present"}
+
+        return {'failed': True, 'msg': 'Unknown state passed'}
+
+# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: lib/base.py -*- -*- -*-
+# pylint: disable=too-many-lines
+# noqa: E301,E302,E303,T001
+
+
+class OpenShiftCLIError(Exception):
+ '''Exception class for openshiftcli'''
+ pass
+
+
+ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')]
+
+
+def locate_oc_binary():
+ ''' Find and return oc binary file '''
+ # https://github.com/openshift/openshift-ansible/issues/3410
+ # oc can be in /usr/local/bin in some cases, but that may not
+ # be in $PATH due to ansible/sudo
+ paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS
+
+ oc_binary = 'oc'
+
+    # Use shutil.which if it is available, otherwise fall back to a naive path search
+ try:
+ which_result = shutil.which(oc_binary, path=os.pathsep.join(paths))
+ if which_result is not None:
+ oc_binary = which_result
+ except AttributeError:
+ for path in paths:
+ if os.path.exists(os.path.join(path, oc_binary)):
+ oc_binary = os.path.join(path, oc_binary)
+ break
+
+ return oc_binary
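+# Illustrative note (not part of the original module): on Python 3 the
+# shutil.which() branch above is taken; on Python 2 shutil has no which(),
+# so the AttributeError fallback walks the candidate paths by hand, e.g.
+#   locate_oc_binary()  # -> '/usr/local/bin/oc' when oc lives outside $PATH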
+
+
+# pylint: disable=too-few-public-methods
+class OpenShiftCLI(object):
+ ''' Class to wrap the command line tools '''
+ def __init__(self,
+ namespace,
+ kubeconfig='/etc/origin/master/admin.kubeconfig',
+ verbose=False,
+ all_namespaces=False):
+ ''' Constructor for OpenshiftCLI '''
+ self.namespace = namespace
+ self.verbose = verbose
+ self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig)
+ self.all_namespaces = all_namespaces
+ self.oc_binary = locate_oc_binary()
+
+ # Pylint allows only 5 arguments to be passed.
+ # pylint: disable=too-many-arguments
+ def _replace_content(self, resource, rname, content, force=False, sep='.'):
+ ''' replace the current object with the content '''
+ res = self._get(resource, rname)
+ if not res['results']:
+ return res
+
+ fname = Utils.create_tmpfile(rname + '-')
+
+ yed = Yedit(fname, res['results'][0], separator=sep)
+ changes = []
+ for key, value in content.items():
+ changes.append(yed.put(key, value))
+
+ if any([change[0] for change in changes]):
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self._replace(fname, force)
+
+ return {'returncode': 0, 'updated': False}
+
+ def _replace(self, fname, force=False):
+ '''replace the current object with oc replace'''
+ cmd = ['replace', '-f', fname]
+ if force:
+ cmd.append('--force')
+ return self.openshift_cmd(cmd)
+
+ def _create_from_content(self, rname, content):
+ '''create a temporary file and then call oc create on it'''
+ fname = Utils.create_tmpfile(rname + '-')
+ yed = Yedit(fname, content=content)
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self._create(fname)
+
+ def _create(self, fname):
+ '''call oc create on a filename'''
+ return self.openshift_cmd(['create', '-f', fname])
+
+ def _delete(self, resource, rname, selector=None):
+ '''call oc delete on a resource'''
+ cmd = ['delete', resource, rname]
+ if selector:
+ cmd.append('--selector=%s' % selector)
+
+ return self.openshift_cmd(cmd)
+
+ def _process(self, template_name, create=False, params=None, template_data=None): # noqa: E501
+ '''process a template
+
+ template_name: the name of the template to process
+ create: whether to send to oc create after processing
+ params: the parameters for the template
+        template_data: the incoming template's data, used instead of a file
+ '''
+ cmd = ['process']
+ if template_data:
+ cmd.extend(['-f', '-'])
+ else:
+ cmd.append(template_name)
+ if params:
+ param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ cmd.append('-v')
+ cmd.extend(param_str)
+
+ results = self.openshift_cmd(cmd, output=True, input_data=template_data)
+
+ if results['returncode'] != 0 or not create:
+ return results
+
+ fname = Utils.create_tmpfile(template_name + '-')
+ yed = Yedit(fname, results['results'])
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self.openshift_cmd(['create', '-f', fname])
+
+ def _get(self, resource, rname=None, selector=None):
+ '''return a resource by name '''
+ cmd = ['get', resource]
+ if selector:
+ cmd.append('--selector=%s' % selector)
+ elif rname:
+ cmd.append(rname)
+
+ cmd.extend(['-o', 'json'])
+
+ rval = self.openshift_cmd(cmd, output=True)
+
+        # Ensure results are returned in an array
+ if 'items' in rval:
+ rval['results'] = rval['items']
+ elif not isinstance(rval['results'], list):
+ rval['results'] = [rval['results']]
+
+ return rval
+
+ def _schedulable(self, node=None, selector=None, schedulable=True):
+        ''' perform oadm manage-node schedulable '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector=%s' % selector)
+
+ cmd.append('--schedulable=%s' % schedulable)
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
+
+ def _list_pods(self, node=None, selector=None, pod_selector=None):
+ ''' perform oadm list pods
+
+ node: the node in which to list pods
+ selector: the label selector filter if provided
+ pod_selector: the pod selector filter if provided
+ '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector=%s' % selector)
+
+ if pod_selector:
+ cmd.append('--pod-selector=%s' % pod_selector)
+
+ cmd.extend(['--list-pods', '-o', 'json'])
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
+
+ # pylint: disable=too-many-arguments
+ def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
+ ''' perform oadm manage-node evacuate '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector=%s' % selector)
+
+ if dry_run:
+ cmd.append('--dry-run')
+
+ if pod_selector:
+ cmd.append('--pod-selector=%s' % pod_selector)
+
+ if grace_period:
+ cmd.append('--grace-period=%s' % int(grace_period))
+
+ if force:
+ cmd.append('--force')
+
+ cmd.append('--evacuate')
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
+
+ def _version(self):
+ ''' return the openshift version'''
+ return self.openshift_cmd(['version'], output=True, output_type='raw')
+
+ def _import_image(self, url=None, name=None, tag=None):
+ ''' perform image import '''
+ cmd = ['import-image']
+
+ image = '{0}'.format(name)
+ if tag:
+ image += ':{0}'.format(tag)
+
+ cmd.append(image)
+
+ if url:
+ cmd.append('--from={0}/{1}'.format(url, image))
+
+ cmd.append('-n{0}'.format(self.namespace))
+
+ cmd.append('--confirm')
+ return self.openshift_cmd(cmd)
+
+ def _run(self, cmds, input_data):
+ ''' Actually executes the command. This makes mocking easier. '''
+ curr_env = os.environ.copy()
+ curr_env.update({'KUBECONFIG': self.kubeconfig})
+ proc = subprocess.Popen(cmds,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ env=curr_env)
+
+ stdout, stderr = proc.communicate(input_data)
+
+ return proc.returncode, stdout.decode(), stderr.decode()
+
+ # pylint: disable=too-many-arguments,too-many-branches
+ def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
+ '''Base command for oc '''
+ cmds = [self.oc_binary]
+
+ if oadm:
+ cmds.append('adm')
+
+ cmds.extend(cmd)
+
+ if self.all_namespaces:
+ cmds.extend(['--all-namespaces'])
+        elif self.namespace is not None and self.namespace.lower() not in ['none', 'empty']: # E501
+ cmds.extend(['-n', self.namespace])
+
+ rval = {}
+ results = ''
+ err = None
+
+ if self.verbose:
+ print(' '.join(cmds))
+
+ try:
+ returncode, stdout, stderr = self._run(cmds, input_data)
+ except OSError as ex:
+ returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
+
+ rval = {"returncode": returncode,
+ "results": results,
+ "cmd": ' '.join(cmds)}
+
+ if returncode == 0:
+ if output:
+ if output_type == 'json':
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as err:
+ if "No JSON object could be decoded" in err.args:
+ err = err.args
+ elif output_type == 'raw':
+ rval['results'] = stdout
+
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if err:
+ rval.update({"err": err,
+ "stderr": stderr,
+ "stdout": stdout,
+ "cmd": cmds})
+
+ else:
+ rval.update({"stderr": stderr,
+ "stdout": stdout,
+ "results": {}})
+
+ return rval
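+    # Illustrative sketch (not part of the original module): with
+    # namespace='default', a call such as
+    #   self.openshift_cmd(['get', 'pods', '-o', 'json'], output=True)
+    # runs roughly `oc get pods -o json -n default` and, on success, returns
+    # {'returncode': 0, 'results': <decoded JSON>, 'cmd': '...'};
+    # failures add 'stderr'/'stdout' keys and leave 'results' empty.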
+
+
+class Utils(object):
+ ''' utilities for openshiftcli modules '''
+
+ @staticmethod
+ def _write(filename, contents):
+ ''' Actually write the file contents to disk. This helps with mocking. '''
+
+ with open(filename, 'w') as sfd:
+ sfd.write(contents)
+
+ @staticmethod
+ def create_tmp_file_from_contents(rname, data, ftype='yaml'):
+ ''' create a file in tmp with name and contents'''
+
+ tmp = Utils.create_tmpfile(prefix=rname)
+
+ if ftype == 'yaml':
+ # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
+ # pylint: disable=no-member
+ if hasattr(yaml, 'RoundTripDumper'):
+ Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
+ else:
+ Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False))
+
+ elif ftype == 'json':
+ Utils._write(tmp, json.dumps(data))
+ else:
+ Utils._write(tmp, data)
+
+ # Register cleanup when module is done
+ atexit.register(Utils.cleanup, [tmp])
+ return tmp
+
+ @staticmethod
+ def create_tmpfile_copy(inc_file):
+ '''create a temporary copy of a file'''
+ tmpfile = Utils.create_tmpfile('lib_openshift-')
+ Utils._write(tmpfile, open(inc_file).read())
+
+ # Cleanup the tmpfile
+ atexit.register(Utils.cleanup, [tmpfile])
+
+ return tmpfile
+
+ @staticmethod
+ def create_tmpfile(prefix='tmp'):
+ ''' Generates and returns a temporary file name '''
+
+ with tempfile.NamedTemporaryFile(prefix=prefix, delete=False) as tmp:
+ return tmp.name
+
+ @staticmethod
+ def create_tmp_files_from_contents(content, content_type=None):
+ '''Turn an array of dict: filename, content into a files array'''
+ if not isinstance(content, list):
+ content = [content]
+ files = []
+ for item in content:
+ path = Utils.create_tmp_file_from_contents(item['path'] + '-',
+ item['data'],
+ ftype=content_type)
+ files.append({'name': os.path.basename(item['path']),
+ 'path': path})
+ return files
+
+ @staticmethod
+ def cleanup(files):
+ '''Clean up on exit '''
+ for sfile in files:
+ if os.path.exists(sfile):
+ if os.path.isdir(sfile):
+ shutil.rmtree(sfile)
+ elif os.path.isfile(sfile):
+ os.remove(sfile)
+
+ @staticmethod
+ def exists(results, _name):
+ ''' Check to see if the results include the name '''
+ if not results:
+ return False
+
+ if Utils.find_result(results, _name):
+ return True
+
+ return False
+
+ @staticmethod
+ def find_result(results, _name):
+ ''' Find the specified result by name'''
+ rval = None
+ for result in results:
+ if 'metadata' in result and result['metadata']['name'] == _name:
+ rval = result
+ break
+
+ return rval
+
+ @staticmethod
+ def get_resource_file(sfile, sfile_type='yaml'):
+        ''' return the parsed contents of a resource file '''
+ contents = None
+ with open(sfile) as sfd:
+ contents = sfd.read()
+
+ if sfile_type == 'yaml':
+ # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
+ # pylint: disable=no-member
+ if hasattr(yaml, 'RoundTripLoader'):
+ contents = yaml.load(contents, yaml.RoundTripLoader)
+ else:
+ contents = yaml.safe_load(contents)
+ elif sfile_type == 'json':
+ contents = json.loads(contents)
+
+ return contents
+
+ @staticmethod
+ def filter_versions(stdout):
+ ''' filter the oc version output '''
+
+ version_dict = {}
+ version_search = ['oc', 'openshift', 'kubernetes']
+
+ for line in stdout.strip().split('\n'):
+ for term in version_search:
+ if not line:
+ continue
+ if line.startswith(term):
+ version_dict[term] = line.split()[-1]
+
+        # horrible hack to get the openshift version in OpenShift 3.2
+        # By default "oc version" in 3.2 does not return an "openshift" version
+ if "openshift" not in version_dict:
+ version_dict["openshift"] = version_dict["oc"]
+
+ return version_dict
+
+ @staticmethod
+ def add_custom_versions(versions):
+ ''' create custom versions strings '''
+
+ versions_dict = {}
+
+ for tech, version in versions.items():
+ # clean up "-" from version
+ if "-" in version:
+ version = version.split("-")[0]
+
+ if version.startswith('v'):
+ versions_dict[tech + '_numeric'] = version[1:].split('+')[0]
+ # "v3.3.0.33" is what we have, we want "3.3"
+ versions_dict[tech + '_short'] = version[1:4]
+
+ return versions_dict
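+    # Illustrative sketch (not part of the original module): the two helpers
+    # above reduce `oc version` output to a dict, e.g.
+    #   Utils.add_custom_versions({'oc': 'v3.3.0.33'})
+    #   # -> {'oc_numeric': '3.3.0.33', 'oc_short': '3.3'}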
+
+ @staticmethod
+ def openshift_installed():
+ ''' check if openshift is installed '''
+ import yum
+
+ yum_base = yum.YumBase()
+ if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
+ return True
+
+ return False
+
+ # Disabling too-many-branches. This is a yaml dictionary comparison function
+ # pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
+ @staticmethod
+ def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
+ ''' Given a user defined definition, compare it with the results given back by our query. '''
+
+ # Currently these values are autogenerated and we do not need to check them
+ skip = ['metadata', 'status']
+ if skip_keys:
+ skip.extend(skip_keys)
+
+ for key, value in result_def.items():
+ if key in skip:
+ continue
+
+ # Both are lists
+ if isinstance(value, list):
+ if key not in user_def:
+ if debug:
+ print('User data does not have key [%s]' % key)
+ print('User data: %s' % user_def)
+ return False
+
+ if not isinstance(user_def[key], list):
+ if debug:
+ print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key]))
+ return False
+
+ if len(user_def[key]) != len(value):
+ if debug:
+ print("List lengths are not equal.")
+ print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value)))
+ print("user_def: %s" % user_def[key])
+ print("value: %s" % value)
+ return False
+
+ for values in zip(user_def[key], value):
+ if isinstance(values[0], dict) and isinstance(values[1], dict):
+ if debug:
+ print('sending list - list')
+ print(type(values[0]))
+ print(type(values[1]))
+ result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
+ if not result:
+ print('list compare returned false')
+ return False
+
+ elif value != user_def[key]:
+ if debug:
+ print('value should be identical')
+ print(user_def[key])
+ print(value)
+ return False
+
+ # recurse on a dictionary
+ elif isinstance(value, dict):
+ if key not in user_def:
+ if debug:
+ print("user_def does not have key [%s]" % key)
+ return False
+ if not isinstance(user_def[key], dict):
+ if debug:
+ print("dict returned false: not instance of dict")
+ return False
+
+ # before passing ensure keys match
+ api_values = set(value.keys()) - set(skip)
+ user_values = set(user_def[key].keys()) - set(skip)
+ if api_values != user_values:
+ if debug:
+ print("keys are not equal in dict")
+ print(user_values)
+ print(api_values)
+ return False
+
+ result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
+ if not result:
+ if debug:
+ print("dict returned false")
+ print(result)
+ return False
+
+ # Verify each key, value pair is the same
+ else:
+ if key not in user_def or value != user_def[key]:
+ if debug:
+ print("value not equal; user_def does not have key")
+ print(key)
+ print(value)
+ if key in user_def:
+ print(user_def[key])
+ return False
+
+ if debug:
+ print('returning true')
+ return True
+
+
+class OpenShiftCLIConfig(object):
+ '''Generic Config'''
+ def __init__(self, rname, namespace, kubeconfig, options):
+ self.kubeconfig = kubeconfig
+ self.name = rname
+ self.namespace = namespace
+ self._options = options
+
+ @property
+ def config_options(self):
+ ''' return config options '''
+ return self._options
+
+ def to_option_list(self):
+ '''return all options as a string'''
+        '''return all options as a list of cli arguments'''
+
+ def stringify(self):
+        ''' return the options hash as a list of cli params '''
+ rval = []
+ for key, data in self.config_options.items():
+ if data['include'] \
+ and (data['value'] or isinstance(data['value'], int)):
+ rval.append('--%s=%s' % (key.replace('_', '-'), data['value']))
+
+ return rval
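+    # Illustrative sketch (not part of the original module): given options like
+    #   {'display_name': {'value': 'ops', 'include': True},
+    #    'admin': {'value': None, 'include': True}}
+    # stringify() yields ['--display-name=ops']; falsy non-int values and
+    # non-included entries are skipped, and underscores become dashes.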
+
+
+# -*- -*- -*- End included fragment: lib/base.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: lib/project.py -*- -*- -*-
+
+
+# pylint: disable=too-many-instance-attributes
+class ProjectConfig(OpenShiftCLIConfig):
+ ''' project config object '''
+ def __init__(self, rname, namespace, kubeconfig, project_options):
+ super(ProjectConfig, self).__init__(rname, None, kubeconfig, project_options)
+
+
+class Project(Yedit):
+    ''' Class to model an openshift project object '''
+ annotations_path = "metadata.annotations"
+ kind = 'Project'
+ annotation_prefix = 'openshift.io/'
+
+ def __init__(self, content):
+ '''Project constructor'''
+ super(Project, self).__init__(content=content)
+
+ def get_annotations(self):
+ ''' return the annotations'''
+ return self.get(Project.annotations_path) or {}
+
+ def add_annotations(self, inc_annos):
+ ''' add an annotation to the other annotations'''
+ if not isinstance(inc_annos, list):
+ inc_annos = [inc_annos]
+
+ annos = self.get_annotations()
+ if not annos:
+ self.put(Project.annotations_path, inc_annos)
+ else:
+ for anno in inc_annos:
+ for key, value in anno.items():
+ annos[key] = value
+
+ return True
+
+ def find_annotation(self, key):
+ ''' find an annotation'''
+ annotations = self.get_annotations()
+ for anno in annotations:
+ if Project.annotation_prefix + key == anno:
+ return annotations[anno]
+
+ return None
+
+ def delete_annotation(self, inc_anno_keys):
+ ''' remove an annotation from a project'''
+ if not isinstance(inc_anno_keys, list):
+ inc_anno_keys = [inc_anno_keys]
+
+ annos = self.get(Project.annotations_path) or {}
+
+ if not annos:
+ return True
+
+ removed = False
+ for inc_anno in inc_anno_keys:
+ anno = self.find_annotation(inc_anno)
+ if anno:
+                del annos[Project.annotation_prefix + inc_anno]
+ removed = True
+
+ return removed
+
+ def update_annotation(self, key, value):
+        ''' update an annotation on a project'''
+ annos = self.get(Project.annotations_path) or {}
+
+ if not annos:
+ return True
+
+ updated = False
+ anno = self.find_annotation(key)
+ if anno:
+ annos[Project.annotation_prefix + key] = value
+ updated = True
+
+ else:
+ self.add_annotations({Project.annotation_prefix + key: value})
+
+ return updated
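+    # Illustrative sketch (not part of the original module): the helpers above
+    # key annotations off the 'openshift.io/' prefix, e.g.
+    #   proj = Project(content={'metadata': {'annotations':
+    #                           {'openshift.io/display-name': 'ops'}}})
+    #   proj.find_annotation('display-name')         # -> 'ops'
+    #   proj.update_annotation('display-name', 'x')  # rewrites openshift.io/display-name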
+
+# -*- -*- -*- End included fragment: lib/project.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: class/oc_project.py -*- -*- -*-
+
+
+# pylint: disable=too-many-instance-attributes
+class OCProject(OpenShiftCLI):
+ ''' Project Class to manage project/namespace objects'''
+ kind = 'namespace'
+
+ def __init__(self,
+ config,
+ verbose=False):
+ ''' Constructor for OCProject '''
+ super(OCProject, self).__init__(None, config.kubeconfig)
+ self.config = config
+ self._project = None
+
+ @property
+ def project(self):
+ ''' property for project'''
+ if not self._project:
+ self.get()
+ return self._project
+
+ @project.setter
+ def project(self, data):
+        ''' setter function for project property'''
+ self._project = data
+
+ def exists(self):
+ ''' return whether a project exists '''
+ if self.project:
+ return True
+
+ return False
+
+ def get(self):
+ '''return project '''
+ result = self._get(self.kind, self.config.name)
+
+ if result['returncode'] == 0:
+ self.project = Project(content=result['results'][0])
+ result['results'] = self.project.yaml_dict
+
+ elif 'namespaces "%s" not found' % self.config.name in result['stderr']:
+ result = {'results': [], 'returncode': 0}
+
+ return result
+
+ def delete(self):
+ '''delete the object'''
+ return self._delete(self.kind, self.config.name)
+
+ def create(self):
+ '''create a project '''
+ cmd = ['new-project', self.config.name]
+ cmd.extend(self.config.to_option_list())
+
+ return self.openshift_cmd(cmd, oadm=True)
+
+ def update(self):
+ '''update a project '''
+
+ self.project.update_annotation('display-name', self.config.config_options['display_name']['value'])
+ self.project.update_annotation('description', self.config.config_options['description']['value'])
+
+ # work around for immutable project field
+ if self.config.config_options['node_selector']['value']:
+ self.project.update_annotation('node-selector', self.config.config_options['node_selector']['value'])
+ else:
+ self.project.update_annotation('node-selector', self.project.find_annotation('node-selector'))
+
+ return self._replace_content(self.kind, self.config.name, self.project.yaml_dict)
+
+ def needs_update(self):
+ ''' verify an update is needed '''
+ result = self.project.find_annotation("display-name")
+ if result != self.config.config_options['display_name']['value']:
+ return True
+
+ result = self.project.find_annotation("description")
+ if result != self.config.config_options['description']['value']:
+ return True
+
+ result = self.project.find_annotation("node-selector")
+ if result != self.config.config_options['node_selector']['value']:
+ return True
+
+ return False
+
+ # pylint: disable=too-many-return-statements,too-many-branches
+ @staticmethod
+ def run_ansible(params, check_mode):
+ '''run the idempotent ansible code'''
+
+ _ns = None
+ if params['node_selector'] is not None:
+ _ns = ','.join(params['node_selector'])
+
+ pconfig = ProjectConfig(params['name'],
+ 'None',
+ params['kubeconfig'],
+ {'admin': {'value': params['admin'], 'include': True},
+ 'admin_role': {'value': params['admin_role'], 'include': True},
+ 'description': {'value': params['description'], 'include': True},
+ 'display_name': {'value': params['display_name'], 'include': True},
+ 'node_selector': {'value': _ns, 'include': True},
+ })
+
+ oadm_project = OCProject(pconfig, verbose=params['debug'])
+
+ state = params['state']
+
+ api_rval = oadm_project.get()
+
+ #####
+ # Get
+ #####
+ if state == 'list':
+ return {'changed': False, 'results': api_rval['results'], 'state': state}
+
+ ########
+ # Delete
+ ########
+ if state == 'absent':
+ if oadm_project.exists():
+
+ if check_mode:
+ return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a delete.'}
+
+ api_rval = oadm_project.delete()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, 'state': state}
+
+ return {'changed': False, 'state': state}
+
+ if state == 'present':
+ ########
+ # Create
+ ########
+ if not oadm_project.exists():
+
+ if check_mode:
+ return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a create.'}
+
+ # Create it here
+ api_rval = oadm_project.create()
+
+ # return the created object
+ api_rval = oadm_project.get()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, 'state': state}
+
+ ########
+ # Update
+ ########
+ if oadm_project.needs_update():
+
+ if check_mode:
+ return {'changed': True, 'msg': 'CHECK_MODE: Would have performed an update.'}
+
+ api_rval = oadm_project.update()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ # return the created object
+ api_rval = oadm_project.get()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, 'state': state}
+
+ return {'changed': False, 'results': api_rval, 'state': state}
+
+ return {'failed': True,
+ 'changed': False,
+ 'msg': 'Unknown state passed. [%s]' % state}
+
+# -*- -*- -*- End included fragment: class/oc_project.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: ansible/oc_project.py -*- -*- -*-
+
+def main():
+ '''
+ ansible oc module for project
+ '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
+ state=dict(default='present', type='str',
+ choices=['present', 'absent', 'list']),
+ debug=dict(default=False, type='bool'),
+            name=dict(default=None, required=True, type='str'),
+ display_name=dict(default=None, type='str'),
+ node_selector=dict(default=None, type='list'),
+ description=dict(default=None, type='str'),
+ admin=dict(default=None, type='str'),
+ admin_role=dict(default='admin', type='str'),
+ ),
+ supports_check_mode=True,
+ )
+
+ rval = OCProject.run_ansible(module.params, module.check_mode)
+ if 'failed' in rval:
+ return module.fail_json(**rval)
+
+ return module.exit_json(**rval)
+
+
+if __name__ == '__main__':
+ main()
+
+# -*- -*- -*- End included fragment: ansible/oc_project.py -*- -*- -*-
diff --git a/roles/lib_openshift/src/ansible/oc_sdnvalidator.py b/roles/lib_openshift/src/ansible/oc_objectvalidator.py
index e91417d63..658bb5ded 100644
--- a/roles/lib_openshift/src/ansible/oc_sdnvalidator.py
+++ b/roles/lib_openshift/src/ansible/oc_objectvalidator.py
@@ -3,7 +3,7 @@
def main():
'''
- ansible oc module for validating OpenShift SDN objects
+ ansible oc module for validating OpenShift objects
'''
module = AnsibleModule(
@@ -14,7 +14,7 @@ def main():
)
- rval = OCSDNValidator.run_ansible(module.params)
+ rval = OCObjectValidator.run_ansible(module.params)
if 'failed' in rval:
module.fail_json(**rval)
diff --git a/roles/lib_openshift/src/ansible/oc_project.py b/roles/lib_openshift/src/ansible/oc_project.py
new file mode 100644
index 000000000..b035cd712
--- /dev/null
+++ b/roles/lib_openshift/src/ansible/oc_project.py
@@ -0,0 +1,33 @@
+# pylint: skip-file
+# flake8: noqa
+
+def main():
+ '''
+ ansible oc module for project
+ '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
+ state=dict(default='present', type='str',
+ choices=['present', 'absent', 'list']),
+ debug=dict(default=False, type='bool'),
+            name=dict(default=None, required=True, type='str'),
+ display_name=dict(default=None, type='str'),
+ node_selector=dict(default=None, type='list'),
+ description=dict(default=None, type='str'),
+ admin=dict(default=None, type='str'),
+ admin_role=dict(default='admin', type='str'),
+ ),
+ supports_check_mode=True,
+ )
+
+ rval = OCProject.run_ansible(module.params, module.check_mode)
+ if 'failed' in rval:
+ return module.fail_json(**rval)
+
+ return module.exit_json(**rval)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/roles/lib_openshift/src/class/oc_objectvalidator.py b/roles/lib_openshift/src/class/oc_objectvalidator.py
new file mode 100644
index 000000000..43f6cac67
--- /dev/null
+++ b/roles/lib_openshift/src/class/oc_objectvalidator.py
@@ -0,0 +1,86 @@
+# pylint: skip-file
+# flake8: noqa
+
+# pylint: disable=too-many-instance-attributes
+class OCObjectValidator(OpenShiftCLI):
+ ''' Class to wrap the oc command line tools '''
+
+ def __init__(self, kubeconfig):
+ ''' Constructor for OCObjectValidator '''
+ # namespace has no meaning for object validation, hardcode to 'default'
+ super(OCObjectValidator, self).__init__('default', kubeconfig)
+
+ def get_invalid(self, kind, invalid_filter):
+ ''' return invalid object information '''
+
+ rval = self._get(kind)
+ if rval['returncode'] != 0:
+ return False, rval, []
+
+ return True, rval, list(filter(invalid_filter, rval['results'][0]['items'])) # wrap filter with list for py3
+
+ # pylint: disable=too-many-return-statements
+ @staticmethod
+ def run_ansible(params):
+ ''' run the idempotent ansible code
+
+ params comes from the ansible portion of this module
+ '''
+
+ objectvalidator = OCObjectValidator(params['kubeconfig'])
+ all_invalid = {}
+ failed = False
+
+ def _is_invalid_namespace(namespace):
+ # check if it uses a reserved name
+ name = namespace['metadata']['name']
+ if not any((name == 'kube',
+ name == 'openshift',
+ name.startswith('kube-'),
+ name.startswith('openshift-'),)):
+ return False
+
+ # determine if the namespace was created by a user
+ if 'annotations' not in namespace['metadata']:
+ return False
+ return 'openshift.io/requester' in namespace['metadata']['annotations']
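+        # Illustrative note (not part of the original module): for example,
+        #   _is_invalid_namespace({'metadata': {'name': 'openshift-foo',
+        #                                       'annotations': {'openshift.io/requester': 'alice'}}})
+        # returns True (reserved name requested by a user), while the same
+        # name without the requester annotation returns False.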
+
+ checks = (
+ (
+ 'hostsubnet',
+ lambda x: x['metadata']['name'] != x['host'],
+ u'hostsubnets where metadata.name != host',
+ ),
+ (
+ 'netnamespace',
+ lambda x: x['metadata']['name'] != x['netname'],
+ u'netnamespaces where metadata.name != netname',
+ ),
+ (
+ 'namespace',
+ _is_invalid_namespace,
+ u'namespaces that use reserved names and were not created by infrastructure components',
+ ),
+ )
+
+ for resource, invalid_filter, invalid_msg in checks:
+ success, rval, invalid = objectvalidator.get_invalid(resource, invalid_filter)
+ if not success:
+ return {'failed': True, 'msg': 'Failed to GET {}.'.format(resource), 'state': 'list', 'results': rval}
+ if invalid:
+ failed = True
+ all_invalid[invalid_msg] = invalid
+
+ if failed:
+ return {
+ 'failed': True,
+ 'msg': (
+                    "Not all objects are valid. If you are a supported customer please contact "
+ "Red Hat Support providing the complete output above. If you are not a customer "
+ "please contact users@lists.openshift.redhat.com for assistance."
+ ),
+ 'state': 'list',
+ 'results': all_invalid
+ }
+
+ return {'msg': 'All objects are valid.'}
diff --git a/roles/lib_openshift/src/class/oc_project.py b/roles/lib_openshift/src/class/oc_project.py
new file mode 100644
index 000000000..7e3984297
--- /dev/null
+++ b/roles/lib_openshift/src/class/oc_project.py
@@ -0,0 +1,185 @@
+# pylint: skip-file
+# flake8: noqa
+
+
+# pylint: disable=too-many-instance-attributes
+class OCProject(OpenShiftCLI):
+ ''' Project Class to manage project/namespace objects'''
+ kind = 'namespace'
+
+ def __init__(self,
+ config,
+ verbose=False):
+ ''' Constructor for OCProject '''
+ super(OCProject, self).__init__(None, config.kubeconfig)
+ self.config = config
+ self._project = None
+
+ @property
+ def project(self):
+ ''' property for project'''
+ if not self._project:
+ self.get()
+ return self._project
+
+ @project.setter
+ def project(self, data):
+        ''' setter function for project property'''
+ self._project = data
+
+ def exists(self):
+ ''' return whether a project exists '''
+ if self.project:
+ return True
+
+ return False
+
+ def get(self):
+ '''return project '''
+ result = self._get(self.kind, self.config.name)
+
+ if result['returncode'] == 0:
+ self.project = Project(content=result['results'][0])
+ result['results'] = self.project.yaml_dict
+
+ elif 'namespaces "%s" not found' % self.config.name in result['stderr']:
+ result = {'results': [], 'returncode': 0}
+
+ return result
+
+ def delete(self):
+ '''delete the object'''
+ return self._delete(self.kind, self.config.name)
+
+ def create(self):
+ '''create a project '''
+ cmd = ['new-project', self.config.name]
+ cmd.extend(self.config.to_option_list())
+
+ return self.openshift_cmd(cmd, oadm=True)
+
+ def update(self):
+ '''update a project '''
+
+ self.project.update_annotation('display-name', self.config.config_options['display_name']['value'])
+ self.project.update_annotation('description', self.config.config_options['description']['value'])
+
+ # work around for immutable project field
+ if self.config.config_options['node_selector']['value']:
+ self.project.update_annotation('node-selector', self.config.config_options['node_selector']['value'])
+ else:
+ self.project.update_annotation('node-selector', self.project.find_annotation('node-selector'))
+
+ return self._replace_content(self.kind, self.config.name, self.project.yaml_dict)
+
+ def needs_update(self):
+ ''' verify an update is needed '''
+ result = self.project.find_annotation("display-name")
+ if result != self.config.config_options['display_name']['value']:
+ return True
+
+ result = self.project.find_annotation("description")
+ if result != self.config.config_options['description']['value']:
+ return True
+
+ result = self.project.find_annotation("node-selector")
+ if result != self.config.config_options['node_selector']['value']:
+ return True
+
+ return False
+
+ # pylint: disable=too-many-return-statements,too-many-branches
+ @staticmethod
+ def run_ansible(params, check_mode):
+ '''run the idempotent ansible code'''
+
+ _ns = None
+ if params['node_selector'] is not None:
+ _ns = ','.join(params['node_selector'])
+
+ pconfig = ProjectConfig(params['name'],
+ 'None',
+ params['kubeconfig'],
+ {'admin': {'value': params['admin'], 'include': True},
+ 'admin_role': {'value': params['admin_role'], 'include': True},
+ 'description': {'value': params['description'], 'include': True},
+ 'display_name': {'value': params['display_name'], 'include': True},
+ 'node_selector': {'value': _ns, 'include': True},
+ })
+
+ oadm_project = OCProject(pconfig, verbose=params['debug'])
+
+ state = params['state']
+
+ api_rval = oadm_project.get()
+
+ #####
+ # Get
+ #####
+ if state == 'list':
+ return {'changed': False, 'results': api_rval['results'], 'state': state}
+
+ ########
+ # Delete
+ ########
+ if state == 'absent':
+ if oadm_project.exists():
+
+ if check_mode:
+ return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a delete.'}
+
+ api_rval = oadm_project.delete()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, 'state': state}
+
+ return {'changed': False, 'state': state}
+
+ if state == 'present':
+ ########
+ # Create
+ ########
+ if not oadm_project.exists():
+
+ if check_mode:
+ return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a create.'}
+
+ # Create it here
+ api_rval = oadm_project.create()
+
+ # return the created object
+ api_rval = oadm_project.get()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, 'state': state}
+
+ ########
+ # Update
+ ########
+ if oadm_project.needs_update():
+
+ if check_mode:
+ return {'changed': True, 'msg': 'CHECK_MODE: Would have performed an update.'}
+
+ api_rval = oadm_project.update()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ # return the created object
+ api_rval = oadm_project.get()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, 'state': state}
+
+ return {'changed': False, 'results': api_rval, 'state': state}
+
+ return {'failed': True,
+ 'changed': False,
+ 'msg': 'Unknown state passed. [%s]' % state}
diff --git a/roles/lib_openshift/src/class/oc_sdnvalidator.py b/roles/lib_openshift/src/class/oc_sdnvalidator.py
deleted file mode 100644
index da923337b..000000000
--- a/roles/lib_openshift/src/class/oc_sdnvalidator.py
+++ /dev/null
@@ -1,58 +0,0 @@
-# pylint: skip-file
-# flake8: noqa
-
-# pylint: disable=too-many-instance-attributes
-class OCSDNValidator(OpenShiftCLI):
- ''' Class to wrap the oc command line tools '''
-
- def __init__(self, kubeconfig):
- ''' Constructor for OCSDNValidator '''
- # namespace has no meaning for SDN validation, hardcode to 'default'
- super(OCSDNValidator, self).__init__('default', kubeconfig)
-
- def get(self, kind, invalid_filter):
- ''' return SDN information '''
-
- rval = self._get(kind)
- if rval['returncode'] != 0:
- return False, rval, []
-
- return True, rval, filter(invalid_filter, rval['results'][0]['items'])
-
- # pylint: disable=too-many-return-statements
- @staticmethod
- def run_ansible(params):
- ''' run the idempotent ansible code
-
- params comes from the ansible portion of this module
- '''
-
- sdnvalidator = OCSDNValidator(params['kubeconfig'])
- all_invalid = {}
- failed = False
-
- checks = (
- (
- 'hostsubnet',
- lambda x: x['metadata']['name'] != x['host'],
- u'hostsubnets where metadata.name != host',
- ),
- (
- 'netnamespace',
- lambda x: x['metadata']['name'] != x['netname'],
- u'netnamespaces where metadata.name != netname',
- ),
- )
-
- for resource, invalid_filter, invalid_msg in checks:
- success, rval, invalid = sdnvalidator.get(resource, invalid_filter)
- if not success:
- return {'failed': True, 'msg': 'Failed to GET {}.'.format(resource), 'state': 'list', 'results': rval}
- if invalid:
- failed = True
- all_invalid[invalid_msg] = invalid
-
- if failed:
- return {'failed': True, 'msg': 'All SDN objects are not valid.', 'state': 'list', 'results': all_invalid}
-
- return {'msg': 'All SDN objects are valid.'}
diff --git a/roles/lib_openshift/src/doc/sdnvalidator b/roles/lib_openshift/src/doc/objectvalidator
index 0b1862ed1..98861e261 100644
--- a/roles/lib_openshift/src/doc/sdnvalidator
+++ b/roles/lib_openshift/src/doc/objectvalidator
@@ -3,10 +3,10 @@
DOCUMENTATION = '''
---
-module: oc_sdnvalidator
-short_description: Validate SDN objects
+module: oc_objectvalidator
+short_description: Validate OpenShift objects
description:
- - Validate SDN objects
+ - Validate OpenShift objects
options:
kubeconfig:
description:
@@ -20,8 +20,8 @@ extends_documentation_fragment: []
'''
EXAMPLES = '''
-oc_version:
-- name: get oc sdnvalidator
- sdnvalidator:
- register: oc_sdnvalidator
+oc_objectvalidator:
+- name: run oc_objectvalidator
+ oc_objectvalidator:
+ register: oc_objectvalidator
'''
diff --git a/roles/lib_openshift/src/doc/project b/roles/lib_openshift/src/doc/project
new file mode 100644
index 000000000..92efe4320
--- /dev/null
+++ b/roles/lib_openshift/src/doc/project
@@ -0,0 +1,81 @@
+# flake8: noqa
+# pylint: skip-file
+
+DOCUMENTATION = '''
+---
+module: oc_project
+short_description: Module to manage openshift projects
+description:
+ - Manage openshift projects programmatically.
+options:
+ state:
+ description:
+    - If present, the project will be created if it doesn't exist or updated if different. If absent, the project will be removed if present. If list, information about the project will be gathered and returned as part of the Ansible call results.
+ required: false
+ default: present
+ choices: ["present", "absent", "list"]
+ aliases: []
+ kubeconfig:
+ description:
+ - The path for the kubeconfig file to use for authentication
+ required: false
+ default: /etc/origin/master/admin.kubeconfig
+ aliases: []
+ debug:
+ description:
+ - Turn on debug output.
+ required: false
+ default: False
+ aliases: []
+ name:
+ description:
+    - Name of the project.
+ required: false
+ default: None
+ aliases: []
+ display_name:
+ description:
+ - The display name attribute for a project
+ required: false
+ default: None
+ aliases: []
+ description:
+ description:
+ - The description attribute for a project
+ required: false
+ default: None
+ aliases: []
+ admin:
+ description:
+ - The project admin username
+ required: false
+    default: None
+ aliases: []
+ admin_role:
+ description:
+    - The role to grant to the project admin user
+ required: false
+ default: 'admin'
+ aliases: []
+ node_selector:
+ description:
+ - The node selector for this project.
+ - This allows certain pods in this project to run on certain nodes.
+ required: false
+ default: None
+ aliases: []
+author:
+- "Kenny Woodson <kwoodson@redhat.com>"
+extends_documentation_fragment: []
+'''
+
+EXAMPLES = '''
+- name: create project
+ oc_project:
+ state: present
+ name: openshift-ops
+ display_name: operations team project
+ node_selector:
+ - top=secret
+ - noncustomer=True
+'''
diff --git a/roles/lib_openshift/src/lib/project.py b/roles/lib_openshift/src/lib/project.py
new file mode 100644
index 000000000..40994741c
--- /dev/null
+++ b/roles/lib_openshift/src/lib/project.py
@@ -0,0 +1,85 @@
+# pylint: skip-file
+# flake8: noqa
+
+
+# pylint: disable=too-many-instance-attributes
+class ProjectConfig(OpenShiftCLIConfig):
+ ''' project config object '''
+ def __init__(self, rname, namespace, kubeconfig, project_options):
+ super(ProjectConfig, self).__init__(rname, None, kubeconfig, project_options)
+
+
+class Project(Yedit):
+    ''' Class to model an openshift project object '''
+ annotations_path = "metadata.annotations"
+ kind = 'Project'
+ annotation_prefix = 'openshift.io/'
+
+ def __init__(self, content):
+ '''Project constructor'''
+ super(Project, self).__init__(content=content)
+
+ def get_annotations(self):
+ ''' return the annotations'''
+ return self.get(Project.annotations_path) or {}
+
+ def add_annotations(self, inc_annos):
+ ''' add an annotation to the other annotations'''
+ if not isinstance(inc_annos, list):
+ inc_annos = [inc_annos]
+
+ annos = self.get_annotations()
+ if not annos:
+ self.put(Project.annotations_path, inc_annos)
+ else:
+ for anno in inc_annos:
+ for key, value in anno.items():
+ annos[key] = value
+
+ return True
+
+ def find_annotation(self, key):
+ ''' find an annotation'''
+ annotations = self.get_annotations()
+ for anno in annotations:
+ if Project.annotation_prefix + key == anno:
+ return annotations[anno]
+
+ return None
+
+ def delete_annotation(self, inc_anno_keys):
+ ''' remove an annotation from a project'''
+ if not isinstance(inc_anno_keys, list):
+ inc_anno_keys = [inc_anno_keys]
+
+ annos = self.get(Project.annotations_path) or {}
+
+ if not annos:
+ return True
+
+ removed = False
+ for inc_anno in inc_anno_keys:
+ anno = self.find_annotation(inc_anno)
+ if anno:
+                del annos[Project.annotation_prefix + inc_anno]
+ removed = True
+
+ return removed
+
+ def update_annotation(self, key, value):
+        ''' update an annotation on a project'''
+ annos = self.get(Project.annotations_path) or {}
+
+ if not annos:
+ return True
+
+ updated = False
+ anno = self.find_annotation(key)
+ if anno:
+ annos[Project.annotation_prefix + key] = value
+ updated = True
+
+ else:
+ self.add_annotations({Project.annotation_prefix + key: value})
+
+ return updated
diff --git a/roles/lib_openshift/src/sources.yml b/roles/lib_openshift/src/sources.yml
index a0e200d0a..f16b3c8de 100644
--- a/roles/lib_openshift/src/sources.yml
+++ b/roles/lib_openshift/src/sources.yml
@@ -130,6 +130,17 @@ oc_process.py:
- class/oc_process.py
- ansible/oc_process.py
+oc_project.py:
+- doc/generated
+- doc/license
+- lib/import.py
+- doc/project
+- ../../lib_utils/src/class/yedit.py
+- lib/base.py
+- lib/project.py
+- class/oc_project.py
+- ansible/oc_project.py
+
oc_route.py:
- doc/generated
- doc/license
@@ -207,12 +218,12 @@ oc_version.py:
- class/oc_version.py
- ansible/oc_version.py
-oc_sdnvalidator.py:
+oc_objectvalidator.py:
- doc/generated
- doc/license
- lib/import.py
-- doc/sdnvalidator
+- doc/objectvalidator
- ../../lib_utils/src/class/yedit.py
- lib/base.py
-- class/oc_sdnvalidator.py
-- ansible/oc_sdnvalidator.py
+- class/oc_objectvalidator.py
+- ansible/oc_objectvalidator.py
diff --git a/roles/lib_openshift/src/test/integration/oc_project.yml b/roles/lib_openshift/src/test/integration/oc_project.yml
new file mode 100755
index 000000000..9f700c62c
--- /dev/null
+++ b/roles/lib_openshift/src/test/integration/oc_project.yml
@@ -0,0 +1,83 @@
+#!/usr/bin/ansible-playbook --module-path=../../../library/
+# ./oc_project.yml -M ../../../library -e "cli_master_test=$OPENSHIFT_MASTER"
+---
+- hosts: "{{ cli_master_test }}"
+ gather_facts: no
+ user: root
+ tasks:
+ - name: create a project
+ oc_project:
+ display_name: operations project
+ name: operations
+ state: present
+ description: All things operations
+ node_selector:
+ - ops_only=true
+ register: projout
+ - debug: var=projout
+
+ - assert:
+ that:
+ - "projout.results.results['metadata']['name'] == 'operations'"
+ - projout.changed
+ msg: project create failed.
+
+ - name: create a project
+ oc_project:
+ display_name: operations project
+ name: operations
+ state: present
+ description: All things operations
+ node_selector:
+ - ops_only=true
+ register: projout
+ - debug: var=projout
+
+ - assert:
+ that:
+ - "projout.results.results['metadata']['name'] == 'operations'"
+ - projout.changed == False
+      msg: project create is not idempotent.
+
+ - name: update a project
+ oc_project:
+ display_name: operations project one
+ name: operations
+ state: present
+ description: All things operations
+ node_selector:
+ - ops_only=true
+ register: projout
+ - debug: var=projout
+
+ - assert:
+ that:
+ - "projout.results.results['metadata']['annotations']['openshift.io/display-name'] == 'operations project one'"
+ - projout.changed == True
+      msg: project update failed.
+
+  - name: list the project
+ oc_project:
+ name: operations
+ state: list
+ register: projout
+ - debug: var=projout
+
+ - assert:
+ that:
+ - "projout.results['metadata']['annotations']['openshift.io/display-name'] == 'operations project one'"
+ - projout.changed == False
+ - projout.state == 'list'
+ msg: project list failed.
+
+ - name: delete a project
+ oc_project:
+ name: operations
+ state: absent
+ register: projout
+ - debug: var=projout
+
+ - assert:
+ that:
+ - projout.changed == True
+ msg: project delete failed.
diff --git a/roles/lib_openshift/src/test/unit/oc_sdnvalidator.py b/roles/lib_openshift/src/test/unit/oc_sdnvalidator.py
deleted file mode 100755
index 49e2aadb2..000000000
--- a/roles/lib_openshift/src/test/unit/oc_sdnvalidator.py
+++ /dev/null
@@ -1,481 +0,0 @@
-#!/usr/bin/env python2
-'''
- Unit tests for oc sdnvalidator
-'''
-# To run
-# ./oc_sdnvalidator.py
-#
-# ....
-# ----------------------------------------------------------------------
-# Ran 4 tests in 0.002s
-#
-# OK
-
-import os
-import sys
-import unittest
-import mock
-
-# Removing invalid variable names for tests so that I can
-# keep them brief
-# pylint: disable=invalid-name,no-name-in-module
-# Disable import-error b/c our libraries aren't loaded in jenkins
-# pylint: disable=import-error
-# place class in our python path
-module_path = os.path.join('/'.join(os.path.realpath(__file__).split('/')[:-4]), 'library') # noqa: E501
-sys.path.insert(0, module_path)
-from oc_sdnvalidator import OCSDNValidator # noqa: E402
-
-
-class OCSDNValidatorTest(unittest.TestCase):
- '''
- Test class for OCSDNValidator
- '''
-
- @mock.patch('oc_sdnvalidator.Utils.create_tmpfile_copy')
- @mock.patch('oc_sdnvalidator.OCSDNValidator._run')
- def test_no_data(self, mock_cmd, mock_tmpfile_copy):
- ''' Testing when both SDN objects are empty '''
-
- # Arrange
-
- # run_ansible input parameters
- params = {
- 'kubeconfig': '/etc/origin/master/admin.kubeconfig',
- }
-
- empty = '''{
- "apiVersion": "v1",
- "items": [],
- "kind": "List",
- "metadata": {},
- "resourceVersion": "",
- "selfLink": ""
-}'''
-
- # Return values of our mocked function call. These get returned once per call.
- mock_cmd.side_effect = [
- # First call to mock
- (0, empty, ''),
-
- # Second call to mock
- (0, empty, ''),
- ]
-
- mock_tmpfile_copy.side_effect = [
- '/tmp/mocked_kubeconfig',
- ]
-
- # Act
- results = OCSDNValidator.run_ansible(params)
-
- # Assert
- self.assertNotIn('failed', results)
- self.assertEqual(results['msg'], 'All SDN objects are valid.')
-
- # Making sure our mock was called as we expected
- mock_cmd.assert_has_calls([
- mock.call(['oc', '-n', 'default', 'get', 'hostsubnet', '-o', 'json'], None),
- mock.call(['oc', '-n', 'default', 'get', 'netnamespace', '-o', 'json'], None),
- ])
-
- @mock.patch('oc_sdnvalidator.Utils.create_tmpfile_copy')
- @mock.patch('oc_sdnvalidator.OCSDNValidator._run')
- def test_error_code(self, mock_cmd, mock_tmpfile_copy):
- ''' Testing when both we fail to get SDN objects '''
-
- # Arrange
-
- # run_ansible input parameters
- params = {
- 'kubeconfig': '/etc/origin/master/admin.kubeconfig',
- }
-
- # Return values of our mocked function call. These get returned once per call.
- mock_cmd.side_effect = [
- # First call to mock
- (1, '', 'Error.'),
- ]
-
- mock_tmpfile_copy.side_effect = [
- '/tmp/mocked_kubeconfig',
- ]
-
- error_results = {
- 'returncode': 1,
- 'stderr': 'Error.',
- 'stdout': '',
- 'cmd': 'oc -n default get hostsubnet -o json',
- 'results': [{}]
- }
-
- # Act
- results = OCSDNValidator.run_ansible(params)
-
- # Assert
- self.assertTrue(results['failed'])
- self.assertEqual(results['msg'], 'Failed to GET hostsubnet.')
- self.assertEqual(results['state'], 'list')
- self.assertEqual(results['results'], error_results)
-
- # Making sure our mock was called as we expected
- mock_cmd.assert_has_calls([
- mock.call(['oc', '-n', 'default', 'get', 'hostsubnet', '-o', 'json'], None),
- ])
-
- @mock.patch('oc_sdnvalidator.Utils.create_tmpfile_copy')
- @mock.patch('oc_sdnvalidator.OCSDNValidator._run')
- def test_valid_both(self, mock_cmd, mock_tmpfile_copy):
- ''' Testing when both SDN objects are valid '''
-
- # Arrange
-
- # run_ansible input parameters
- params = {
- 'kubeconfig': '/etc/origin/master/admin.kubeconfig',
- }
-
- valid_hostsubnet = '''{
- "apiVersion": "v1",
- "items": [
- {
- "apiVersion": "v1",
- "host": "bar0",
- "hostIP": "1.1.1.1",
- "kind": "HostSubnet",
- "metadata": {
- "creationTimestamp": "2017-02-16T18:47:09Z",
- "name": "bar0",
- "namespace": "",
- "resourceVersion": "986",
- "selfLink": "/oapi/v1/hostsubnetsbar0",
- "uid": "528dbb41-f478-11e6-aae0-507b9dac97ff"
- },
- "subnet": "1.1.0.0/24"
- },
- {
- "apiVersion": "v1",
- "host": "bar1",
- "hostIP": "1.1.1.1",
- "kind": "HostSubnet",
- "metadata": {
- "creationTimestamp": "2017-02-16T18:47:18Z",
- "name": "bar1",
- "namespace": "",
- "resourceVersion": "988",
- "selfLink": "/oapi/v1/hostsubnetsbar1",
- "uid": "57710d84-f478-11e6-aae0-507b9dac97ff"
- },
- "subnet": "1.1.0.0/24"
- },
- {
- "apiVersion": "v1",
- "host": "bar2",
- "hostIP": "1.1.1.1",
- "kind": "HostSubnet",
- "metadata": {
- "creationTimestamp": "2017-02-16T18:47:26Z",
- "name": "bar2",
- "namespace": "",
- "resourceVersion": "991",
- "selfLink": "/oapi/v1/hostsubnetsbar2",
- "uid": "5c59a28c-f478-11e6-aae0-507b9dac97ff"
- },
- "subnet": "1.1.0.0/24"
- }
- ],
- "kind": "List",
- "metadata": {},
- "resourceVersion": "",
- "selfLink": ""
- }'''
-
- valid_netnamespace = '''{
- "apiVersion": "v1",
- "items": [
- {
- "apiVersion": "v1",
- "kind": "NetNamespace",
- "metadata": {
- "creationTimestamp": "2017-02-16T18:45:16Z",
- "name": "foo0",
- "namespace": "",
- "resourceVersion": "959",
- "selfLink": "/oapi/v1/netnamespacesfoo0",
- "uid": "0f1c85b2-f478-11e6-aae0-507b9dac97ff"
- },
- "netid": 100,
- "netname": "foo0"
- },
- {
- "apiVersion": "v1",
- "kind": "NetNamespace",
- "metadata": {
- "creationTimestamp": "2017-02-16T18:45:26Z",
- "name": "foo1",
- "namespace": "",
- "resourceVersion": "962",
- "selfLink": "/oapi/v1/netnamespacesfoo1",
- "uid": "14effa0d-f478-11e6-aae0-507b9dac97ff"
- },
- "netid": 100,
- "netname": "foo1"
- },
- {
- "apiVersion": "v1",
- "kind": "NetNamespace",
- "metadata": {
- "creationTimestamp": "2017-02-16T18:45:36Z",
- "name": "foo2",
- "namespace": "",
- "resourceVersion": "965",
- "selfLink": "/oapi/v1/netnamespacesfoo2",
- "uid": "1aabdf84-f478-11e6-aae0-507b9dac97ff"
- },
- "netid": 100,
- "netname": "foo2"
- }
- ],
- "kind": "List",
- "metadata": {},
- "resourceVersion": "",
- "selfLink": ""
- }'''
-
- # Return values of our mocked function call. These get returned once per call.
- mock_cmd.side_effect = [
- # First call to mock
- (0, valid_hostsubnet, ''),
-
- # Second call to mock
- (0, valid_netnamespace, ''),
- ]
-
- mock_tmpfile_copy.side_effect = [
- '/tmp/mocked_kubeconfig',
- ]
-
- # Act
- results = OCSDNValidator.run_ansible(params)
-
- # Assert
- self.assertNotIn('failed', results)
- self.assertEqual(results['msg'], 'All SDN objects are valid.')
-
- # Making sure our mock was called as we expected
- mock_cmd.assert_has_calls([
- mock.call(['oc', '-n', 'default', 'get', 'hostsubnet', '-o', 'json'], None),
- mock.call(['oc', '-n', 'default', 'get', 'netnamespace', '-o', 'json'], None),
- ])
-
- @mock.patch('oc_sdnvalidator.Utils.create_tmpfile_copy')
- @mock.patch('oc_sdnvalidator.OCSDNValidator._run')
- def test_invalid_both(self, mock_cmd, mock_tmpfile_copy):
- ''' Testing when both SDN objects are invalid '''
-
- # Arrange
-
- # run_ansible input parameters
- params = {
- 'kubeconfig': '/etc/origin/master/admin.kubeconfig',
- }
-
- invalid_hostsubnet = '''{
- "apiVersion": "v1",
- "items": [
- {
- "apiVersion": "v1",
- "host": "bar0",
- "hostIP": "1.1.1.1",
- "kind": "HostSubnet",
- "metadata": {
- "creationTimestamp": "2017-02-16T18:47:09Z",
- "name": "bar0",
- "namespace": "",
- "resourceVersion": "986",
- "selfLink": "/oapi/v1/hostsubnetsbar0",
- "uid": "528dbb41-f478-11e6-aae0-507b9dac97ff"
- },
- "subnet": "1.1.0.0/24"
- },
- {
- "apiVersion": "v1",
- "host": "bar1",
- "hostIP": "1.1.1.1",
- "kind": "HostSubnet",
- "metadata": {
- "creationTimestamp": "2017-02-16T18:47:18Z",
- "name": "bar1",
- "namespace": "",
- "resourceVersion": "988",
- "selfLink": "/oapi/v1/hostsubnetsbar1",
- "uid": "57710d84-f478-11e6-aae0-507b9dac97ff"
- },
- "subnet": "1.1.0.0/24"
- },
- {
- "apiVersion": "v1",
- "host": "bar2",
- "hostIP": "1.1.1.1",
- "kind": "HostSubnet",
- "metadata": {
- "creationTimestamp": "2017-02-16T18:47:26Z",
- "name": "bar2",
- "namespace": "",
- "resourceVersion": "991",
- "selfLink": "/oapi/v1/hostsubnetsbar2",
- "uid": "5c59a28c-f478-11e6-aae0-507b9dac97ff"
- },
- "subnet": "1.1.0.0/24"
- },
- {
- "apiVersion": "v1",
- "host": "baz1",
- "hostIP": "1.1.1.1",
- "kind": "HostSubnet",
- "metadata": {
- "creationTimestamp": "2017-02-16T18:47:49Z",
- "name": "baz0",
- "namespace": "",
- "resourceVersion": "996",
- "selfLink": "/oapi/v1/hostsubnetsbaz0",
- "uid": "69f75f87-f478-11e6-aae0-507b9dac97ff"
- },
- "subnet": "1.1.0.0/24"
- }
- ],
- "kind": "List",
- "metadata": {},
- "resourceVersion": "",
- "selfLink": ""
-}'''
-
- invalid_netnamespace = '''{
- "apiVersion": "v1",
- "items": [
- {
- "apiVersion": "v1",
- "kind": "NetNamespace",
- "metadata": {
- "creationTimestamp": "2017-02-16T18:45:52Z",
- "name": "bar0",
- "namespace": "",
- "resourceVersion": "969",
- "selfLink": "/oapi/v1/netnamespacesbar0",
- "uid": "245d416e-f478-11e6-aae0-507b9dac97ff"
- },
- "netid": 100,
- "netname": "bar1"
- },
- {
- "apiVersion": "v1",
- "kind": "NetNamespace",
- "metadata": {
- "creationTimestamp": "2017-02-16T18:45:16Z",
- "name": "foo0",
- "namespace": "",
- "resourceVersion": "959",
- "selfLink": "/oapi/v1/netnamespacesfoo0",
- "uid": "0f1c85b2-f478-11e6-aae0-507b9dac97ff"
- },
- "netid": 100,
- "netname": "foo0"
- },
- {
- "apiVersion": "v1",
- "kind": "NetNamespace",
- "metadata": {
- "creationTimestamp": "2017-02-16T18:45:26Z",
- "name": "foo1",
- "namespace": "",
- "resourceVersion": "962",
- "selfLink": "/oapi/v1/netnamespacesfoo1",
- "uid": "14effa0d-f478-11e6-aae0-507b9dac97ff"
- },
- "netid": 100,
- "netname": "foo1"
- },
- {
- "apiVersion": "v1",
- "kind": "NetNamespace",
- "metadata": {
- "creationTimestamp": "2017-02-16T18:45:36Z",
- "name": "foo2",
- "namespace": "",
- "resourceVersion": "965",
- "selfLink": "/oapi/v1/netnamespacesfoo2",
- "uid": "1aabdf84-f478-11e6-aae0-507b9dac97ff"
- },
- "netid": 100,
- "netname": "foo2"
- }
- ],
- "kind": "List",
- "metadata": {},
- "resourceVersion": "",
- "selfLink": ""
-}'''
-
- invalid_results = {
- 'hostsubnets where metadata.name != host': [{
- 'apiVersion': 'v1',
- 'host': 'baz1',
- 'hostIP': '1.1.1.1',
- 'kind': 'HostSubnet',
- 'metadata': {
- 'creationTimestamp': '2017-02-16T18:47:49Z',
- 'name': 'baz0',
- 'namespace': '',
- 'resourceVersion': '996',
- 'selfLink': '/oapi/v1/hostsubnetsbaz0',
- 'uid': '69f75f87-f478-11e6-aae0-507b9dac97ff'
- },
- 'subnet': '1.1.0.0/24'
- }],
- 'netnamespaces where metadata.name != netname': [{
- 'apiVersion': 'v1',
- 'kind': 'NetNamespace',
- 'metadata': {
- 'creationTimestamp': '2017-02-16T18:45:52Z',
- 'name': 'bar0',
- 'namespace': '',
- 'resourceVersion': '969',
- 'selfLink': '/oapi/v1/netnamespacesbar0',
- 'uid': '245d416e-f478-11e6-aae0-507b9dac97ff'
- },
- 'netid': 100,
- 'netname': 'bar1'
- }],
- }
-
- # Return values of our mocked function call. These get returned once per call.
- mock_cmd.side_effect = [
- # First call to mock
- (0, invalid_hostsubnet, ''),
-
- # Second call to mock
- (0, invalid_netnamespace, ''),
- ]
-
- mock_tmpfile_copy.side_effect = [
- '/tmp/mocked_kubeconfig',
- ]
-
- # Act
- results = OCSDNValidator.run_ansible(params)
-
- # Assert
- self.assertTrue(results['failed'])
- self.assertEqual(results['msg'], 'All SDN objects are not valid.')
- self.assertEqual(results['state'], 'list')
- self.assertEqual(results['results'], invalid_results)
-
- # Making sure our mock was called as we expected
- mock_cmd.assert_has_calls([
- mock.call(['oc', '-n', 'default', 'get', 'hostsubnet', '-o', 'json'], None),
- mock.call(['oc', '-n', 'default', 'get', 'netnamespace', '-o', 'json'], None),
- ])
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/roles/lib_openshift/src/test/unit/test_oadm_manage_node.py b/roles/lib_openshift/src/test/unit/test_oadm_manage_node.py
index 761c849fb..27d98b869 100755
--- a/roles/lib_openshift/src/test/unit/test_oadm_manage_node.py
+++ b/roles/lib_openshift/src/test/unit/test_oadm_manage_node.py
@@ -1,14 +1,6 @@
-#!/usr/bin/env python2
'''
Unit tests for oadm_manage_node
'''
-# To run
-# python -m unittest version
-#
-# .
-# Ran 2 tests in 0.001s
-#
-# OK
import os
import six
@@ -32,10 +24,6 @@ class ManageNodeTest(unittest.TestCase):
Test class for oadm_manage_node
'''
- def setUp(self):
- ''' setup method will create a file and set to known configuration '''
- pass
-
@mock.patch('oadm_manage_node.Utils.create_tmpfile_copy')
@mock.patch('oadm_manage_node.ManageNode.openshift_cmd')
def test_list_pods(self, mock_openshift_cmd, mock_tmpfile_copy):
@@ -287,11 +275,3 @@ class ManageNodeTest(unittest.TestCase):
mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
self.assertEqual(locate_oc_binary(), oc_bin)
-
- def tearDown(self):
- '''TearDown method'''
- pass
-
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/roles/lib_openshift/src/test/unit/test_oc_env.py b/roles/lib_openshift/src/test/unit/test_oc_env.py
index 45a3ef1eb..2f416c05e 100755
--- a/roles/lib_openshift/src/test/unit/test_oc_env.py
+++ b/roles/lib_openshift/src/test/unit/test_oc_env.py
@@ -1,14 +1,6 @@
-#!/usr/bin/env python2
'''
Unit tests for oc_env
'''
-# To run:
-# ./oc_env.py
-#
-# .
-# Ran 1 test in 0.002s
-#
-# OK
import os
import six
@@ -32,10 +24,6 @@ class OCEnvTest(unittest.TestCase):
Test class for OCEnv
'''
- def setUp(self):
- ''' setup method will create a file and set to known configuration '''
- pass
-
@mock.patch('oc_env.locate_oc_binary')
@mock.patch('oc_env.Utils.create_tmpfile_copy')
@mock.patch('oc_env.OCEnv._run')
@@ -558,11 +546,3 @@ class OCEnvTest(unittest.TestCase):
mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
self.assertEqual(locate_oc_binary(), oc_bin)
-
- def tearDown(self):
- '''TearDown method'''
- pass
-
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/roles/lib_openshift/src/test/unit/test_oc_label.py b/roles/lib_openshift/src/test/unit/test_oc_label.py
index 933b5f221..5453266c1 100755
--- a/roles/lib_openshift/src/test/unit/test_oc_label.py
+++ b/roles/lib_openshift/src/test/unit/test_oc_label.py
@@ -1,14 +1,6 @@
-#!/usr/bin/env python2
'''
Unit tests for oc label
'''
-# To run
-# python -m unittest version
-#
-# .
-# Ran 1 test in 0.597s
-#
-# OK
import os
import six
@@ -32,10 +24,6 @@ class OCLabelTest(unittest.TestCase):
Test class for OCLabel
'''
- def setUp(self):
- ''' setup method will create a file and set to known configuration '''
- pass
-
@mock.patch('oc_label.Utils.create_tmpfile_copy')
@mock.patch('oc_label.OCLabel._run')
def test_state_list(self, mock_cmd, mock_tmpfile_copy):
@@ -295,11 +283,3 @@ class OCLabelTest(unittest.TestCase):
mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
self.assertEqual(locate_oc_binary(), oc_bin)
-
- def tearDown(self):
- '''TearDown method'''
- pass
-
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/roles/lib_openshift/src/test/unit/test_oc_objectvalidator.py b/roles/lib_openshift/src/test/unit/test_oc_objectvalidator.py
new file mode 100755
index 000000000..da326742f
--- /dev/null
+++ b/roles/lib_openshift/src/test/unit/test_oc_objectvalidator.py
@@ -0,0 +1,903 @@
+'''
+ Unit tests for oc_objectvalidator
+'''
+
+import os
+import sys
+import unittest
+import mock
+
+# Removing invalid variable names for tests so that I can
+# keep them brief
+# pylint: disable=invalid-name,no-name-in-module
+# Disable import-error b/c our libraries aren't loaded in jenkins
+# pylint: disable=import-error
+# place class in our python path
+module_path = os.path.join('/'.join(os.path.realpath(__file__).split('/')[:-4]), 'library') # noqa: E501
+sys.path.insert(0, module_path)
+from oc_objectvalidator import OCObjectValidator # noqa: E402
+
+
+class OCObjectValidatorTest(unittest.TestCase):
+ '''
+ Test class for OCObjectValidator
+ '''
+
+ maxDiff = None
+
+ @mock.patch('oc_objectvalidator.Utils.create_tmpfile_copy')
+ @mock.patch('oc_objectvalidator.OCObjectValidator._run')
+ def test_no_data(self, mock_cmd, mock_tmpfile_copy):
+        ''' Testing when all objects are empty '''
+
+ # Arrange
+
+ # run_ansible input parameters
+ params = {
+ 'kubeconfig': '/etc/origin/master/admin.kubeconfig',
+ }
+
+ empty = '''{
+ "apiVersion": "v1",
+ "items": [],
+ "kind": "List",
+ "metadata": {},
+ "resourceVersion": "",
+ "selfLink": ""
+}'''
+
+ # Return values of our mocked function call. These get returned once per call.
+ mock_cmd.side_effect = [
+ # First call to mock
+ (0, empty, ''),
+
+ # Second call to mock
+ (0, empty, ''),
+
+ # Third call to mock
+ (0, empty, ''),
+ ]
+
+ mock_tmpfile_copy.side_effect = [
+ '/tmp/mocked_kubeconfig',
+ ]
+
+ # Act
+ results = OCObjectValidator.run_ansible(params)
+
+ # Assert
+ self.assertNotIn('failed', results)
+ self.assertEqual(results['msg'], 'All objects are valid.')
+
+ # Making sure our mock was called as we expected
+ mock_cmd.assert_has_calls([
+ mock.call(['oc', 'get', 'hostsubnet', '-o', 'json', '-n', 'default'], None),
+ mock.call(['oc', 'get', 'netnamespace', '-o', 'json', '-n', 'default'], None),
+ mock.call(['oc', 'get', 'namespace', '-o', 'json', '-n', 'default'], None),
+ ])
+
+ @mock.patch('oc_objectvalidator.Utils.create_tmpfile_copy')
+ @mock.patch('oc_objectvalidator.OCObjectValidator._run')
+ def test_error_code(self, mock_cmd, mock_tmpfile_copy):
+ ''' Testing when we fail to get objects '''
+
+ # Arrange
+
+ # run_ansible input parameters
+ params = {
+ 'kubeconfig': '/etc/origin/master/admin.kubeconfig',
+ }
+
+ # Return values of our mocked function call. These get returned once per call.
+ mock_cmd.side_effect = [
+ # First call to mock
+ (1, '', 'Error.'),
+ ]
+
+ mock_tmpfile_copy.side_effect = [
+ '/tmp/mocked_kubeconfig',
+ ]
+
+ error_results = {
+ 'returncode': 1,
+ 'stderr': 'Error.',
+ 'stdout': '',
+ 'cmd': 'oc get hostsubnet -o json -n default',
+ 'results': [{}]
+ }
+
+ # Act
+ results = OCObjectValidator.run_ansible(params)
+
+ # Assert
+ self.assertTrue(results['failed'])
+ self.assertEqual(results['msg'], 'Failed to GET hostsubnet.')
+ self.assertEqual(results['state'], 'list')
+ self.assertEqual(results['results'], error_results)
+
+ # Making sure our mock was called as we expected
+ mock_cmd.assert_has_calls([
+ mock.call(['oc', 'get', 'hostsubnet', '-o', 'json', '-n', 'default'], None),
+ ])
+
+ @mock.patch('oc_objectvalidator.Utils.create_tmpfile_copy')
+ @mock.patch('oc_objectvalidator.OCObjectValidator._run')
+ def test_valid_both(self, mock_cmd, mock_tmpfile_copy):
+        ''' Testing when all objects are valid '''
+
+ # Arrange
+
+ # run_ansible input parameters
+ params = {
+ 'kubeconfig': '/etc/origin/master/admin.kubeconfig',
+ }
+
+ valid_hostsubnet = '''{
+ "apiVersion": "v1",
+ "items": [
+ {
+ "apiVersion": "v1",
+ "host": "bar0",
+ "hostIP": "1.1.1.1",
+ "kind": "HostSubnet",
+ "metadata": {
+ "creationTimestamp": "2017-02-16T18:47:09Z",
+ "name": "bar0",
+ "namespace": "",
+ "resourceVersion": "986",
+ "selfLink": "/oapi/v1/hostsubnetsbar0",
+ "uid": "528dbb41-f478-11e6-aae0-507b9dac97ff"
+ },
+ "subnet": "1.1.0.0/24"
+ },
+ {
+ "apiVersion": "v1",
+ "host": "bar1",
+ "hostIP": "1.1.1.1",
+ "kind": "HostSubnet",
+ "metadata": {
+ "creationTimestamp": "2017-02-16T18:47:18Z",
+ "name": "bar1",
+ "namespace": "",
+ "resourceVersion": "988",
+ "selfLink": "/oapi/v1/hostsubnetsbar1",
+ "uid": "57710d84-f478-11e6-aae0-507b9dac97ff"
+ },
+ "subnet": "1.1.0.0/24"
+ },
+ {
+ "apiVersion": "v1",
+ "host": "bar2",
+ "hostIP": "1.1.1.1",
+ "kind": "HostSubnet",
+ "metadata": {
+ "creationTimestamp": "2017-02-16T18:47:26Z",
+ "name": "bar2",
+ "namespace": "",
+ "resourceVersion": "991",
+ "selfLink": "/oapi/v1/hostsubnetsbar2",
+ "uid": "5c59a28c-f478-11e6-aae0-507b9dac97ff"
+ },
+ "subnet": "1.1.0.0/24"
+ }
+ ],
+ "kind": "List",
+ "metadata": {},
+ "resourceVersion": "",
+ "selfLink": ""
+ }'''
+
+ valid_netnamespace = '''{
+ "apiVersion": "v1",
+ "items": [
+ {
+ "apiVersion": "v1",
+ "kind": "NetNamespace",
+ "metadata": {
+ "creationTimestamp": "2017-02-16T18:45:16Z",
+ "name": "foo0",
+ "namespace": "",
+ "resourceVersion": "959",
+ "selfLink": "/oapi/v1/netnamespacesfoo0",
+ "uid": "0f1c85b2-f478-11e6-aae0-507b9dac97ff"
+ },
+ "netid": 100,
+ "netname": "foo0"
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "NetNamespace",
+ "metadata": {
+ "creationTimestamp": "2017-02-16T18:45:26Z",
+ "name": "foo1",
+ "namespace": "",
+ "resourceVersion": "962",
+ "selfLink": "/oapi/v1/netnamespacesfoo1",
+ "uid": "14effa0d-f478-11e6-aae0-507b9dac97ff"
+ },
+ "netid": 100,
+ "netname": "foo1"
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "NetNamespace",
+ "metadata": {
+ "creationTimestamp": "2017-02-16T18:45:36Z",
+ "name": "foo2",
+ "namespace": "",
+ "resourceVersion": "965",
+ "selfLink": "/oapi/v1/netnamespacesfoo2",
+ "uid": "1aabdf84-f478-11e6-aae0-507b9dac97ff"
+ },
+ "netid": 100,
+ "netname": "foo2"
+ }
+ ],
+ "kind": "List",
+ "metadata": {},
+ "resourceVersion": "",
+ "selfLink": ""
+ }'''
+
+ valid_namespace = '''{
+ "apiVersion": "v1",
+ "items": [
+ {
+ "apiVersion": "v1",
+ "kind": "Namespace",
+ "metadata": {
+ "annotations": {
+ "openshift.io/sa.scc.mcs": "s0:c1,c0",
+ "openshift.io/sa.scc.supplemental-groups": "1000000000/10000",
+ "openshift.io/sa.scc.uid-range": "1000000000/10000"
+ },
+ "creationTimestamp": "2017-03-02T00:49:49Z",
+ "name": "default",
+ "namespace": "",
+ "resourceVersion": "165",
+ "selfLink": "/api/v1/namespacesdefault",
+ "uid": "23c0c6aa-fee2-11e6-b45a-507b9dac97ff"
+ },
+ "spec": {
+ "finalizers": [
+ "kubernetes",
+ "openshift.io/origin"
+ ]
+ },
+ "status": {
+ "phase": "Active"
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "Namespace",
+ "metadata": {
+ "annotations": {
+ "openshift.io/sa.scc.mcs": "s0:c3,c2",
+ "openshift.io/sa.scc.supplemental-groups": "1000010000/10000",
+ "openshift.io/sa.scc.uid-range": "1000010000/10000"
+ },
+ "creationTimestamp": "2017-03-02T00:49:49Z",
+ "name": "kube-system",
+ "namespace": "",
+ "resourceVersion": "533",
+ "selfLink": "/api/v1/namespaceskube-system",
+ "uid": "23c21758-fee2-11e6-b45a-507b9dac97ff"
+ },
+ "spec": {
+ "finalizers": [
+ "kubernetes",
+ "openshift.io/origin"
+ ]
+ },
+ "status": {
+ "phase": "Active"
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "Namespace",
+ "metadata": {
+ "annotations": {
+ "openshift.io/description": "",
+ "openshift.io/display-name": "",
+ "openshift.io/requester": "developer",
+ "openshift.io/sa.scc.mcs": "s0:c9,c4",
+ "openshift.io/sa.scc.supplemental-groups": "1000080000/10000",
+ "openshift.io/sa.scc.uid-range": "1000080000/10000"
+ },
+ "creationTimestamp": "2017-03-02T02:17:16Z",
+ "name": "myproject",
+ "namespace": "",
+ "resourceVersion": "2898",
+ "selfLink": "/api/v1/namespacesmyproject",
+ "uid": "5ae3764d-feee-11e6-b45a-507b9dac97ff"
+ },
+ "spec": {
+ "finalizers": [
+ "openshift.io/origin",
+ "kubernetes"
+ ]
+ },
+ "status": {
+ "phase": "Active"
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "Namespace",
+ "metadata": {
+ "annotations": {
+ "openshift.io/sa.scc.mcs": "s0:c6,c0",
+ "openshift.io/sa.scc.supplemental-groups": "1000030000/10000",
+ "openshift.io/sa.scc.uid-range": "1000030000/10000"
+ },
+ "creationTimestamp": "2017-03-02T00:49:51Z",
+ "name": "openshift",
+ "namespace": "",
+ "resourceVersion": "171",
+ "selfLink": "/api/v1/namespacesopenshift",
+ "uid": "24f7b34d-fee2-11e6-b45a-507b9dac97ff"
+ },
+ "spec": {
+ "finalizers": [
+ "kubernetes",
+ "openshift.io/origin"
+ ]
+ },
+ "status": {
+ "phase": "Active"
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "Namespace",
+ "metadata": {
+ "annotations": {
+ "openshift.io/sa.scc.mcs": "s0:c5,c0",
+ "openshift.io/sa.scc.supplemental-groups": "1000020000/10000",
+ "openshift.io/sa.scc.uid-range": "1000020000/10000"
+ },
+ "creationTimestamp": "2017-03-02T00:49:51Z",
+ "name": "openshift-infra",
+ "namespace": "",
+ "resourceVersion": "169",
+ "selfLink": "/api/v1/namespacesopenshift-infra",
+ "uid": "24a2ed75-fee2-11e6-b45a-507b9dac97ff"
+ },
+ "spec": {
+ "finalizers": [
+ "kubernetes",
+ "openshift.io/origin"
+ ]
+ },
+ "status": {
+ "phase": "Active"
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "Namespace",
+ "metadata": {
+ "annotations": {
+ "openshift.io/description": "",
+ "openshift.io/display-name": "",
+ "openshift.io/requester": "developer1",
+ "openshift.io/sa.scc.mcs": "s0:c10,c0",
+ "openshift.io/sa.scc.supplemental-groups": "1000090000/10000",
+ "openshift.io/sa.scc.uid-range": "1000090000/10000"
+ },
+ "creationTimestamp": "2017-03-02T02:17:56Z",
+ "name": "yourproject",
+ "namespace": "",
+ "resourceVersion": "2955",
+ "selfLink": "/api/v1/namespacesyourproject",
+ "uid": "72df7fb9-feee-11e6-b45a-507b9dac97ff"
+ },
+ "spec": {
+ "finalizers": [
+ "openshift.io/origin",
+ "kubernetes"
+ ]
+ },
+ "status": {
+ "phase": "Active"
+ }
+ }
+ ],
+ "kind": "List",
+ "metadata": {},
+ "resourceVersion": "",
+ "selfLink": ""
+}'''
+
+ # Return values of our mocked function call. These get returned once per call.
+ mock_cmd.side_effect = [
+ # First call to mock
+ (0, valid_hostsubnet, ''),
+
+ # Second call to mock
+ (0, valid_netnamespace, ''),
+
+ # Third call to mock
+ (0, valid_namespace, ''),
+ ]
+
+ mock_tmpfile_copy.side_effect = [
+ '/tmp/mocked_kubeconfig',
+ ]
+
+ # Act
+ results = OCObjectValidator.run_ansible(params)
+
+ # Assert
+ self.assertNotIn('failed', results)
+ self.assertEqual(results['msg'], 'All objects are valid.')
+
+ # Making sure our mock was called as we expected
+ mock_cmd.assert_has_calls([
+ mock.call(['oc', 'get', 'hostsubnet', '-o', 'json', '-n', 'default'], None),
+ mock.call(['oc', 'get', 'netnamespace', '-o', 'json', '-n', 'default'], None),
+ mock.call(['oc', 'get', 'namespace', '-o', 'json', '-n', 'default'], None),
+ ])
+
+ @mock.patch('oc_objectvalidator.Utils.create_tmpfile_copy')
+ @mock.patch('oc_objectvalidator.OCObjectValidator._run')
+ def test_invalid_both(self, mock_cmd, mock_tmpfile_copy):
+ ''' Testing when all objects are invalid '''
+
+ # Arrange
+
+ # run_ansible input parameters
+ params = {
+ 'kubeconfig': '/etc/origin/master/admin.kubeconfig',
+ }
+
+ invalid_hostsubnet = '''{
+ "apiVersion": "v1",
+ "items": [
+ {
+ "apiVersion": "v1",
+ "host": "bar0",
+ "hostIP": "1.1.1.1",
+ "kind": "HostSubnet",
+ "metadata": {
+ "creationTimestamp": "2017-02-16T18:47:09Z",
+ "name": "bar0",
+ "namespace": "",
+ "resourceVersion": "986",
+ "selfLink": "/oapi/v1/hostsubnetsbar0",
+ "uid": "528dbb41-f478-11e6-aae0-507b9dac97ff"
+ },
+ "subnet": "1.1.0.0/24"
+ },
+ {
+ "apiVersion": "v1",
+ "host": "bar1",
+ "hostIP": "1.1.1.1",
+ "kind": "HostSubnet",
+ "metadata": {
+ "creationTimestamp": "2017-02-16T18:47:18Z",
+ "name": "bar1",
+ "namespace": "",
+ "resourceVersion": "988",
+ "selfLink": "/oapi/v1/hostsubnetsbar1",
+ "uid": "57710d84-f478-11e6-aae0-507b9dac97ff"
+ },
+ "subnet": "1.1.0.0/24"
+ },
+ {
+ "apiVersion": "v1",
+ "host": "bar2",
+ "hostIP": "1.1.1.1",
+ "kind": "HostSubnet",
+ "metadata": {
+ "creationTimestamp": "2017-02-16T18:47:26Z",
+ "name": "bar2",
+ "namespace": "",
+ "resourceVersion": "991",
+ "selfLink": "/oapi/v1/hostsubnetsbar2",
+ "uid": "5c59a28c-f478-11e6-aae0-507b9dac97ff"
+ },
+ "subnet": "1.1.0.0/24"
+ },
+ {
+ "apiVersion": "v1",
+ "host": "baz1",
+ "hostIP": "1.1.1.1",
+ "kind": "HostSubnet",
+ "metadata": {
+ "creationTimestamp": "2017-02-16T18:47:49Z",
+ "name": "baz0",
+ "namespace": "",
+ "resourceVersion": "996",
+ "selfLink": "/oapi/v1/hostsubnetsbaz0",
+ "uid": "69f75f87-f478-11e6-aae0-507b9dac97ff"
+ },
+ "subnet": "1.1.0.0/24"
+ }
+ ],
+ "kind": "List",
+ "metadata": {},
+ "resourceVersion": "",
+ "selfLink": ""
+}'''
+
+ invalid_netnamespace = '''{
+ "apiVersion": "v1",
+ "items": [
+ {
+ "apiVersion": "v1",
+ "kind": "NetNamespace",
+ "metadata": {
+ "creationTimestamp": "2017-02-16T18:45:52Z",
+ "name": "bar0",
+ "namespace": "",
+ "resourceVersion": "969",
+ "selfLink": "/oapi/v1/netnamespacesbar0",
+ "uid": "245d416e-f478-11e6-aae0-507b9dac97ff"
+ },
+ "netid": 100,
+ "netname": "bar1"
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "NetNamespace",
+ "metadata": {
+ "creationTimestamp": "2017-02-16T18:45:16Z",
+ "name": "foo0",
+ "namespace": "",
+ "resourceVersion": "959",
+ "selfLink": "/oapi/v1/netnamespacesfoo0",
+ "uid": "0f1c85b2-f478-11e6-aae0-507b9dac97ff"
+ },
+ "netid": 100,
+ "netname": "foo0"
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "NetNamespace",
+ "metadata": {
+ "creationTimestamp": "2017-02-16T18:45:26Z",
+ "name": "foo1",
+ "namespace": "",
+ "resourceVersion": "962",
+ "selfLink": "/oapi/v1/netnamespacesfoo1",
+ "uid": "14effa0d-f478-11e6-aae0-507b9dac97ff"
+ },
+ "netid": 100,
+ "netname": "foo1"
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "NetNamespace",
+ "metadata": {
+ "creationTimestamp": "2017-02-16T18:45:36Z",
+ "name": "foo2",
+ "namespace": "",
+ "resourceVersion": "965",
+ "selfLink": "/oapi/v1/netnamespacesfoo2",
+ "uid": "1aabdf84-f478-11e6-aae0-507b9dac97ff"
+ },
+ "netid": 100,
+ "netname": "foo2"
+ }
+ ],
+ "kind": "List",
+ "metadata": {},
+ "resourceVersion": "",
+ "selfLink": ""
+}'''
+
+ invalid_namespace = '''{
+ "apiVersion": "v1",
+ "items": [
+ {
+ "apiVersion": "v1",
+ "kind": "Namespace",
+ "metadata": {
+ "annotations": {
+ "openshift.io/sa.scc.mcs": "s0:c1,c0",
+ "openshift.io/sa.scc.supplemental-groups": "1000000000/10000",
+ "openshift.io/sa.scc.uid-range": "1000000000/10000"
+ },
+ "creationTimestamp": "2017-03-02T00:49:49Z",
+ "name": "default",
+ "namespace": "",
+ "resourceVersion": "165",
+ "selfLink": "/api/v1/namespacesdefault",
+ "uid": "23c0c6aa-fee2-11e6-b45a-507b9dac97ff"
+ },
+ "spec": {
+ "finalizers": [
+ "kubernetes",
+ "openshift.io/origin"
+ ]
+ },
+ "status": {
+ "phase": "Active"
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "Namespace",
+ "metadata": {
+ "annotations": {
+ "openshift.io/requester": "",
+ "openshift.io/sa.scc.mcs": "s0:c3,c2",
+ "openshift.io/sa.scc.supplemental-groups": "1000010000/10000",
+ "openshift.io/sa.scc.uid-range": "1000010000/10000"
+ },
+ "creationTimestamp": "2017-03-02T00:49:49Z",
+ "name": "kube-system",
+ "namespace": "",
+ "resourceVersion": "3052",
+ "selfLink": "/api/v1/namespaceskube-system",
+ "uid": "23c21758-fee2-11e6-b45a-507b9dac97ff"
+ },
+ "spec": {
+ "finalizers": [
+ "kubernetes",
+ "openshift.io/origin"
+ ]
+ },
+ "status": {
+ "phase": "Active"
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "Namespace",
+ "metadata": {
+ "annotations": {
+ "openshift.io/description": "",
+ "openshift.io/display-name": "",
+ "openshift.io/requester": "developer",
+ "openshift.io/sa.scc.mcs": "s0:c9,c4",
+ "openshift.io/sa.scc.supplemental-groups": "1000080000/10000",
+ "openshift.io/sa.scc.uid-range": "1000080000/10000"
+ },
+ "creationTimestamp": "2017-03-02T02:17:16Z",
+ "name": "myproject",
+ "namespace": "",
+ "resourceVersion": "2898",
+ "selfLink": "/api/v1/namespacesmyproject",
+ "uid": "5ae3764d-feee-11e6-b45a-507b9dac97ff"
+ },
+ "spec": {
+ "finalizers": [
+ "openshift.io/origin",
+ "kubernetes"
+ ]
+ },
+ "status": {
+ "phase": "Active"
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "Namespace",
+ "metadata": {
+ "annotations": {
+ "openshift.io/requester": "",
+ "openshift.io/sa.scc.mcs": "s0:c6,c0",
+ "openshift.io/sa.scc.supplemental-groups": "1000030000/10000",
+ "openshift.io/sa.scc.uid-range": "1000030000/10000"
+ },
+ "creationTimestamp": "2017-03-02T00:49:51Z",
+ "name": "openshift",
+ "namespace": "",
+ "resourceVersion": "3057",
+ "selfLink": "/api/v1/namespacesopenshift",
+ "uid": "24f7b34d-fee2-11e6-b45a-507b9dac97ff"
+ },
+ "spec": {
+ "finalizers": [
+ "kubernetes",
+ "openshift.io/origin"
+ ]
+ },
+ "status": {
+ "phase": "Active"
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "Namespace",
+ "metadata": {
+ "annotations": {
+ "openshift.io/description": "",
+ "openshift.io/display-name": "",
+ "openshift.io/requester": "system:admin",
+ "openshift.io/sa.scc.mcs": "s0:c10,c5",
+ "openshift.io/sa.scc.supplemental-groups": "1000100000/10000",
+ "openshift.io/sa.scc.uid-range": "1000100000/10000"
+ },
+ "creationTimestamp": "2017-03-02T02:21:15Z",
+ "name": "openshift-fancy",
+ "namespace": "",
+ "resourceVersion": "3072",
+ "selfLink": "/api/v1/namespacesopenshift-fancy",
+ "uid": "e958063c-feee-11e6-b45a-507b9dac97ff"
+ },
+ "spec": {
+ "finalizers": [
+ "openshift.io/origin",
+ "kubernetes"
+ ]
+ },
+ "status": {
+ "phase": "Active"
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "Namespace",
+ "metadata": {
+ "annotations": {
+ "openshift.io/sa.scc.mcs": "s0:c5,c0",
+ "openshift.io/sa.scc.supplemental-groups": "1000020000/10000",
+ "openshift.io/sa.scc.uid-range": "1000020000/10000"
+ },
+ "creationTimestamp": "2017-03-02T00:49:51Z",
+ "name": "openshift-infra",
+ "namespace": "",
+ "resourceVersion": "169",
+ "selfLink": "/api/v1/namespacesopenshift-infra",
+ "uid": "24a2ed75-fee2-11e6-b45a-507b9dac97ff"
+ },
+ "spec": {
+ "finalizers": [
+ "kubernetes",
+ "openshift.io/origin"
+ ]
+ },
+ "status": {
+ "phase": "Active"
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "Namespace",
+ "metadata": {
+ "annotations": {
+ "openshift.io/description": "",
+ "openshift.io/display-name": "",
+ "openshift.io/requester": "developer1",
+ "openshift.io/sa.scc.mcs": "s0:c10,c0",
+ "openshift.io/sa.scc.supplemental-groups": "1000090000/10000",
+ "openshift.io/sa.scc.uid-range": "1000090000/10000"
+ },
+ "creationTimestamp": "2017-03-02T02:17:56Z",
+ "name": "yourproject",
+ "namespace": "",
+ "resourceVersion": "2955",
+ "selfLink": "/api/v1/namespacesyourproject",
+ "uid": "72df7fb9-feee-11e6-b45a-507b9dac97ff"
+ },
+ "spec": {
+ "finalizers": [
+ "openshift.io/origin",
+ "kubernetes"
+ ]
+ },
+ "status": {
+ "phase": "Active"
+ }
+ }
+ ],
+ "kind": "List",
+ "metadata": {},
+ "resourceVersion": "",
+ "selfLink": ""
+}'''
+
+ invalid_results = {
+ 'hostsubnets where metadata.name != host': [{
+ 'apiVersion': 'v1',
+ 'host': 'baz1',
+ 'hostIP': '1.1.1.1',
+ 'kind': 'HostSubnet',
+ 'metadata': {
+ 'creationTimestamp': '2017-02-16T18:47:49Z',
+ 'name': 'baz0',
+ 'namespace': '',
+ 'resourceVersion': '996',
+ 'selfLink': '/oapi/v1/hostsubnetsbaz0',
+ 'uid': '69f75f87-f478-11e6-aae0-507b9dac97ff'
+ },
+ 'subnet': '1.1.0.0/24'
+ }],
+ 'netnamespaces where metadata.name != netname': [{
+ 'apiVersion': 'v1',
+ 'kind': 'NetNamespace',
+ 'metadata': {
+ 'creationTimestamp': '2017-02-16T18:45:52Z',
+ 'name': 'bar0',
+ 'namespace': '',
+ 'resourceVersion': '969',
+ 'selfLink': '/oapi/v1/netnamespacesbar0',
+ 'uid': '245d416e-f478-11e6-aae0-507b9dac97ff'
+ },
+ 'netid': 100,
+ 'netname': 'bar1'
+ }],
+ 'namespaces that use reserved names and were not created by infrastructure components': [{
+ 'apiVersion': 'v1',
+ 'kind': 'Namespace',
+ 'metadata': {'annotations': {'openshift.io/requester': '',
+ 'openshift.io/sa.scc.mcs': 's0:c3,c2',
+ 'openshift.io/sa.scc.supplemental-groups': '1000010000/10000',
+ 'openshift.io/sa.scc.uid-range': '1000010000/10000'},
+ 'creationTimestamp': '2017-03-02T00:49:49Z',
+ 'name': 'kube-system',
+ 'namespace': '',
+ 'resourceVersion': '3052',
+ 'selfLink': '/api/v1/namespaceskube-system',
+ 'uid': '23c21758-fee2-11e6-b45a-507b9dac97ff'},
+ 'spec': {'finalizers': ['kubernetes', 'openshift.io/origin']},
+ 'status': {'phase': 'Active'}},
+ {'apiVersion': 'v1',
+ 'kind': 'Namespace',
+ 'metadata': {'annotations': {'openshift.io/requester': '',
+ 'openshift.io/sa.scc.mcs': 's0:c6,c0',
+ 'openshift.io/sa.scc.supplemental-groups': '1000030000/10000',
+ 'openshift.io/sa.scc.uid-range': '1000030000/10000'},
+ 'creationTimestamp': '2017-03-02T00:49:51Z',
+ 'name': 'openshift',
+ 'namespace': '',
+ 'resourceVersion': '3057',
+ 'selfLink': '/api/v1/namespacesopenshift',
+ 'uid': '24f7b34d-fee2-11e6-b45a-507b9dac97ff'},
+ 'spec': {'finalizers': ['kubernetes', 'openshift.io/origin']},
+ 'status': {'phase': 'Active'}},
+ {'apiVersion': 'v1',
+ 'kind': 'Namespace',
+ 'metadata': {'annotations': {'openshift.io/description': '',
+ 'openshift.io/display-name': '',
+ 'openshift.io/requester': 'system:admin',
+ 'openshift.io/sa.scc.mcs': 's0:c10,c5',
+ 'openshift.io/sa.scc.supplemental-groups': '1000100000/10000',
+ 'openshift.io/sa.scc.uid-range': '1000100000/10000'},
+ 'creationTimestamp': '2017-03-02T02:21:15Z',
+ 'name': 'openshift-fancy',
+ 'namespace': '',
+ 'resourceVersion': '3072',
+ 'selfLink': '/api/v1/namespacesopenshift-fancy',
+ 'uid': 'e958063c-feee-11e6-b45a-507b9dac97ff'},
+ 'spec': {'finalizers': ['openshift.io/origin', 'kubernetes']},
+ 'status': {'phase': 'Active'}
+ }],
+ }
+
+ # Return values of our mocked function call. These get returned once per call.
+ mock_cmd.side_effect = [
+ # First call to mock
+ (0, invalid_hostsubnet, ''),
+
+ # Second call to mock
+ (0, invalid_netnamespace, ''),
+
+ # Third call to mock
+ (0, invalid_namespace, ''),
+ ]
+
+ mock_tmpfile_copy.side_effect = [
+ '/tmp/mocked_kubeconfig',
+ ]
+
+ # Act
+ results = OCObjectValidator.run_ansible(params)
+
+ # Assert
+ self.assertTrue(results['failed'])
+ self.assertIn('All objects are not valid.', results['msg'])
+ self.assertEqual(results['state'], 'list')
+ self.assertEqual(results['results'], invalid_results)
+
+ # Making sure our mock was called as we expected
+ mock_cmd.assert_has_calls([
+ mock.call(['oc', 'get', 'hostsubnet', '-o', 'json', '-n', 'default'], None),
+ mock.call(['oc', 'get', 'netnamespace', '-o', 'json', '-n', 'default'], None),
+ mock.call(['oc', 'get', 'namespace', '-o', 'json', '-n', 'default'], None),
+ ])
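
The new test file above exercises oc_objectvalidator only through OCObjectValidator.run_ansible with a single kubeconfig parameter. A minimal playbook sketch along the same lines is shown below; the host targeting and the registered variable name are illustrative assumptions, not taken from this repository's playbooks.

```yaml
# Illustrative sketch only: calls oc_objectvalidator with the one parameter
# the unit tests exercise. Host targeting is an assumption.
- hosts: masters
  tasks:
  - name: Validate hostsubnet, netnamespace and namespace objects
    oc_objectvalidator:
      kubeconfig: /etc/origin/master/admin.kubeconfig
    register: object_validation
```
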
diff --git a/roles/lib_openshift/src/test/unit/test_oc_process.py b/roles/lib_openshift/src/test/unit/test_oc_process.py
index c4b36928b..d887f7636 100755
--- a/roles/lib_openshift/src/test/unit/test_oc_process.py
+++ b/roles/lib_openshift/src/test/unit/test_oc_process.py
@@ -1,14 +1,6 @@
-#!/usr/bin/env python2
'''
Unit tests for oc process
'''
-# To run
-# python -m unittest version
-#
-# .
-# Ran 1 test in 0.597s
-#
-# OK
import os
import six
@@ -254,10 +246,6 @@ class OCProcessTest(unittest.TestCase):
}
}'''
- def setUp(self):
- ''' setup method will set to known configuration '''
- pass
-
@mock.patch('oc_process.Utils.create_tmpfile_copy')
@mock.patch('oc_process.OCProcess._run')
def test_state_list(self, mock_cmd, mock_tmpfile_copy):
@@ -582,11 +570,3 @@ class OCProcessTest(unittest.TestCase):
mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
self.assertEqual(locate_oc_binary(), oc_bin)
-
- def tearDown(self):
- '''TearDown method'''
- pass
-
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/roles/lib_openshift/src/test/unit/test_oc_project.py b/roles/lib_openshift/src/test/unit/test_oc_project.py
new file mode 100755
index 000000000..5155101cb
--- /dev/null
+++ b/roles/lib_openshift/src/test/unit/test_oc_project.py
@@ -0,0 +1,110 @@
+'''
+ Unit tests for oc project
+'''
+
+import os
+import sys
+import unittest
+import mock
+
+# Removing invalid variable names for tests so that I can
+# keep them brief
+# pylint: disable=invalid-name,no-name-in-module
+# Disable import-error b/c our libraries aren't loaded in jenkins
+# pylint: disable=import-error,wrong-import-position
+# place class in our python path
+module_path = os.path.join('/'.join(os.path.realpath(__file__).split('/')[:-4]), 'library') # noqa: E501
+sys.path.insert(0, module_path)
+from oc_project import OCProject # noqa: E402
+
+
+class OCProjectTest(unittest.TestCase):
+ '''
+    Test class for OCProject
+ '''
+
+ @mock.patch('oc_project.locate_oc_binary')
+ @mock.patch('oc_project.Utils.create_tmpfile_copy')
+ @mock.patch('oc_project.Utils._write')
+ @mock.patch('oc_project.OCProject._run')
+ def test_adding_a_project(self, mock_cmd, mock_write, mock_tmpfile_copy, mock_loc_oc_bin):
+ ''' Testing adding a project '''
+
+ # Arrange
+
+ # run_ansible input parameters
+ params = {
+ 'state': 'present',
+ 'display_name': 'operations project',
+ 'name': 'operations',
+ 'node_selector': ['ops_only=True'],
+ 'kubeconfig': '/etc/origin/master/admin.kubeconfig',
+ 'debug': False,
+ 'admin': None,
+ 'admin_role': 'admin',
+ 'description': 'All things operations project',
+ }
+
+ project_results = '''{
+ "kind": "Project",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "operations",
+ "selfLink": "/oapi/v1/projects/operations",
+ "uid": "5e52afb8-ee33-11e6-89f4-0edc441d9666",
+ "resourceVersion": "1584",
+ "labels": {},
+ "annotations": {
+ "openshift.io/node-selector": "ops_only=True",
+ "openshift.io/sa.initialized-roles": "true",
+ "openshift.io/sa.scc.mcs": "s0:c3,c2",
+ "openshift.io/sa.scc.supplemental-groups": "1000010000/10000",
+ "openshift.io/sa.scc.uid-range": "1000010000/10000"
+ }
+ },
+ "spec": {
+ "finalizers": [
+ "kubernetes",
+ "openshift.io/origin"
+ ]
+ },
+ "status": {
+ "phase": "Active"
+ }
+ }'''
+
+ # Return values of our mocked function call. These get returned once per call.
+ mock_cmd.side_effect = [
+ (1, '', 'Error from server: namespaces "operations" not found'),
+ (1, '', 'Error from server: namespaces "operations" not found'),
+ (0, '', ''), # created
+ (0, project_results, ''), # fetch it
+ ]
+
+ mock_tmpfile_copy.side_effect = [
+ '/tmp/mocked_kubeconfig',
+ ]
+
+ mock_loc_oc_bin.side_effect = [
+ 'oc',
+ ]
+
+ # Act
+
+ results = OCProject.run_ansible(params, False)
+
+ # Assert
+ self.assertTrue(results['changed'])
+ self.assertEqual(results['results']['returncode'], 0)
+ self.assertEqual(results['results']['results']['metadata']['name'], 'operations')
+ self.assertEqual(results['state'], 'present')
+
+ # Making sure our mock was called as we expected
+ mock_cmd.assert_has_calls([
+ mock.call(['oc', 'get', 'namespace', 'operations', '-o', 'json'], None),
+ mock.call(['oc', 'get', 'namespace', 'operations', '-o', 'json'], None),
+ mock.call(['oc', 'adm', 'new-project', 'operations', mock.ANY,
+ mock.ANY, mock.ANY, mock.ANY], None),
+ mock.call(['oc', 'get', 'namespace', 'operations', '-o', 'json'], None),
+
+ ])
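
The parameters the test feeds to OCProject.run_ansible map directly onto module options, so a hedged playbook sketch mirroring that test case might look like the following; the host targeting is an assumption.

```yaml
# Illustrative sketch only: mirrors the parameters the unit test passes to
# OCProject.run_ansible. Host targeting is an assumption.
- hosts: masters
  tasks:
  - name: Ensure the operations project exists
    oc_project:
      state: present
      name: operations
      display_name: operations project
      description: All things operations project
      node_selector:
      - ops_only=True
      kubeconfig: /etc/origin/master/admin.kubeconfig
```
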
diff --git a/roles/lib_openshift/src/test/unit/test_oc_route.py b/roles/lib_openshift/src/test/unit/test_oc_route.py
index e0f6d2f3c..09c52a461 100755
--- a/roles/lib_openshift/src/test/unit/test_oc_route.py
+++ b/roles/lib_openshift/src/test/unit/test_oc_route.py
@@ -1,14 +1,6 @@
-#!/usr/bin/env python2
'''
Unit tests for oc route
'''
-# To run:
-# ./oc_serviceaccount.py
-#
-# .
-# Ran 1 test in 0.002s
-#
-# OK
import os
import six
@@ -32,10 +24,6 @@ class OCRouteTest(unittest.TestCase):
Test class for OCServiceAccount
'''
- def setUp(self):
- ''' setup method will create a file and set to known configuration '''
- pass
-
@mock.patch('oc_route.locate_oc_binary')
@mock.patch('oc_route.Utils.create_tmpfile_copy')
@mock.patch('oc_route.OCRoute._run')
@@ -377,11 +365,3 @@ metadata:
mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
self.assertEqual(locate_oc_binary(), oc_bin)
-
- def tearDown(self):
- '''TearDown method'''
- pass
-
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/roles/lib_openshift/src/test/unit/test_oc_scale.py b/roles/lib_openshift/src/test/unit/test_oc_scale.py
index b2dec2fbe..d810735f2 100755
--- a/roles/lib_openshift/src/test/unit/test_oc_scale.py
+++ b/roles/lib_openshift/src/test/unit/test_oc_scale.py
@@ -1,14 +1,6 @@
-#!/usr/bin/env python2
'''
Unit tests for oc scale
'''
-# To run
-# python -m unittest version
-#
-# .
-# Ran 1 test in 0.597s
-#
-# OK
import os
import six
@@ -32,10 +24,6 @@ class OCScaleTest(unittest.TestCase):
Test class for OCVersion
'''
- def setUp(self):
- ''' setup method will create a file and set to known configuration '''
- pass
-
@mock.patch('oc_scale.Utils.create_tmpfile_copy')
@mock.patch('oc_scale.OCScale.openshift_cmd')
def test_state_list(self, mock_openshift_cmd, mock_tmpfile_copy):
@@ -266,11 +254,3 @@ class OCScaleTest(unittest.TestCase):
mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
self.assertEqual(locate_oc_binary(), oc_bin)
-
- def tearDown(self):
- '''TearDown method'''
- pass
-
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/roles/lib_openshift/src/test/unit/test_oc_secret.py b/roles/lib_openshift/src/test/unit/test_oc_secret.py
index bf496769a..e31393793 100755
--- a/roles/lib_openshift/src/test/unit/test_oc_secret.py
+++ b/roles/lib_openshift/src/test/unit/test_oc_secret.py
@@ -1,14 +1,6 @@
-#!/usr/bin/env python2
'''
Unit tests for oc secret
'''
-# To run:
-# ./oc_secret.py
-#
-# .
-# Ran 1 test in 0.002s
-#
-# OK
import os
import six
@@ -32,10 +24,6 @@ class OCSecretTest(unittest.TestCase):
Test class for OCSecret
'''
- def setUp(self):
- ''' setup method will create a file and set to known configuration '''
- pass
-
@mock.patch('oc_secret.locate_oc_binary')
@mock.patch('oc_secret.Utils.create_tmpfile_copy')
@mock.patch('oc_secret.Utils._write')
@@ -200,11 +188,3 @@ class OCSecretTest(unittest.TestCase):
mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
self.assertEqual(locate_oc_binary(), oc_bin)
-
- def tearDown(self):
- '''TearDown method'''
- pass
-
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/roles/lib_openshift/src/test/unit/test_oc_service.py b/roles/lib_openshift/src/test/unit/test_oc_service.py
index 8974eb6c6..e74c66665 100755
--- a/roles/lib_openshift/src/test/unit/test_oc_service.py
+++ b/roles/lib_openshift/src/test/unit/test_oc_service.py
@@ -1,14 +1,6 @@
-#!/usr/bin/env python2
'''
Unit tests for oc service
'''
-# To run
-# python -m unittest version
-#
-# .
-# Ran 1 test in 0.597s
-#
-# OK
import os
import six
@@ -33,10 +25,6 @@ class OCServiceTest(unittest.TestCase):
Test class for OCService
'''
- def setUp(self):
- ''' setup method will create a file and set to known configuration '''
- pass
-
@mock.patch('oc_service.Utils.create_tmpfile_copy')
@mock.patch('oc_service.OCService._run')
def test_state_list(self, mock_cmd, mock_tmpfile_copy):
@@ -315,11 +303,3 @@ class OCServiceTest(unittest.TestCase):
mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
self.assertEqual(locate_oc_binary(), oc_bin)
-
- def tearDown(self):
- '''TearDown method'''
- pass
-
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/roles/lib_openshift/src/test/unit/test_oc_serviceaccount.py b/roles/lib_openshift/src/test/unit/test_oc_serviceaccount.py
index 3572a6728..5772d2f00 100755
--- a/roles/lib_openshift/src/test/unit/test_oc_serviceaccount.py
+++ b/roles/lib_openshift/src/test/unit/test_oc_serviceaccount.py
@@ -1,14 +1,6 @@
-#!/usr/bin/env python2
'''
Unit tests for oc serviceaccount
'''
-# To run:
-# ./oc_serviceaccount.py
-#
-# .
-# Ran 1 test in 0.002s
-#
-# OK
import os
import six
@@ -32,10 +24,6 @@ class OCServiceAccountTest(unittest.TestCase):
Test class for OCServiceAccount
'''
- def setUp(self):
- ''' setup method will create a file and set to known configuration '''
- pass
-
@mock.patch('oc_serviceaccount.locate_oc_binary')
@mock.patch('oc_serviceaccount.Utils.create_tmpfile_copy')
@mock.patch('oc_serviceaccount.OCServiceAccount._run')
@@ -223,11 +211,3 @@ class OCServiceAccountTest(unittest.TestCase):
mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
self.assertEqual(locate_oc_binary(), oc_bin)
-
- def tearDown(self):
- '''TearDown method'''
- pass
-
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/roles/lib_openshift/src/test/unit/test_oc_serviceaccount_secret.py b/roles/lib_openshift/src/test/unit/test_oc_serviceaccount_secret.py
index d78349e34..b22525068 100755
--- a/roles/lib_openshift/src/test/unit/test_oc_serviceaccount_secret.py
+++ b/roles/lib_openshift/src/test/unit/test_oc_serviceaccount_secret.py
@@ -1,14 +1,6 @@
-#!/usr/bin/env python2
'''
Unit tests for oc secret add
'''
-# To run:
-# ./oc_serviceaccount_secret.py
-#
-# .
-# Ran 1 test in 0.002s
-#
-# OK
import os
import six
@@ -38,10 +30,6 @@ class OCServiceAccountSecretTest(unittest.TestCase):
Test class for OCServiceAccountSecret
'''
- def setUp(self):
- ''' setup method will create a file and set to known configuration '''
- pass
-
@mock.patch('oc_serviceaccount_secret.locate_oc_binary')
@mock.patch('oc_serviceaccount_secret.Utils.create_tmpfile_copy')
@mock.patch('oc_serviceaccount_secret.Yedit._write')
@@ -424,11 +412,3 @@ secrets:
mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
self.assertEqual(locate_oc_binary(), oc_bin)
-
- def tearDown(self):
- '''TearDown method'''
- pass
-
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/roles/lib_openshift/src/test/unit/test_oc_version.py b/roles/lib_openshift/src/test/unit/test_oc_version.py
index 6daf5b00d..c287bad0b 100755
--- a/roles/lib_openshift/src/test/unit/test_oc_version.py
+++ b/roles/lib_openshift/src/test/unit/test_oc_version.py
@@ -1,14 +1,6 @@
-#!/usr/bin/env python2
'''
Unit tests for oc version
'''
-# To run
-# python -m unittest version
-#
-# .
-# Ran 1 test in 0.597s
-#
-# OK
import os
import six
@@ -32,10 +24,6 @@ class OCVersionTest(unittest.TestCase):
Test class for OCVersion
'''
- def setUp(self):
- ''' setup method will create a file and set to known configuration '''
- pass
-
@mock.patch('oc_version.Utils.create_tmpfile_copy')
@mock.patch('oc_version.OCVersion.openshift_cmd')
def test_get(self, mock_openshift_cmd, mock_tmpfile_copy):
@@ -172,11 +160,3 @@ class OCVersionTest(unittest.TestCase):
mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
self.assertEqual(locate_oc_binary(), oc_bin)
-
- def tearDown(self):
- '''TearDown method'''
- pass
-
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/roles/lib_utils/src/test/unit/test_repoquery.py b/roles/lib_utils/src/test/unit/test_repoquery.py
index c487ab254..e39d9d83f 100755
--- a/roles/lib_utils/src/test/unit/test_repoquery.py
+++ b/roles/lib_utils/src/test/unit/test_repoquery.py
@@ -1,14 +1,6 @@
-#!/usr/bin/env python2
'''
Unit tests for repoquery
'''
-# To run:
-# ./repoquery.py
-#
-# .
-# Ran 1 test in 0.002s
-#
-# OK
import os
import sys
@@ -31,10 +23,6 @@ class RepoQueryTest(unittest.TestCase):
Test class for RepoQuery
'''
- def setUp(self):
- ''' setup method for other tests '''
- pass
-
@mock.patch('repoquery._run')
def test_querying_a_package(self, mock_cmd):
''' Testing querying a package '''
@@ -77,11 +65,3 @@ class RepoQueryTest(unittest.TestCase):
mock_cmd.assert_has_calls([
mock.call(['/usr/bin/repoquery', '--plugins', '--quiet', '--pkgnarrow=repos', '--queryformat=%{version}|%{release}|%{arch}|%{repo}|%{version}-%{release}', 'bash']),
])
-
- def tearDown(self):
- '''TearDown method'''
- pass
-
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/roles/lib_utils/src/test/unit/test_yedit.py b/roles/lib_utils/src/test/unit/test_yedit.py
index ce5e027a7..23a3f7353 100755
--- a/roles/lib_utils/src/test/unit/test_yedit.py
+++ b/roles/lib_utils/src/test/unit/test_yedit.py
@@ -1,14 +1,6 @@
-#!/usr/bin/env python2
'''
Unit tests for yedit
'''
-# To run
-# python -m unittest yedit_test
-#
-# .............................
-# ----------------------------------------------------------------------
-# Ran 29 tests in 0.133s
-# OK
import os
import sys
@@ -200,8 +192,6 @@ class YeditTest(unittest.TestCase):
yed.append('x:y:z', [5, 6])
yed.append('x:y:z', [5, 6])
self.assertTrue(yed.get('x:y:z') == [1, 2, 3, [5, 6], [5, 6]])
- # pylint: disable=maybe-no-member
- self.assertTrue(2 == yed.get('x:y:z').count([5, 6]))
self.assertFalse(yed.exists('x:y:z', 4))
def test_add_item_to_dict(self):
@@ -290,7 +280,3 @@ class YeditTest(unittest.TestCase):
def tearDown(self):
'''TearDown method'''
os.unlink(YeditTest.filename)
-
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/roles/openshift_excluder/README.md b/roles/openshift_excluder/README.md
index 6c90b4e96..e76a15952 100644
--- a/roles/openshift_excluder/README.md
+++ b/roles/openshift_excluder/README.md
@@ -15,8 +15,11 @@ Facts
| Name | Default Value | Description |
-----------------------------|---------------|----------------------------------------|
-| docker_excluder_enabled | none | Records the status of docker excluder |
-| openshift_excluder_enabled | none | Records the status of the openshift excluder |
+| enable_docker_excluder | enable_excluders | Enables the docker excluder. If not set, the docker excluder is ignored. |
+| enable_openshift_excluder | enable_excluders | Enables the openshift excluder. If not set, the openshift excluder is ignored. |
+| enable_excluders | None | Enables all excluders. |
+| enable_docker_excluder_override | None | Indicates the docker excluder needs to be enabled. |
+| disable_openshift_excluder_override | None | Indicates the openshift excluder needs to be disabled. |
Role Variables
--------------
@@ -25,6 +28,16 @@ None
Dependencies
------------
+Tasks to include
+----------------
+
+- exclude: enable excluders (assuming excluders are installed)
+- unexclude: disable excluders (assuming excluders are installed)
+- install: install excluders (installation is followed by excluder enabling)
+- enable: enable excluders (optionally with installation step)
+- disable: disable excluders (optionally with installation and status steps; the status check can override which excluder gets enabled or disabled)
+- status: determine status of excluders
+
Example Playbook
----------------
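
The task list in the README diff above is the role's public surface. A sketch of pulling in one of those entry points from a play follows; it assumes an Ansible release with include_role and that the role is consumed this way, since the calling playbooks are not part of this diff.

```yaml
# Illustrative sketch only: disables the excluders on non-containerized hosts
# via the role's disable entry point. The include_role/tasks_from wiring is an
# assumption about how the role is consumed.
- hosts: nodes
  tasks:
  - name: Disable excluders before changing packages
    include_role:
      name: openshift_excluder
      tasks_from: disable.yml
```
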
diff --git a/roles/openshift_excluder/defaults/main.yml b/roles/openshift_excluder/defaults/main.yml
new file mode 100644
index 000000000..0d275e954
--- /dev/null
+++ b/roles/openshift_excluder/defaults/main.yml
@@ -0,0 +1,4 @@
+---
+# keep the 'current' package or update to 'latest' if available?
+openshift_excluder_package_state: present
+docker_excluder_package_state: present
diff --git a/roles/openshift_excluder/tasks/adjust.yml b/roles/openshift_excluder/tasks/adjust.yml
new file mode 100644
index 000000000..6f4070c3d
--- /dev/null
+++ b/roles/openshift_excluder/tasks/adjust.yml
@@ -0,0 +1,23 @@
+---
+# Depending on the enablement of the individual excluders and their status,
+# some excluders need to be disabled and others enabled.
+# By default, all excluders are disabled unless overridden.
+- block:
+ - include: init.yml
+ # All excluders that are to be enabled are enabled
+ - include: exclude.yml
+ vars:
+      # Enable the docker excluder only if it is overridden
+ enable_docker_excluder: "{{ enable_docker_excluder_override | default(false) | bool }}"
+ # excluder is to be disabled by default
+ enable_openshift_excluder: false
+ # All excluders that are to be disabled are disabled
+ - include: unexclude.yml
+ vars:
+ # If the docker override is not set, default to the generic behaviour
+ disable_docker_excluder: "{{ not enable_docker_excluder_override | default(not docker_excluder_on) | bool }}"
+      # disabling the openshift excluder is never overridden;
+      # disable it if the openshift excluder is enabled
+ disable_openshift_excluder: "{{ openshift_excluder_on | bool }}"
+ when:
+ - not openshift.common.is_containerized | bool
diff --git a/roles/openshift_excluder/tasks/disable.yml b/roles/openshift_excluder/tasks/disable.yml
new file mode 100644
index 000000000..a8deb3eb1
--- /dev/null
+++ b/roles/openshift_excluder/tasks/disable.yml
@@ -0,0 +1,26 @@
+---
+# input variables
+# - with_status_check
+# - with_install
+# - excluder_package_state
+# - docker_excluder_package_state
+- include: init.yml
+
+# Install any excluder that is enabled
+- include: install.yml
+ vars:
+ # Both docker_excluder_on and openshift_excluder_on are set in openshift_excluder->init task
+ install_docker_excluder: "{{ docker_excluder_on | bool }}"
+ install_openshift_excluder: "{{ openshift_excluder_on | bool }}"
+ when: docker_excluder_on or openshift_excluder_on
+
+ # if the docker excluder is not enabled, we don't care about its status
+  # if the docker excluder is enabled, we install it, and if its status is non-zero
+ # it is enabled no matter what
+
+# Check the current state of all excluders
+- include: status.yml
+ when: with_status_check | default(docker_excluder_on or openshift_excluder_on) | bool
+
+  # And finally adjust the excluders so that host components are updated correctly
+- include: adjust.yml
diff --git a/roles/openshift_excluder/tasks/enable.yml b/roles/openshift_excluder/tasks/enable.yml
new file mode 100644
index 000000000..ef6fc4a01
--- /dev/null
+++ b/roles/openshift_excluder/tasks/enable.yml
@@ -0,0 +1,21 @@
+---
+# input variables:
+# - with_install
+- block:
+ - include: init.yml
+
+ - include: install.yml
+ vars:
+ install_docker_excluder: "{{ docker_excluder_on | bool }}"
+ install_openshift_excluder: "{{ openshift_excluder_on | bool }}"
+ when: with_install | default(docker_excluder_on or openshift_excluder_on) | bool
+
+ - include: exclude.yml
+ vars:
+      # Enable the docker excluder if the override is set; otherwise fall back to its default enablement
+ enable_docker_excluder: "{{ enable_docker_excluder_override | default(docker_excluder_on) | bool }}"
+      # Enable the openshift excluder unless the disable override is set; otherwise fall back to its default enablement
+ enable_openshift_excluder: "{{ not disable_openshift_excluder_override | default(not openshift_excluder_on) | bool }}"
+
+ when:
+ - not openshift.common.is_containerized | bool
diff --git a/roles/openshift_excluder/tasks/exclude.yml b/roles/openshift_excluder/tasks/exclude.yml
index 570183aef..ee0ad8a0b 100644
--- a/roles/openshift_excluder/tasks/exclude.yml
+++ b/roles/openshift_excluder/tasks/exclude.yml
@@ -1,11 +1,20 @@
---
-- include: install.yml
- when: not openshift.common.is_containerized | bool
+# input variables:
+# - enable_docker_excluder
+# - enable_openshift_excluder
+- block:
+ - name: Enable docker excluder
+ command: "{{ openshift.common.service_type }}-docker-excluder exclude"
+ # if the docker override is set, it means the docker excluder needs to be enabled no matter what
+ # if the docker override is not set, the excluder is set based on enable_docker_excluder
+ when:
+ - enable_docker_excluder | default(false) | bool
-- name: Enable docker excluder
- command: "{{ openshift.common.service_type }}-docker-excluder exclude"
- when: not openshift.common.is_containerized | bool
-
-- name: Enable excluder
- command: "{{ openshift.common.service_type }}-excluder exclude"
- when: not openshift.common.is_containerized | bool
+ - name: Enable openshift excluder
+ command: "{{ openshift.common.service_type }}-excluder exclude"
+ # if the openshift override is set, it means the openshift excluder is disabled no matter what
+ # if the openshift override is not set, the excluder is set based on enable_openshift_excluder
+ when:
+ - enable_openshift_excluder | default(false) | bool
+ when:
+ - not openshift.common.is_containerized | bool
diff --git a/roles/openshift_excluder/tasks/init.yml b/roles/openshift_excluder/tasks/init.yml
new file mode 100644
index 000000000..dee779925
--- /dev/null
+++ b/roles/openshift_excluder/tasks/init.yml
@@ -0,0 +1,12 @@
+---
+- name: Evaluate if docker excluder is to be enabled
+ set_fact:
+ docker_excluder_on: "{{ enable_docker_excluder | default(enable_excluders | default(false)) | bool }}"
+
+- debug: var=docker_excluder_on
+
+- name: Evaluate if openshift excluder is to be enabled
+ set_fact:
+ openshift_excluder_on: "{{ enable_openshift_excluder | default(enable_excluders | default(false)) | bool }}"
+
+- debug: var=openshift_excluder_on
diff --git a/roles/openshift_excluder/tasks/install.yml b/roles/openshift_excluder/tasks/install.yml
index ee4cb2c05..01fe5da55 100644
--- a/roles/openshift_excluder/tasks/install.yml
+++ b/roles/openshift_excluder/tasks/install.yml
@@ -1,16 +1,21 @@
---
-- name: Install latest excluder
- package:
- name: "{{ openshift.common.service_type }}-excluder"
- state: latest
- when:
- - openshift_excluder_enabled | default(false) | bool
- - not openshift.common.is_containerized | bool
+# input Variables
+# - install_docker_excluder
+# - install_openshift_excluder
+- block:
+
+ - name: Install docker excluder
+ package:
+ name: "{{ openshift.common.service_type }}-docker-excluder"
+ state: "{{ docker_excluder_package_state }}"
+ when:
+ - install_docker_excluder | default(true) | bool
-- name: Install latest docker excluder
- package:
- name: "{{ openshift.common.service_type }}-excluder"
- state: latest
+ - name: Install openshift excluder
+ package:
+ name: "{{ openshift.common.service_type }}-excluder"
+ state: "{{ openshift_excluder_package_state }}"
+ when:
+ - install_openshift_excluder | default(true) | bool
when:
- - docker_excluder_enabled | default(false) | bool
- not openshift.common.is_containerized | bool
diff --git a/roles/openshift_excluder/tasks/reset.yml b/roles/openshift_excluder/tasks/reset.yml
deleted file mode 100644
index 486a23fd0..000000000
--- a/roles/openshift_excluder/tasks/reset.yml
+++ /dev/null
@@ -1,12 +0,0 @@
----
-- name: Enable docker excluder
- command: "{{ openshift.common.service_type }}-docker-excluder exclude"
- when:
- - docker_excluder_enabled | default(false) | bool
- - not openshift.common.is_containerized | bool
-
-- name: Enable excluder
- command: "{{ openshift.common.service_type }}-excluder exclude"
- when:
- - openshift_excluder_enabled | default(false) | bool
- - not openshift.common.is_containerized | bool
diff --git a/roles/openshift_excluder/tasks/status.yml b/roles/openshift_excluder/tasks/status.yml
index ef118d94c..40bf98c18 100644
--- a/roles/openshift_excluder/tasks/status.yml
+++ b/roles/openshift_excluder/tasks/status.yml
@@ -1,8 +1,4 @@
---
-# Latest versions of the excluders include a status function, old packages dont
-# So, if packages are installed, upgrade them to the latest so we get the status
-# If they're not installed when we should assume they're disabled
-
- name: Determine if excluder packages are installed
rpm_q:
name: "{{ openshift.common.service_type }}-excluder"
@@ -10,49 +6,78 @@
register: openshift_excluder_installed
failed_when: false
+# docker excluder needs to be enabled by default
- name: Determine if docker packages are installed
rpm_q:
- name: "{{ openshift.common.service_type }}-excluder"
+ name: "{{ openshift.common.service_type }}-docker-excluder"
state: present
register: docker_excluder_installed
failed_when: false
-- name: Update to latest excluder packages
- package:
- name: "{{ openshift.common.service_type }}-excluder"
- state: latest
- when:
- - "{{ openshift_excluder_installed.installed_versions | default([]) | length > 0 }}"
- - not openshift.common.is_containerized | bool
+# The excluder status function returns 0 when everything is excluded
+# and 1, with a warning on stderr, if any packages are missing from the exclusions list
+# # atomic-openshift-excluder status ; echo $?
+# exclude -- All packages excluded
+# 0
+# # atomic-openshift-excluder unexclude
+# # atomic-openshift-excluder status ; echo $?
+# unexclude -- At least one package not excluded
+# 1
-- name: Update to the latest docker-excluder packages
- package:
- name: "{{ openshift.common.service_type }}-docker-excluder"
- state: latest
- when:
- - "{{ docker_excluder_installed.installed_versions | default([]) | length > 0 }}"
- - not openshift.common.is_containerized | bool
+- block:
+ - include: init.yml
+ - block:
+ - name: Record openshift excluder status
+ command: "{{ openshift.common.service_type }}-excluder status"
+ register: excluder_status
+ failed_when: false
-- name: Record excluder status
- command: "{{ openshift.common.service_type }}-excluder"
- register: excluder_status
- when:
- - "{{ openshift_excluder_installed.installed_versions | default([]) | length > 0 }}"
- - not openshift.common.is_containerized | bool
- failed_when: false
+    # Even though the openshift excluder is enabled,
+    # if the status is non-zero, disable the excluder
+ - name: Override openshift excluder enablement if the status is non-zero
+ set_fact:
+ disable_openshift_excluder_override: true
+ when:
+ - "{{ excluder_status.rc | default(0) != 0 }}"
-- name: Record docker excluder status
- command: "{{ openshift.common.service_type }}-docker-excluder"
- register: docker_excluder_status
- when:
- - "{{ docker_excluder_installed.installed_versions | default([]) | length > 0 }}"
- - not openshift.common.is_containerized | bool
- failed_when: false
+ - debug:
+ msg: "Disabling openshift excluder"
+ when:
+ - "{{ excluder_status.rc | default(0) != 0 }}"
+
+ when:
+ - "{{ openshift_excluder_installed.installed_versions | default([]) | length > 0 }}"
+ - "{{ openshift_excluder_on }}"
+
+ - block:
+ - name: Record docker excluder status
+ command: "{{ openshift.common.service_type }}-docker-excluder status"
+ register: docker_excluder_status
+ failed_when: false
-- name: Set excluder status facts
- set_fact:
- docker_excluder_enabled: "{{ 'false' if docker_excluder_status.rc | default(0) == 0 or docker_excluder_installed.installed_versions | default(0) | length == 0 else 'true' }}"
- openshift_excluder_enabled: "{{ 'false' if docker_excluder_status.rc | default(0) == 0 or openshift_excluder_installed.installed_versions | default(0) | length == 0 else 'true' }}"
+ # If the docker excluder is installed and the status is non-zero
+ # always enable the docker excluder
+ - name: Override docker excluder enablement if the status is non-zero
+ set_fact:
+ enable_docker_excluder_override: true
+ when:
+ - "{{ docker_excluder_status.rc | default(0) != 0 }}"
-- debug: var=docker_excluder_enabled
-- debug: var=openshift_excluder_enabled
+ - debug:
+ msg: "Enabling docker excluder"
+ when:
+ - "{{ docker_excluder_status.rc | default(0) != 0 }}"
+
+  # As the docker excluder status is not satisfied,
+  # re-enable the entire docker excluder,
+  # keeping the override set in case another task depends on it
+ - name: Enable docker excluder
+ command: "{{ openshift.common.service_type }}-docker-excluder exclude"
+
+  # Run the docker excluder status even if the excluder is disabled,
+  # in order to determine if the excluder needs to be enabled.
+ when:
+ - "{{ docker_excluder_installed.installed_versions | default([]) | length > 0 }}"
+
+ when:
+ - not openshift.common.is_containerized | bool
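The comment block above documents the status semantics, and the rewritten check block reduces to a small piece of decision logic. The Python rendering below is purely illustrative (the real work is done by the rpm_q, command, and set_fact tasks, and the whole block is skipped on containerized hosts); function and parameter names are chosen to mirror the fact names in the play:

    def excluder_check(openshift_excluder_installed, openshift_excluder_on,
                       openshift_status_rc, docker_excluder_installed,
                       docker_status_rc):
        """Mirror of the override logic above. Status exit codes follow the
        documented semantics: 0 == everything excluded, non-zero == at least
        one package not excluded."""
        facts, commands = {}, []
        if openshift_excluder_installed and openshift_excluder_on:
            if openshift_status_rc != 0:
                # Non-zero status: disable the openshift excluder for this run.
                facts['disable_openshift_excluder_override'] = True
        if docker_excluder_installed:
            if docker_status_rc != 0:
                # The docker excluder must stay enforced, so flag it.
                facts['enable_docker_excluder_override'] = True
            # The block always re-runs "exclude" for the docker excluder.
            commands.append("<service_type>-docker-excluder exclude")
        return facts, commands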
diff --git a/roles/openshift_excluder/tasks/unexclude.yml b/roles/openshift_excluder/tasks/unexclude.yml
index 38f0759aa..4df92bc65 100644
--- a/roles/openshift_excluder/tasks/unexclude.yml
+++ b/roles/openshift_excluder/tasks/unexclude.yml
@@ -1,12 +1,19 @@
---
-- name: disable docker excluder
- command: "{{ openshift.common.service_type }}-docker-excluder unexclude"
- when:
- - docker_excluder_enabled | bool
- - not openshift.common.is_containerized | bool
+# input variables:
+# - disable_docker_excluder
+# - disable_openshift_excluder
+- block:
+ - include: init.yml
+
+ - name: disable docker excluder
+ command: "{{ openshift.common.service_type }}-docker-excluder unexclude"
+ when:
+ - disable_docker_excluder | default(false) | bool
+
+ - name: disable openshift excluder
+ command: "{{ openshift.common.service_type }}-excluder unexclude"
+ when:
+ - disable_openshift_excluder | default(false) | bool
-- name: disable excluder
- command: "{{ openshift.common.service_type }}-excluder unexclude"
when:
- - openshift_excluder_enabled | bool
- not openshift.common.is_containerized | bool
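Read together, the rewritten unexclude.yml amounts to a simple contract driven by two input variables. A hedged Python rendering follows (illustrative only; names mirror the play's variables, and "origin" is just one possible service_type value):

    def unexclude_commands(service_type, disable_docker_excluder=False,
                           disable_openshift_excluder=False,
                           is_containerized=False):
        """Return the unexclude commands the play would run on a host."""
        commands = []
        if is_containerized:
            # The whole block is skipped on containerized hosts.
            return commands
        if disable_docker_excluder:
            commands.append("%s-docker-excluder unexclude" % service_type)
        if disable_openshift_excluder:
            commands.append("%s-excluder unexclude" % service_type)
        return commands

    print(unexclude_commands("origin", disable_openshift_excluder=True))
    # ['origin-excluder unexclude']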
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index 75b55c369..8ea900e21 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -2319,14 +2319,19 @@ class OpenShiftFacts(object):
protected_facts_to_overwrite)
if 'docker' in new_local_facts:
- # remove duplicate and empty strings from registry lists
+ # remove duplicate and empty strings from registry lists, preserving order
for cat in ['additional', 'blocked', 'insecure']:
key = '{0}_registries'.format(cat)
if key in new_local_facts['docker']:
val = new_local_facts['docker'][key]
if isinstance(val, string_types):
val = [x.strip() for x in val.split(',')]
- new_local_facts['docker'][key] = list(set(val) - set(['']))
+ seen = set()
+ new_local_facts['docker'][key] = list()
+ for registry in val:
+ if registry not in seen and registry != '':
+ seen.add(registry)
+ new_local_facts['docker'][key].append(registry)
# Convert legacy log_options comma sep string to a list if present:
if 'log_options' in new_local_facts['docker'] and \
isinstance(new_local_facts['docker']['log_options'], string_types):
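For clarity, here is the registry de-duplication change above as a standalone snippet; the sample registry names are made up, but the two code paths are taken directly from the old and new versions of the fact logic:

    val = "reg-one.example.com:5000, reg-two.example.com:5000, , reg-one.example.com:5000"
    entries = [x.strip() for x in val.split(',')]

    # Old behavior: set arithmetic drops duplicates and empty strings,
    # but loses the original ordering.
    old_result = list(set(entries) - set(['']))

    # New behavior: same filtering, with first-seen order preserved.
    seen = set()
    new_result = []
    for registry in entries:
        if registry not in seen and registry != '':
            seen.add(registry)
            new_result.append(registry)

    print(new_result)  # ['reg-one.example.com:5000', 'reg-two.example.com:5000']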
diff --git a/roles/openshift_facts/tasks/main.yml b/roles/openshift_facts/tasks/main.yml
index c538ff7a1..73c668c72 100644
--- a/roles/openshift_facts/tasks/main.yml
+++ b/roles/openshift_facts/tasks/main.yml
@@ -13,6 +13,8 @@
l_is_node_system_container: "{{ (use_node_system_container | default(use_system_containers) | bool) }}"
l_is_master_system_container: "{{ (use_master_system_container | default(use_system_containers) | bool) }}"
l_is_etcd_system_container: "{{ (use_etcd_system_container | default(use_system_containers) | bool) }}"
+- set_fact:
+ l_any_system_container: "{{ l_is_etcd_system_container or l_is_openvswitch_system_container or l_is_node_system_container or l_is_master_system_container }}"
- name: Validate python version
fail:
@@ -50,6 +52,13 @@
with_items: "{{ required_packages }}"
when: not l_is_atomic | bool
+- name: Ensure various deps for running system containers are installed
+ package: name={{ item }} state=present
+ with_items: "{{ required_system_containers_packages }}"
+ when:
+ - not l_is_atomic | bool
+ - l_any_system_container | bool
+
- name: Gather Cluster facts and set is_containerized if needed
openshift_facts:
role: common
diff --git a/roles/openshift_facts/vars/main.yml b/roles/openshift_facts/vars/main.yml
index 9c3110ff6..07f5100ad 100644
--- a/roles/openshift_facts/vars/main.yml
+++ b/roles/openshift_facts/vars/main.yml
@@ -5,3 +5,8 @@ required_packages:
- python-six
- PyYAML
- yum-utils
+
+required_system_containers_packages:
+ - atomic
+ - ostree
+ - runc
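Combined with the l_any_system_container fact introduced in the tasks above, the gate for installing these packages is straightforward; a small illustrative sketch (parameter names mirror the l_is_*_system_container facts):

    def needs_system_container_deps(is_atomic, etcd_sc, openvswitch_sc,
                                    node_sc, master_sc):
        """atomic, ostree and runc are only installed on non-Atomic hosts
        that run at least one component as a system container."""
        l_any_system_container = any([etcd_sc, openvswitch_sc, node_sc, master_sc])
        return (not is_atomic) and l_any_system_container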
diff --git a/roles/openshift_hosted/defaults/main.yml b/roles/openshift_hosted/defaults/main.yml
index 17a0d5301..0a6299c9b 100644
--- a/roles/openshift_hosted/defaults/main.yml
+++ b/roles/openshift_hosted/defaults/main.yml
@@ -1,2 +1,30 @@
---
registry_volume_claim: 'registry-claim'
+
+openshift_hosted_router_edits:
+- key: spec.strategy.rollingParams.intervalSeconds
+ value: 1
+ action: put
+- key: spec.strategy.rollingParams.updatePeriodSeconds
+ value: 1
+ action: put
+- key: spec.strategy.activeDeadlineSeconds
+ value: 21600
+ action: put
+
+openshift_hosted_routers:
+- name: router
+ replicas: "{{ replicas }}"
+ namespace: default
+ serviceaccount: router
+ selector: "{{ openshift_hosted_router_selector }}"
+ images: "{{ openshift_hosted_router_image }}"
+ edits: "{{ openshift_hosted_router_edits }}"
+ stats_port: 1936
+ ports:
+ - 80:80
+ - 443:443
+ certificates: "{{ openshift_hosted_router_certificate | default({}) }}"
+
+
+openshift_hosted_router_certificates: {}
diff --git a/roles/openshift_hosted/tasks/router/router.yml b/roles/openshift_hosted/tasks/router/router.yml
index 607ace7f9..3b7021eae 100644
--- a/roles/openshift_hosted/tasks/router/router.yml
+++ b/roles/openshift_hosted/tasks/router/router.yml
@@ -11,69 +11,34 @@
- name: set_fact replicas
set_fact:
replicas: "{{ openshift.hosted.router.replicas|default(None) | get_router_replicas(router_nodes) }}"
+ openshift_hosted_router_selector: "{{ openshift.hosted.router.selector | default(None) }}"
+ openshift_hosted_router_image: "{{ openshift.hosted.router.registryurl }}"
-- block:
-
- - name: Assert that 'certfile', 'keyfile' and 'cafile' keys provided in openshift_hosted_router_certificate
- assert:
- that:
- - "'certfile' in openshift_hosted_router_certificate"
- - "'keyfile' in openshift_hosted_router_certificate"
- - "'cafile' in openshift_hosted_router_certificate"
- msg: "'certfile', 'keyfile' and 'cafile' keys must be specified when supplying the openshift_hosted_router_certificate variable."
-
- - name: Read router certificate and key
- become: no
- local_action:
- module: slurp
- src: "{{ item }}"
- register: openshift_router_certificate_output
- # Defaulting dictionary keys to none to avoid deprecation warnings
- # (future fatal errors) during template evaluation. Dictionary keys
- # won't be accessed unless openshift_hosted_router_certificate is
- # defined and has all keys (certfile, keyfile, cafile) which we
- # check above.
- with_items:
- - "{{ (openshift_hosted_router_certificate | default({'certfile':none})).certfile }}"
- - "{{ (openshift_hosted_router_certificate | default({'keyfile':none})).keyfile }}"
- - "{{ (openshift_hosted_router_certificate | default({'cafile':none})).cafile }}"
-
- - name: Persist certificate contents
- openshift_facts:
- role: hosted
- openshift_env:
- openshift_hosted_router_certificate_contents: "{% for certificate in openshift_router_certificate_output.results -%}{{ certificate.content | b64decode }}{% endfor -%}"
-
- - name: Create PEM certificate
- copy:
- content: "{{ openshift.hosted.router.certificate.contents }}"
- dest: "{{ openshift_master_config_dir }}/openshift-router.pem"
- mode: 0600
-
- when: openshift_hosted_router_certificate is defined
+- name: Get the certificate contents for router
+ copy:
+ backup: True
+ dest: "/etc/origin/master/{{ item | basename }}"
+ src: "{{ item }}"
+ with_items: "{{ openshift_hosted_routers | oo_collect(attribute='certificates') |
+ oo_select_keys_from_list(['keyfile', 'certfile', 'cafile']) }}"
- name: Create OpenShift router
oc_adm_router:
- name: "{{ openshift.hosted.router.name | default('router') }}"
- replicas: "{{ replicas }}"
- namespace: "{{ openshift.hosted.router.namespace | default('default') }}"
+ name: "{{ item.name }}"
+ replicas: "{{ item.replicas }}"
+ namespace: "{{ item.namespace | default('default') }}"
# This option is not yet implemented
# force_subdomain: "{{ openshift.hosted.router.force_subdomain | default(none) }}"
- service_account: router
- selector: "{{ openshift.hosted.router.selector | default(none) }}"
- images: "{{ openshift.hosted.router.registryurl | default(none) }}"
- default_cert: "{{ openshift_hosted_router_certificate is defined | default(false) | ternary(openshift_master_config_dir + '/openshift-router.pem', omit) }}"
- # These edits are being specified only to prevent 'changed' on rerun
- edits:
- - key: spec.strategy.rollingParams.intervalSeconds
- value: 1
- action: put
- - key: spec.strategy.rollingParams.updatePeriodSeconds
- value: 1
- action: put
- - key: spec.strategy.activeDeadlineSeconds
- value: 21600
- action: put
+ service_account: "{{ item.serviceaccount | default('router') }}"
+ selector: "{{ item.selector | default(none) }}"
+ images: "{{ item.images | default(omit) }}"
+ cert_file: "{{ ('/etc/origin/master/' ~ (item.certificates.certfile | basename)) if 'certfile' in item.certificates else omit }}"
+ key_file: "{{ ('/etc/origin/master/' ~ (item.certificates.keyfile | basename)) if 'keyfile' in item.certificates else omit }}"
+ cacert_file: "{{ ('/etc/origin/master/' ~ (item.certificates.cafile | basename)) if 'cafile' in item.certificates else omit }}"
+ edits: "{{ openshift_hosted_router_edits | union(item.edits) }}"
+ ports: "{{ item.ports }}"
+ stats_port: "{{ item.stats_port }}"
+ with_items: "{{ openshift_hosted_routers }}"
register: routerout
# This should probably move to module
@@ -85,7 +50,7 @@
- name: Ensure router replica count matches desired
oc_scale:
kind: dc
- name: "{{ openshift.hosted.router.name | default('router') }}"
- namespace: "{{ openshift.hosted.router.namespace | default('default') }}"
- replicas: "{{ replicas }}"
- when: replicas | int > 0
+ name: "{{ item.name | default('router') }}"
+ namespace: "{{ item.namespace | default('default') }}"
+ replicas: "{{ item.replicas }}"
+ with_items: "{{ openshift_hosted_routers }}"
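The certificate handling above now feeds a single copy task with a flat list of file paths built by the oo_collect and oo_select_keys_from_list filters. The sketch below is only a conceptual Python approximation of what that filter chain yields for the with_items loop, not the actual filter-plugin implementations:

    def collect_certificate_paths(routers, keys=('certfile', 'keyfile', 'cafile')):
        """Gather certificate file paths from each router's 'certificates' dict."""
        paths = []
        for router in routers:            # roughly oo_collect(attribute='certificates')
            certificates = router.get('certificates', {})
            for key in keys:              # roughly oo_select_keys_from_list([...])
                if key in certificates:
                    paths.append(certificates[key])
        return paths

    routers = [{'name': 'router',
                'certificates': {'certfile': '/path/to/router.crt',
                                 'keyfile': '/path/to/router.key',
                                 'cafile': '/path/to/ca.crt'}}]
    print(collect_certificate_paths(routers))
    # ['/path/to/router.crt', '/path/to/router.key', '/path/to/ca.crt']

Each collected path is then copied to /etc/origin/master/<basename>, which is why the oc_adm_router task rebuilds the cert_file, key_file, and cacert_file destinations from item.certificates.* basenames.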
diff --git a/roles/openshift_logging/meta/main.yaml b/roles/openshift_logging/meta/main.yaml
index 9c480f73a..ab57242c8 100644
--- a/roles/openshift_logging/meta/main.yaml
+++ b/roles/openshift_logging/meta/main.yaml
@@ -13,4 +13,5 @@ galaxy_info:
- cloud
dependencies:
- role: lib_openshift
+- role: openshift_master_facts
- role: openshift_facts
diff --git a/roles/openshift_master_facts/test/conftest.py b/roles/openshift_master_facts/test/conftest.py
index e67d24f04..140cced73 100644
--- a/roles/openshift_master_facts/test/conftest.py
+++ b/roles/openshift_master_facts/test/conftest.py
@@ -20,7 +20,7 @@ def priorities_lookup():
@pytest.fixture()
-def facts(request):
+def facts():
return {
'openshift': {
'common': {}
diff --git a/roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py b/roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py
index 5a9e545a3..1fab84c71 100644
--- a/roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py
+++ b/roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py
@@ -131,7 +131,9 @@ def short_version_fixture(request, facts):
def test_short_version_kwarg(predicates_lookup, short_version_kwarg_fixture, regions_enabled):
facts, short_version, default_predicates = short_version_kwarg_fixture
- assert_ok(predicates_lookup, default_predicates, variables=facts, regions_enabled=regions_enabled, short_version=short_version)
+ assert_ok(
+ predicates_lookup, default_predicates, variables=facts,
+ regions_enabled=regions_enabled, short_version=short_version)
@pytest.fixture(params=TEST_VARS)
@@ -143,7 +145,9 @@ def short_version_kwarg_fixture(request, facts):
def test_deployment_type_kwarg(predicates_lookup, deployment_type_kwarg_fixture, regions_enabled):
facts, deployment_type, default_predicates = deployment_type_kwarg_fixture
- assert_ok(predicates_lookup, default_predicates, variables=facts, regions_enabled=regions_enabled, deployment_type=deployment_type)
+ assert_ok(
+ predicates_lookup, default_predicates, variables=facts,
+ regions_enabled=regions_enabled, deployment_type=deployment_type)
@pytest.fixture(params=TEST_VARS)
@@ -153,9 +157,12 @@ def deployment_type_kwarg_fixture(request, facts):
return facts, deployment_type, default_predicates
-def test_short_version_deployment_type_kwargs(predicates_lookup, short_version_deployment_type_kwargs_fixture, regions_enabled):
+def test_short_version_deployment_type_kwargs(
+ predicates_lookup, short_version_deployment_type_kwargs_fixture, regions_enabled):
short_version, deployment_type, default_predicates = short_version_deployment_type_kwargs_fixture
- assert_ok(predicates_lookup, default_predicates, regions_enabled=regions_enabled, short_version=short_version, deployment_type=deployment_type)
+ assert_ok(
+ predicates_lookup, default_predicates, regions_enabled=regions_enabled,
+ short_version=short_version, deployment_type=deployment_type)
@pytest.fixture(params=TEST_VARS)
diff --git a/roles/openshift_master_facts/test/openshift_master_facts_default_priorities_tests.py b/roles/openshift_master_facts/test/openshift_master_facts_default_priorities_tests.py
index 81d3ee19e..1098f9391 100644
--- a/roles/openshift_master_facts/test/openshift_master_facts_default_priorities_tests.py
+++ b/roles/openshift_master_facts/test/openshift_master_facts_default_priorities_tests.py
@@ -119,7 +119,9 @@ def short_version_fixture(request, facts):
def test_short_version_kwarg(priorities_lookup, short_version_kwarg_fixture, zones_enabled):
facts, short_version, default_priorities = short_version_kwarg_fixture
- assert_ok(priorities_lookup, default_priorities, variables=facts, zones_enabled=zones_enabled, short_version=short_version)
+ assert_ok(
+ priorities_lookup, default_priorities, variables=facts,
+ zones_enabled=zones_enabled, short_version=short_version)
@pytest.fixture(params=TEST_VARS)
@@ -131,7 +133,9 @@ def short_version_kwarg_fixture(request, facts):
def test_deployment_type_kwarg(priorities_lookup, deployment_type_kwarg_fixture, zones_enabled):
facts, deployment_type, default_priorities = deployment_type_kwarg_fixture
- assert_ok(priorities_lookup, default_priorities, variables=facts, zones_enabled=zones_enabled, deployment_type=deployment_type)
+ assert_ok(
+ priorities_lookup, default_priorities, variables=facts,
+ zones_enabled=zones_enabled, deployment_type=deployment_type)
@pytest.fixture(params=TEST_VARS)
@@ -141,9 +145,12 @@ def deployment_type_kwarg_fixture(request, facts):
return facts, deployment_type, default_priorities
-def test_short_version_deployment_type_kwargs(priorities_lookup, short_version_deployment_type_kwargs_fixture, zones_enabled):
+def test_short_version_deployment_type_kwargs(
+ priorities_lookup, short_version_deployment_type_kwargs_fixture, zones_enabled):
short_version, deployment_type, default_priorities = short_version_deployment_type_kwargs_fixture
- assert_ok(priorities_lookup, default_priorities, zones_enabled=zones_enabled, short_version=short_version, deployment_type=deployment_type)
+ assert_ok(
+ priorities_lookup, default_priorities, zones_enabled=zones_enabled,
+ short_version=short_version, deployment_type=deployment_type)
@pytest.fixture(params=TEST_VARS)
diff --git a/roles/openshift_node/templates/openshift.docker.node.service b/roles/openshift_node/templates/openshift.docker.node.service
index e33d5d497..6ec88f85e 100644
--- a/roles/openshift_node/templates/openshift.docker.node.service
+++ b/roles/openshift_node/templates/openshift.docker.node.service
@@ -15,7 +15,7 @@ After={{ openshift.common.service_type }}-node-dep.service
EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node
EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node-dep
ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type }}-node
-ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node -v /:/rootfs:ro -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} -e HOST=/rootfs -e HOST_ETC=/host-etc -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro -v /run:/run -v /sys:/sys:rw -v /sys/fs/cgroup:/sys/fs/cgroup:rw -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch -v /etc/origin/sdn:/etc/openshift-sdn -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS {{ openshift.node.node_image }}:${IMAGE_VERSION}
+ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node -v /:/rootfs:ro -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} -e HOST=/rootfs -e HOST_ETC=/host-etc -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro -v /run:/run -v /sys:/sys:rw -v /sys/fs/cgroup:/sys/fs/cgroup:rw -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch -v /etc/origin/sdn:/etc/openshift-sdn -v /var/lib/cni:/var/lib/cni -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS {{ openshift.node.node_image }}:${IMAGE_VERSION}
ExecStartPost=/usr/bin/sleep 10
ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-node
SyslogIdentifier={{ openshift.common.service_type }}-node
diff --git a/roles/openshift_node_upgrade/templates/openshift.docker.node.service b/roles/openshift_node_upgrade/templates/openshift.docker.node.service
index e33d5d497..6ec88f85e 100644
--- a/roles/openshift_node_upgrade/templates/openshift.docker.node.service
+++ b/roles/openshift_node_upgrade/templates/openshift.docker.node.service
@@ -15,7 +15,7 @@ After={{ openshift.common.service_type }}-node-dep.service
EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node
EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node-dep
ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type }}-node
-ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node -v /:/rootfs:ro -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} -e HOST=/rootfs -e HOST_ETC=/host-etc -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro -v /run:/run -v /sys:/sys:rw -v /sys/fs/cgroup:/sys/fs/cgroup:rw -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch -v /etc/origin/sdn:/etc/openshift-sdn -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS {{ openshift.node.node_image }}:${IMAGE_VERSION}
+ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node -v /:/rootfs:ro -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} -e HOST=/rootfs -e HOST_ETC=/host-etc -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro -v /run:/run -v /sys:/sys:rw -v /sys/fs/cgroup:/sys/fs/cgroup:rw -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch -v /etc/origin/sdn:/etc/openshift-sdn -v /var/lib/cni:/var/lib/cni -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS {{ openshift.node.node_image }}:${IMAGE_VERSION}
ExecStartPost=/usr/bin/sleep 10
ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-node
SyslogIdentifier={{ openshift.common.service_type }}-node
diff --git a/setup.cfg b/setup.cfg
index e6bf2c5d1..f808fec5a 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -6,30 +6,3 @@ universal=1
[yamllint]
excludes=.tox,utils,files
-
-[lint]
-lint_disable=fixme,locally-disabled,file-ignored,duplicate-code
-
-[flake8]
-exclude=.tox/*,utils/*,inventory/*
-max_line_length = 120
-ignore = E501,T003
-
-[tool:pytest]
-norecursedirs =
- .*
- __pycache__
- cover
- docs
- # utils have its own config
- utils
-python_files =
- # TODO(rhcarvalho): rename test files to follow a single pattern. "test*.py"
- # is Python unittest's default, while pytest discovers both "test_*.py" and
- # "*_test.py" by default.
- test_*.py
- *_tests.py
-addopts =
- --cov=.
- --cov-report=term
- --cov-report=html
diff --git a/test-requirements.txt b/test-requirements.txt
index 9bb6e058c..805828e1c 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -1,11 +1,9 @@
-six
-pyOpenSSL
+# flake8 must be listed before pylint to avoid dependency conflicts
flake8
flake8-mutable
flake8-print
pylint
setuptools-lint
-PyYAML
yamllint
coverage
mock
diff --git a/tox.ini b/tox.ini
index 13c87f5c4..643fa774d 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,21 +1,24 @@
[tox]
minversion=2.3.1
envlist =
- py{27,35}-ansible22-{pylint,unit,flake8,yamllint,generate_validation}
+ py{27,35}-{flake8,pylint,unit}
+ py27-{yamllint,ansible_syntax,generate_validation}
skipsdist=True
skip_missing_interpreters=True
[testenv]
+skip_install=True
deps =
+ -rrequirements.txt
-rtest-requirements.txt
py35-flake8: flake8-bugbear
- ansible22: ansible~=2.2
commands =
+ unit: pip install -e utils
unit: pytest {posargs}
flake8: flake8 {posargs}
pylint: python setup.py lint
yamllint: python setup.py yamllint
generate_validation: python setup.py generate_validation
-
-
+ # TODO(rhcarvalho): check syntax of other important entrypoint playbooks
+ ansible_syntax: ansible-playbook --syntax-check playbooks/byo/config.yml
diff --git a/utils/.coveragerc b/utils/.coveragerc
deleted file mode 100644
index 551e13192..000000000
--- a/utils/.coveragerc
+++ /dev/null
@@ -1,18 +0,0 @@
-[run]
-branch = True
-omit =
- */lib/python*/site-packages/*
- */lib/python*/*
- /usr/*
- setup.py
- # TODO(rhcarvalho): this is used to ignore test files from coverage report.
- # We can make this less generic when we stick with a single test pattern in
- # the repo.
- test_*.py
- *_tests.py
-
-[report]
-fail_under = 73
-
-[html]
-directory = cover
diff --git a/utils/.pylintrc b/utils/.pylintrc
deleted file mode 120000
index 30b33b524..000000000
--- a/utils/.pylintrc
+++ /dev/null
@@ -1 +0,0 @@
-../.pylintrc \ No newline at end of file
diff --git a/utils/Makefile b/utils/Makefile
deleted file mode 100644
index 038c31fcf..000000000
--- a/utils/Makefile
+++ /dev/null
@@ -1,110 +0,0 @@
-########################################################
-
-# Makefile for OpenShift: Atomic Quick Installer
-#
-# useful targets (not all implemented yet!):
-# make clean -- Clean up garbage
-# make ci ------------------- Execute CI steps (for travis or jenkins)
-
-########################################################
-
-# > VARIABLE = value
-#
-# Normal setting of a variable - values within it are recursively
-# expanded when the variable is USED, not when it's declared.
-#
-# > VARIABLE := value
-#
-# Setting of a variable with simple expansion of the values inside -
-# values within it are expanded at DECLARATION time.
-
-########################################################
-
-
-NAME := oo-install
-VENV := $(NAME)env
-TESTPACKAGE := oo-install
-SHORTNAME := ooinstall
-
-# This doesn't evaluate until it's called. The -D argument is the
-# directory of the target file ($@), kinda like `dirname`.
-ASCII2MAN = a2x -D $(dir $@) -d manpage -f manpage $<
-MANPAGES := docs/man/man1/atomic-openshift-installer.1
-# slipped into the manpage template before a2x processing
-VERSION := 1.4
-
-# YAMLFILES: Skipping all '/files/' folders due to conflicting yaml file definitions
-YAMLFILES = $(shell find ../ -name $(VENV) -prune -o -name .tox -prune -o \( -name '*.yml' -o -name '*.yaml' \) ! -path "*/files/*" -print 2>&1)
-PYFILES = $(shell find ../ -name $(VENV) -prune -o -name ooinstall.egg-info -prune -o -name test -prune -o -name .tox -prune -o -name "*.py" -print)
-
-sdist: clean
- python setup.py sdist
- rm -fR $(SHORTNAME).egg-info
-
-clean:
- @find . -type f -regex ".*\.py[co]$$" -delete
- @find . -type f \( -name "*~" -or -name "#*" \) -delete
- @rm -fR build dist rpm-build MANIFEST htmlcov .coverage cover ooinstall.egg-info oo-install
- @rm -fR $(VENV)
- @rm -fR .tox
-
-# To force a rebuild of the docs run 'touch' on any *.in file under
-# docs/man/man1/
-docs: $(MANPAGES)
-
-# Regenerate %.1.asciidoc if %.1.asciidoc.in has been modified more
-# recently than %.1.asciidoc.
-%.1.asciidoc: %.1.asciidoc.in
- sed "s/%VERSION%/$(VERSION)/" $< > $@
-
-# Regenerate %.1 if %.1.asciidoc or VERSION has been modified more
-# recently than %.1. (Implicitly runs the %.1.asciidoc recipe)
-%.1: %.1.asciidoc
- $(ASCII2MAN)
-
-viewcover:
- xdg-open cover/index.html
-
-# Conditional virtualenv building strategy taken from this great post
-# by Marcel Hellkamp:
-# http://blog.bottlepy.org/2012/07/16/virtualenv-and-makefiles.html
-$(VENV): $(VENV)/bin/activate
-$(VENV)/bin/activate: test-requirements.txt
- @echo "#############################################"
- @echo "# Creating a virtualenv"
- @echo "#############################################"
- test -d $(VENV) || virtualenv $(VENV)
- . $(VENV)/bin/activate && pip install setuptools==17.1.1
- . $(VENV)/bin/activate && pip install -r test-requirements.txt
- touch $(VENV)/bin/activate
-# If there are any special things to install do it here
-# . $(VENV)/bin/activate && INSTALL STUFF
-
-ci-unittests: $(VENV)
- @echo "#############################################"
- @echo "# Running Unit Tests in virtualenv"
- @echo "#############################################"
- . $(VENV)/bin/activate && detox -e py27-unit,py35-unit
- @echo "VIEW CODE COVERAGE REPORT WITH 'xdg-open cover/index.html' or run 'make viewcover'"
-
-ci-pylint: $(VENV)
- @echo "#############################################"
- @echo "# Running PyLint Tests in virtualenv"
- @echo "#############################################"
- . $(VENV)/bin/activate && detox -e py27-pylint,py35-pylint
-
-ci-flake8: $(VENV)
- @echo "#############################################"
- @echo "# Running Flake8 Compliance Tests in virtualenv"
- @echo "#############################################"
- . $(VENV)/bin/activate && detox -e py27-flake8,py35-flake8
-
-ci-tox: $(VENV)
- . $(VENV)/bin/activate && detox
-
-ci: ci-tox
- @echo
- @echo "##################################################################################"
- @echo "VIEW CODE COVERAGE REPORT WITH 'xdg-open cover/index.html' or run 'make viewcover'"
- @echo "To clean your test environment run 'make clean'"
- @echo "Other targets you may run with 'make': 'ci-pylint', 'ci-tox', 'ci-unittests', 'ci-flake8'"
diff --git a/utils/README.md b/utils/README.md
index 7aa045ae4..79ea3fa9f 100644
--- a/utils/README.md
+++ b/utils/README.md
@@ -1,69 +1,14 @@
# Running Tests
-Run the command:
-
- make ci
-
-to run tests and linting tools.
-
-Underneath the covers, we use [tox](http://readthedocs.org/docs/tox/) to manage virtualenvs and run
-tests. Alternatively, tests can be run using [detox](https://pypi.python.org/pypi/detox/) which allows
-for running tests in parallel.
-
-```
-pip install tox detox
-```
-
-List the test environments available:
-
-```
-tox -l
-```
-
-Run all of the tests with:
-
-```
-tox
-```
-
-Run all of the tests in parallel with detox:
-
-```
-detox
-```
-
-Run a particular test environment:
-
-```
-tox -e py27-flake8
-```
-
-Run a particular test environment in a clean virtualenv:
-
-```
-tox -r -e py35-pylint
-```
-
-If you want to enter the virtualenv created by tox to do additional
-testing/debugging:
-
-```
-source .tox/py27-flake8/bin/activate
-```
-
-You will get errors if the log files already exist and can not be
-written to by the current user (`/tmp/ansible.log` and
-`/tmp/installer.txt`). *We're working on it.*
-
+All tests can be run with `tox`. See [running tests](../CONTRIBUTING.md#running-tests) for more information.
# Running From Source
You will need to setup a **virtualenv** to run from source:
$ virtualenv oo-install
- $ source ./oo-install/bin/activate
- $ virtualenv --relocatable ./oo-install/
- $ python setup.py install
+ $ source oo-install/bin/activate
+ $ python setup.py develop
The virtualenv `bin` directory should now be at the start of your
`$PATH`, and `oo-install` is ready to use from your shell.
diff --git a/utils/setup.cfg b/utils/setup.cfg
index d730cd3b4..79bc67848 100644
--- a/utils/setup.cfg
+++ b/utils/setup.cfg
@@ -3,30 +3,3 @@
# 3. If at all possible, it is good practice to do this. If you cannot, you
# will need to generate wheels for each Python version that you support.
universal=1
-
-[aliases]
-test=pytest
-
-[flake8]
-max-line-length=120
-exclude=test/*,setup.py,oo-installenv
-ignore=E501
-
-[lint]
-lint_disable=fixme,locally-disabled,file-ignored,duplicate-code
-
-[tool:pytest]
-testpaths = test
-norecursedirs =
- .*
- __pycache__
-python_files =
- # TODO(rhcarvalho): rename test files to follow a single pattern. "test*.py"
- # is Python unittest's default, while pytest discovers both "test_*.py" and
- # "*_test.py" by default.
- test_*.py
- *_tests.py
-addopts =
- --cov=.
- --cov-report=term
- --cov-report=html
diff --git a/utils/setup.py b/utils/setup.py
index 629d39206..6fec7b173 100644
--- a/utils/setup.py
+++ b/utils/setup.py
@@ -38,26 +38,15 @@ setup(
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
- #packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
packages=['ooinstall'],
package_dir={'': 'src'},
-
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=['click', 'PyYAML', 'ansible'],
- # List additional groups of dependencies here (e.g. development
- # dependencies). You can install these using the following syntax,
- # for example:
- # $ pip install -e .[dev,test]
- #extras_require={
- # 'dev': ['check-manifest'],
- # 'test': ['coverage'],
- #},
-
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
diff --git a/utils/test-requirements.txt b/utils/test-requirements.txt
deleted file mode 100644
index b26e22a7e..000000000
--- a/utils/test-requirements.txt
+++ /dev/null
@@ -1,15 +0,0 @@
-ansible
-# flake8 moved to before setuptools-lint to satisfy mccabe dependency issue
-flake8
-setuptools-lint
-coverage
-mock
-PyYAML
-click
-backports.functools_lru_cache
-pyOpenSSL
-yamllint
-tox
-detox
-pytest
-pytest-cov
diff --git a/utils/test/cli_installer_tests.py b/utils/test/cli_installer_tests.py
index 0cb37eaff..673997c42 100644
--- a/utils/test/cli_installer_tests.py
+++ b/utils/test/cli_installer_tests.py
@@ -409,8 +409,7 @@ class UnattendedCliTests(OOCliFixture):
result = self.runner.invoke(cli.cli, self.cli_args)
if result.exception is None or result.exit_code != 1:
- print("Exit code: %s" % result.exit_code)
- self.fail("Unexpected CLI return")
+ self.fail("Unexpected CLI return. Exit code: %s" % result.exit_code)
# unattended with config file and all installed hosts (with --force)
@patch('ooinstall.openshift_ansible.run_main_playbook')
@@ -600,97 +599,6 @@ class UnattendedCliTests(OOCliFixture):
self.assertEquals('openshift-enterprise',
inventory.get('OSEv3:vars', 'deployment_type'))
- # 2016-09-26 - tbielawa - COMMENTING OUT these tests FOR NOW while
- # we wait to see if anyone notices that we took away their ability
- # to set the ansible_config parameter in the command line options
- # and in the installer config file.
- #
- # We have removed the ability to set the ansible config file
- # manually so that our new quieter output mode is the default and
- # only output mode.
- #
- # RE: https://trello.com/c/DSwwizwP - atomic-openshift-install
- # should only output relevant information.
-
- # @patch('ooinstall.openshift_ansible.run_ansible')
- # @patch('ooinstall.openshift_ansible.load_system_facts')
- # def test_no_ansible_config_specified(self, load_facts_mock, run_ansible_mock):
- # load_facts_mock.return_value = (MOCK_FACTS, 0)
- # run_ansible_mock.return_value = 0
-
- # config = SAMPLE_CONFIG % 'openshift-enterprise'
-
- # self._ansible_config_test(load_facts_mock, run_ansible_mock,
- # config, None, None)
-
- # @patch('ooinstall.openshift_ansible.run_ansible')
- # @patch('ooinstall.openshift_ansible.load_system_facts')
- # def test_ansible_config_specified_cli(self, load_facts_mock, run_ansible_mock):
- # load_facts_mock.return_value = (MOCK_FACTS, 0)
- # run_ansible_mock.return_value = 0
-
- # config = SAMPLE_CONFIG % 'openshift-enterprise'
- # ansible_config = os.path.join(self.work_dir, 'ansible.cfg')
-
- # self._ansible_config_test(load_facts_mock, run_ansible_mock,
- # config, ansible_config, ansible_config)
-
- # @patch('ooinstall.openshift_ansible.run_ansible')
- # @patch('ooinstall.openshift_ansible.load_system_facts')
- # def test_ansible_config_specified_in_installer_config(self,
- # load_facts_mock, run_ansible_mock):
-
- # load_facts_mock.return_value = (MOCK_FACTS, 0)
- # run_ansible_mock.return_value = 0
-
- # ansible_config = os.path.join(self.work_dir, 'ansible.cfg')
- # config = SAMPLE_CONFIG % 'openshift-enterprise'
- # config = "%s\nansible_config: %s" % (config, ansible_config)
- # self._ansible_config_test(load_facts_mock, run_ansible_mock,
- # config, None, ansible_config)
-
- # #pylint: disable=too-many-arguments
- # # This method allows for drastically simpler tests to write, and the args
- # # are all useful.
- # def _ansible_config_test(self, load_facts_mock, run_ansible_mock,
- # installer_config, ansible_config_cli=None, expected_result=None):
- # """
- # Utility method for testing the ways you can specify the ansible config.
- # """
-
- # load_facts_mock.return_value = (MOCK_FACTS, 0)
- # run_ansible_mock.return_value = 0
-
- # config_file = self.write_config(os.path.join(self.work_dir,
- # 'ooinstall.conf'), installer_config)
-
- # self.cli_args.extend(["-c", config_file])
- # if ansible_config_cli:
- # self.cli_args.extend(["--ansible-config", ansible_config_cli])
- # self.cli_args.append("install")
- # result = self.runner.invoke(cli.cli, self.cli_args)
- # self.assert_result(result, 0)
-
- # # Test the env vars for facts playbook:
- # facts_env_vars = load_facts_mock.call_args[0][2]
- # if expected_result:
- # self.assertEquals(expected_result, facts_env_vars['ANSIBLE_CONFIG'])
- # else:
- # # If user running test has rpm installed, this might be set to default:
- # self.assertTrue('ANSIBLE_CONFIG' not in facts_env_vars or
- # facts_env_vars['ANSIBLE_CONFIG'] == cli.DEFAULT_ANSIBLE_CONFIG)
-
- # # Test the env vars for main playbook:
- # env_vars = run_ansible_mock.call_args[0][2]
- # if expected_result:
- # self.assertEquals(expected_result, env_vars['ANSIBLE_CONFIG'])
- # else:
- # # If user running test has rpm installed, this might be set to default:
- # #
- # # By default we will use the quiet config
- # self.assertTrue('ANSIBLE_CONFIG' not in env_vars or
- # env_vars['ANSIBLE_CONFIG'] == cli.QUIET_ANSIBLE_CONFIG)
-
# unattended with bad config file and no installed hosts (without --force)
@patch('ooinstall.openshift_ansible.run_main_playbook')
@patch('ooinstall.openshift_ansible.load_system_facts')
@@ -1011,13 +919,7 @@ class AttendedCliTests(OOCliFixture):
full_line = "%s=%s" % (a, b)
tokens = full_line.split()
if tokens[0] == host:
- found = False
- for token in tokens:
- if token == variable:
- found = True
- continue
- self.assertTrue("Unable to find %s in line: %s" %
- (variable, full_line), found)
+ self.assertTrue(variable in tokens[1:], "Unable to find %s in line: %s" % (variable, full_line))
return
self.fail("unable to find host %s in inventory" % host)
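The simplified assertion above also fixes a latent problem in the old loop: the failure message was passed as assertTrue's first argument, and since a non-empty string is always truthy, the old check could never fail. A minimal illustration (class and test names are made up):

    import unittest

    class AssertArgOrderDemo(unittest.TestCase):
        def test_old_pattern_never_fails(self):
            found = False
            # Message passed as the expression: a non-empty string is truthy,
            # so this passes even though found is False.
            self.assertTrue("Unable to find variable in line", found)

        def test_new_pattern_checks_membership(self):
            tokens = ['host1', 'color=blue']
            # Boolean check first, message second -- the fixed form.
            self.assertTrue('color=blue' in tokens[1:],
                            "Unable to find color=blue in line")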
diff --git a/utils/test/fixture.py b/utils/test/fixture.py
index 5200d275d..873ac4a27 100644
--- a/utils/test/fixture.py
+++ b/utils/test/fixture.py
@@ -65,14 +65,13 @@ class OOCliFixture(OOInstallFixture):
def assert_result(self, result, exit_code):
if result.exit_code != exit_code:
- print("Unexpected result from CLI execution")
- print("Exit code: %s" % result.exit_code)
- print("Exception: %s" % result.exception)
- print(result.exc_info)
+ msg = ["Unexpected result from CLI execution\n"]
+ msg.append("Exit code: %s\n" % result.exit_code)
+ msg.append("Exception: %s\n" % result.exception)
import traceback
- traceback.print_exception(*result.exc_info)
- print("Output:\n%s" % result.output)
- self.fail("Exception during CLI execution")
+ msg.extend(traceback.format_exception(*result.exc_info))
+ msg.append("Output:\n%s" % result.output)
+ self.fail("".join(msg))
def _verify_load_facts(self, load_facts_mock):
""" Check that we ran load facts with expected inputs. """
diff --git a/utils/test/oo_config_tests.py b/utils/test/oo_config_tests.py
index 2b4fce512..5651e6e7a 100644
--- a/utils/test/oo_config_tests.py
+++ b/utils/test/oo_config_tests.py
@@ -266,42 +266,3 @@ class HostTests(OOInstallFixture):
self.assertIn(node_labels_expected, legacy_inventory_line)
# An unquoted version is not present
self.assertNotIn(node_labels_bad, legacy_inventory_line)
-
-
- # def test_new_write_inventory_same_as_legacy(self):
- # """Verify the original write_host function produces the same output as the new method"""
- # yaml_props = {
- # 'ip': '192.168.0.1',
- # 'hostname': 'a.example.com',
- # 'connect_to': 'a-private.example.com',
- # 'public_ip': '192.168.0.1',
- # 'public_hostname': 'a.example.com',
- # 'new_host': True,
- # 'roles': ['node'],
- # 'other_variables': {
- # 'zzz': 'last',
- # 'foo': 'bar',
- # 'aaa': 'first',
- # },
- # }
-
- # new_node = Host(**yaml_props)
- # inventory = cStringIO()
-
- # # This is what the original 'write_host' function will
- # # generate. write_host has no return value, it just writes
- # # directly to the file 'inventory' which in this test-case is
- # # a StringIO object
- # ooinstall.openshift_ansible.write_host(
- # new_node,
- # 'node',
- # inventory,
- # schedulable=True)
- # legacy_inventory_line = inventory.getvalue()
-
- # # This is what the new method in the Host class generates
- # new_inventory_line = new_node.inventory_string('node', schedulable=True)
-
- # self.assertEqual(
- # legacy_inventory_line,
- # new_inventory_line)
diff --git a/utils/test/openshift_ansible_tests.py b/utils/test/openshift_ansible_tests.py
index 5847fe37b..02a9754db 100644
--- a/utils/test/openshift_ansible_tests.py
+++ b/utils/test/openshift_ansible_tests.py
@@ -2,7 +2,6 @@ import os
import unittest
import tempfile
import shutil
-import yaml
from six.moves import configparser
@@ -40,17 +39,10 @@ class TestOpenShiftAnsible(unittest.TestCase):
def tearDown(self):
shutil.rmtree(self.work_dir)
- def generate_hosts(self, num_hosts, name_prefix, roles=None, new_host=False):
- hosts = []
- for num in range(1, num_hosts + 1):
- hosts.append(Host(connect_to=name_prefix + str(num),
- roles=roles, new_host=new_host))
- return hosts
-
def test_generate_inventory_new_nodes(self):
- hosts = self.generate_hosts(1, 'master', roles=(['master', 'etcd']))
- hosts.extend(self.generate_hosts(1, 'node', roles=['node']))
- hosts.extend(self.generate_hosts(1, 'new_node', roles=['node'], new_host=True))
+ hosts = generate_hosts(1, 'master', roles=(['master', 'etcd']))
+ hosts.extend(generate_hosts(1, 'node', roles=['node']))
+ hosts.extend(generate_hosts(1, 'new_node', roles=['node'], new_host=True))
openshift_ansible.generate_inventory(hosts)
inventory = configparser.ConfigParser(allow_no_value=True)
inventory.read(self.inventory)
@@ -59,8 +51,8 @@ class TestOpenShiftAnsible(unittest.TestCase):
def test_write_inventory_vars_role_vars(self):
with open(self.inventory, 'w') as inv:
- openshift_ansible.CFG.deployment.roles['master'].variables={'color': 'blue'}
- openshift_ansible.CFG.deployment.roles['node'].variables={'color': 'green'}
+ openshift_ansible.CFG.deployment.roles['master'].variables = {'color': 'blue'}
+ openshift_ansible.CFG.deployment.roles['node'].variables = {'color': 'green'}
openshift_ansible.write_inventory_vars(inv, None)
inventory = configparser.ConfigParser(allow_no_value=True)
@@ -69,3 +61,11 @@ class TestOpenShiftAnsible(unittest.TestCase):
self.assertEquals('blue', inventory.get('masters:vars', 'color'))
self.assertTrue(inventory.has_section('nodes:vars'))
self.assertEquals('green', inventory.get('nodes:vars', 'color'))
+
+
+def generate_hosts(num_hosts, name_prefix, roles=None, new_host=False):
+ hosts = []
+ for num in range(1, num_hosts + 1):
+ hosts.append(Host(connect_to=name_prefix + str(num),
+ roles=roles, new_host=new_host))
+ return hosts
diff --git a/utils/test/test_utils.py b/utils/test/test_utils.py
index cbce64f7e..cabeaee34 100644
--- a/utils/test/test_utils.py
+++ b/utils/test/test_utils.py
@@ -2,14 +2,14 @@
Unittests for ooinstall utils.
"""
-import six
import unittest
-import logging
-import sys
import copy
-from ooinstall.utils import debug_env, is_valid_hostname
import mock
+import six
+
+from ooinstall.utils import debug_env, is_valid_hostname
+
class TestUtils(unittest.TestCase):
"""
diff --git a/utils/tox.ini b/utils/tox.ini
deleted file mode 100644
index 2524923cb..000000000
--- a/utils/tox.ini
+++ /dev/null
@@ -1,19 +0,0 @@
-[tox]
-minversion=2.3.1
-envlist =
- py{27,35}-{flake8,unit,pylint}
-skipsdist=True
-skip_missing_interpreters=True
-
-[testenv]
-usedevelop=True
-deps =
- -rtest-requirements.txt
- py35-flake8: flake8-bugbear
-commands =
- # Needed to make detox work, since it ignores usedevelop
- # https://github.com/tox-dev/tox/issues/180
- unit: pip install -e .
- unit: pytest {posargs}
- flake8: python setup.py flake8
- pylint: python setup.py lint