-rw-r--r--  .tito/packages/openshift-ansible | 2
-rw-r--r--  README_GCE.md | 3
-rw-r--r--  filter_plugins/openshift_master.py | 4
-rw-r--r--  inventory/byo/hosts.origin.example | 36
-rw-r--r--  inventory/byo/hosts.ose.example | 36
-rw-r--r--  openshift-ansible.spec | 121
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml | 4
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml | 4
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml | 8
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml | 6
-rw-r--r--  playbooks/byo/openshift-master/restart.yml | 14
-rw-r--r--  playbooks/common/openshift-cluster/redeploy-certificates.yml | 4
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/init.yml | 17
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml | 4
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml | 29
-rw-r--r--  playbooks/common/openshift-master/config.yml | 4
-rw-r--r--  playbooks/common/openshift-master/restart_hosts.yml | 17
-rw-r--r--  playbooks/common/openshift-master/restart_services.yml | 6
-rw-r--r--  playbooks/common/openshift-master/validate_restart.yml (renamed from playbooks/common/openshift-master/restart.yml) | 13
-rw-r--r--  roles/lib_openshift/library/oc_edit.py | 1377
-rw-r--r--  roles/lib_openshift/library/oc_obj.py | 1444
-rw-r--r--  roles/lib_openshift/library/oc_route.py | 1614
-rw-r--r--  roles/lib_openshift/library/oc_version.py | 1232
-rw-r--r--  roles/lib_openshift/src/ansible/oc_edit.py | 48
-rw-r--r--  roles/lib_openshift/src/ansible/oc_obj.py | 37
-rw-r--r--  roles/lib_openshift/src/ansible/oc_route.py | 84
-rw-r--r--  roles/lib_openshift/src/ansible/oc_version.py | 26
-rw-r--r--  roles/lib_openshift/src/class/oc_edit.py | 94
-rw-r--r--  roles/lib_openshift/src/class/oc_obj.py | 193
-rw-r--r--  roles/lib_openshift/src/class/oc_route.py | 170
-rw-r--r--  roles/lib_openshift/src/class/oc_version.py | 47
-rw-r--r--  roles/lib_openshift/src/doc/edit | 116
-rw-r--r--  roles/lib_openshift/src/doc/generated | 10
-rw-r--r--  roles/lib_openshift/src/doc/license | 16
-rw-r--r--  roles/lib_openshift/src/doc/obj | 95
-rw-r--r--  roles/lib_openshift/src/doc/route | 120
-rw-r--r--  roles/lib_openshift/src/doc/version | 40
-rwxr-xr-x  roles/lib_openshift/src/generate.py | 75
-rw-r--r--  roles/lib_openshift/src/lib/base.py | 522
-rw-r--r--  roles/lib_openshift/src/lib/import.py | 17
-rw-r--r--  roles/lib_openshift/src/lib/route.py | 123
-rw-r--r--  roles/lib_openshift/src/sources.yml | 38
-rwxr-xr-x  roles/lib_openshift/src/test/integration/oc_route.yml | 77
-rwxr-xr-x  roles/lib_openshift/src/test/integration/oc_version.yml | 17
-rwxr-xr-x  roles/lib_openshift/src/test/unit/oc_version.py | 70
-rw-r--r--  roles/lib_utils/library/yedit.py | 114
-rw-r--r--  roles/lib_utils/src/ansible/yedit.py | 44
-rw-r--r--  roles/lib_utils/src/class/yedit.py | 64
-rw-r--r--  roles/lib_utils/src/doc/generated | 9
-rw-r--r--  roles/lib_utils/src/doc/yedit | 6
-rwxr-xr-x  roles/lib_utils/src/generate.py | 82
-rw-r--r--  roles/lib_utils/src/sources.yml (renamed from roles/lib_utils/src/generate_sources.yml) | 1
-rw-r--r--  roles/lib_utils/src/test/integration/kube-manager-test.yaml.orig | 52
-rw-r--r--  roles/openshift_ca/tasks/main.yml | 2
-rw-r--r--  roles/openshift_certificate_expiry/README.md | 312
-rw-r--r--  roles/openshift_certificate_expiry/examples/cert-expiry-report-html.png | bin 0 -> 189466 bytes
-rw-r--r--  roles/openshift_certificate_expiry/examples/cert-expiry-report.html | 396
-rw-r--r--  roles/openshift_certificate_expiry/examples/cert-expiry-report.json | 178
-rw-r--r--  roles/openshift_certificate_expiry/examples/playbooks/default.yaml | 10
-rw-r--r--  roles/openshift_certificate_expiry/examples/playbooks/easy-mode.yaml | 21
-rw-r--r--  roles/openshift_certificate_expiry/examples/playbooks/html_and_json_default_paths.yaml | 12
-rw-r--r--  roles/openshift_certificate_expiry/examples/playbooks/longer-warning-period-json-results.yaml | 13
-rw-r--r--  roles/openshift_certificate_expiry/examples/playbooks/longer_warning_period.yaml | 12
-rw-r--r--  roles/openshift_certificate_expiry/library/openshift_cert_expiry.py | 35
-rw-r--r--  roles/openshift_certificate_expiry/templates/cert-expiry-table.html.j2 | 29
-rw-r--r--  roles/openshift_examples/files/examples/v1.3/quickstart-templates/jenkins-ephemeral-template.json | 14
-rw-r--r--  roles/openshift_examples/files/examples/v1.3/quickstart-templates/jenkins-persistent-template.json | 14
-rw-r--r--  roles/openshift_logging/library/openshift_logging_facts.py | 4
-rw-r--r--  roles/openshift_logging/tasks/generate_configmaps.yaml | 2
-rw-r--r--  roles/openshift_logging/tasks/install_logging.yaml | 21
-rw-r--r--  roles/openshift_master_certificates/tasks/main.yml | 65
-rw-r--r--  roles/openshift_metrics/README.md | 2
-rw-r--r--  roles/openshift_metrics/tasks/install_metrics.yaml | 16
-rw-r--r--  roles/openshift_node/meta/main.yml | 4
-rw-r--r--  roles/openshift_node/templates/openshift.docker.node.service | 2
-rw-r--r--  roles/openshift_node_certificates/tasks/main.yml | 26
-rw-r--r--  roles/rhel_subscribe/tasks/main.yml | 5
-rw-r--r--  setup.py | 54
-rw-r--r--  tox.ini | 3
79 files changed, 9266 insertions(+), 392 deletions(-)
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index efc0cbe26..fd9a1844f 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.5.0-1 ./
+3.5.1-1 ./
diff --git a/README_GCE.md b/README_GCE.md
index f909630aa..99c8715de 100644
--- a/README_GCE.md
+++ b/README_GCE.md
@@ -87,7 +87,8 @@ Install Dependencies
```
> Installation using Mac OSX requires pycrypto library
-> $ pip install pycrypto
+>
+> <kbd>$ pip install pycrypto</kbd>
Test The Setup
--------------
diff --git a/filter_plugins/openshift_master.py b/filter_plugins/openshift_master.py
index 437f4c400..f71d9b863 100644
--- a/filter_plugins/openshift_master.py
+++ b/filter_plugins/openshift_master.py
@@ -517,7 +517,9 @@ class FilterModule(object):
''' Return certificates to synchronize based on facts. '''
if not issubclass(type(hostvars), dict):
raise errors.AnsibleFilterError("|failed expects hostvars is a dict")
- certs = ['admin.crt',
+ certs = ['ca.crt',
+ 'ca.key',
+ 'admin.crt',
'admin.key',
'admin.kubeconfig',
'master.kubelet-client.crt',
diff --git a/inventory/byo/hosts.origin.example b/inventory/byo/hosts.origin.example
index c800c690c..dde172c4a 100644
--- a/inventory/byo/hosts.origin.example
+++ b/inventory/byo/hosts.origin.example
@@ -606,17 +606,39 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_generate_no_proxy_hosts=True
#
# These options configure the BuildDefaults admission controller which injects
-# environment variables into Builds. These values will default to the global proxy
-# config values. You only need to set these if they differ from the global settings
-# above. See BuildDefaults
-# documentation at https://docs.openshift.org/latest/admin_guide/build_defaults_overrides.html
+# configuration into Builds. Proxy related values will default to the global proxy
+# config values. You only need to set these if they differ from the global proxy settings.
+# See BuildDefaults documentation at
+# https://docs.openshift.org/latest/admin_guide/build_defaults_overrides.html
#openshift_builddefaults_http_proxy=http://USER:PASSWORD@HOST:PORT
#openshift_builddefaults_https_proxy=https://USER:PASSWORD@HOST:PORT
-#openshift_builddefaults_no_proxy=build_defaults
+#openshift_builddefaults_no_proxy=mycorp.com
#openshift_builddefaults_git_http_proxy=http://USER:PASSWORD@HOST:PORT
#openshift_builddefaults_git_https_proxy=https://USER:PASSWORD@HOST:PORT
-# Or you may optionally define your own serialized as json
-#openshift_builddefaults_json='{"BuildDefaults":{"configuration":{"kind":"BuildDefaultsConfig","apiVersion":"v1","gitHTTPSProxy":"http://proxy.example.com.redhat.com:3128","gitHTTPProxy":"http://proxy.example.com.redhat.com:3128","env":[{"name":"HTTP_PROXY","value":"http://proxy.example.com.redhat.com:3128"},{"name":"HTTPS_PROXY","value":"http://proxy.example.com.redhat.com:3128"},{"name":"NO_PROXY","value":"ose3-master.example.com"}]}}'
+#openshift_builddefaults_git_no_proxy=mycorp.com
+#openshift_builddefaults_image_labels=[{'name':'imagelabelname1','value':'imagelabelvalue1'}]
+#openshift_builddefaults_nodeselectors={'nodelabel1':'nodelabelvalue1'}
+#openshift_builddefaults_annotations={'annotationkey1':'annotationvalue1'}
+#openshift_builddefaults_resources_requests_cpu=100m
+#openshift_builddefaults_resources_requests_memory=256m
+#openshift_builddefaults_resources_limits_cpu=1000m
+#openshift_builddefaults_resources_limits_memory=512m
+
+# Or you may optionally define your own build defaults configuration serialized as json
+#openshift_builddefaults_json='{"BuildDefaults":{"configuration":{"apiVersion":"v1","env":[{"name":"HTTP_PROXY","value":"http://proxy.example.com.redhat.com:3128"},{"name":"NO_PROXY","value":"ose3-master.example.com"}],"gitHTTPProxy":"http://proxy.example.com:3128","gitNoProxy":"ose3-master.example.com","kind":"BuildDefaultsConfig"}}}'
+
+# These options configure the BuildOverrides admission controller which injects
+# configuration into Builds.
+# See BuildOverrides documentation at
+# https://docs.openshift.org/latest/admin_guide/build_defaults_overrides.html
+#openshift_buildoverrides_force_pull=true
+#openshift_buildoverrides_image_labels=[{'name':'imagelabelname1','value':'imagelabelvalue1'}]
+#openshift_buildoverrides_nodeselectors={'nodelabel1':'nodelabelvalue1'}
+#openshift_buildoverrides_annotations={'annotationkey1':'annotationvalue1'}
+
+# Or you may optionally define your own build overrides configuration serialized as json
+#openshift_buildoverrides_json='{"BuildOverrides":{"configuration":{"apiVersion":"v1","kind":"BuildOverridesConfig","forcePull":"true"}}}'
+
# masterConfig.volumeConfig.dynamicProvisioningEnabled, configurable as of 1.2/3.2, enabled by default
#openshift_master_dynamic_provisioning_enabled=False
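Editor's note (not part of the commit): a minimal sketch, assuming the BuildDefaultsConfig schema referenced in the documentation link above, of roughly how the example openshift_builddefaults_* settings surface in master-config.yaml under the BuildDefaults admission plugin. The values are the placeholder examples from the inventory comments, not defaults.

# Sketch only -- approximate admission plugin stanza generated from the example variables above
admissionConfig:
  pluginConfig:
    BuildDefaults:
      configuration:
        apiVersion: v1
        kind: BuildDefaultsConfig
        gitHTTPProxy: http://proxy.example.com:3128
        gitNoProxy: mycorp.com
        env:
        - name: HTTP_PROXY
          value: http://proxy.example.com:3128
        - name: NO_PROXY
          value: mycorp.com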
diff --git a/inventory/byo/hosts.ose.example b/inventory/byo/hosts.ose.example
index 0e503f28d..c0dd8a1e8 100644
--- a/inventory/byo/hosts.ose.example
+++ b/inventory/byo/hosts.ose.example
@@ -606,17 +606,39 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_generate_no_proxy_hosts=True
#
# These options configure the BuildDefaults admission controller which injects
-# environment variables into Builds. These values will default to the global proxy
-# config values. You only need to set these if they differ from the global settings
-# above. See BuildDefaults
-# documentation at https://docs.openshift.org/latest/admin_guide/build_defaults_overrides.html
+# configuration into Builds. Proxy related values will default to the global proxy
+# config values. You only need to set these if they differ from the global proxy settings.
+# See BuildDefaults documentation at
+# https://docs.openshift.org/latest/admin_guide/build_defaults_overrides.html
#openshift_builddefaults_http_proxy=http://USER:PASSWORD@HOST:PORT
#openshift_builddefaults_https_proxy=https://USER:PASSWORD@HOST:PORT
-#openshift_builddefaults_no_proxy=build_defaults
+#openshift_builddefaults_no_proxy=mycorp.com
#openshift_builddefaults_git_http_proxy=http://USER:PASSWORD@HOST:PORT
#openshift_builddefaults_git_https_proxy=https://USER:PASSWORD@HOST:PORT
-# Or you may optionally define your own serialized as json
-#openshift_builddefaults_json='{"BuildDefaults":{"configuration":{"kind":"BuildDefaultsConfig","apiVersion":"v1","gitHTTPSProxy":"http://proxy.example.com.redhat.com:3128","gitHTTPProxy":"http://proxy.example.com.redhat.com:3128","env":[{"name":"HTTP_PROXY","value":"http://proxy.example.com.redhat.com:3128"},{"name":"HTTPS_PROXY","value":"http://proxy.example.com.redhat.com:3128"},{"name":"NO_PROXY","value":"ose3-master.example.com"}]}}'
+#openshift_builddefaults_git_no_proxy=mycorp.com
+#openshift_builddefaults_image_labels=[{'name':'imagelabelname1','value':'imagelabelvalue1'}]
+#openshift_builddefaults_nodeselectors={'nodelabel1':'nodelabelvalue1'}
+#openshift_builddefaults_annotations={'annotationkey1':'annotationvalue1'}
+#openshift_builddefaults_resources_requests_cpu=100m
+#openshift_builddefaults_resources_requests_memory=256m
+#openshift_builddefaults_resources_limits_cpu=1000m
+#openshift_builddefaults_resources_limits_memory=512m
+
+# Or you may optionally define your own build defaults configuration serialized as json
+#openshift_builddefaults_json='{"BuildDefaults":{"configuration":{"apiVersion":"v1","env":[{"name":"HTTP_PROXY","value":"http://proxy.example.com.redhat.com:3128"},{"name":"NO_PROXY","value":"ose3-master.example.com"}],"gitHTTPProxy":"http://proxy.example.com:3128","gitNoProxy":"ose3-master.example.com","kind":"BuildDefaultsConfig"}}}'
+
+# These options configure the BuildOverrides admission controller which injects
+# configuration into Builds.
+# See BuildOverrides documentation at
+# https://docs.openshift.org/latest/admin_guide/build_defaults_overrides.html
+#openshift_buildoverrides_force_pull=true
+#openshift_buildoverrides_image_labels=[{'name':'imagelabelname1','value':'imagelabelvalue1'}]
+#openshift_buildoverrides_nodeselectors={'nodelabel1':'nodelabelvalue1'}
+#openshift_buildoverrides_annotations={'annotationkey1':'annotationvalue1'}
+
+# Or you may optionally define your own build overrides configuration serialized as json
+#openshift_buildoverrides_json='{"BuildOverrides":{"configuration":{"apiVersion":"v1","kind":"BuildOverridesConfig","forcePull":"true"}}}'
+
# masterConfig.volumeConfig.dynamicProvisioningEnabled, configurable as of 1.2/3.2, enabled by default
#openshift_master_dynamic_provisioning_enabled=False
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index 955772486..a2940e001 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -5,7 +5,7 @@
}
Name: openshift-ansible
-Version: 3.5.0
+Version: 3.5.1
Release: 1%{?dist}
Summary: Openshift and Atomic Enterprise Ansible
License: ASL 2.0
@@ -16,6 +16,7 @@ BuildArch: noarch
Requires: ansible >= 2.2.0.0-1
Requires: python2
Requires: python-six
+Requires: tar
Requires: openshift-ansible-docs = %{version}-%{release}
%description
@@ -250,6 +251,124 @@ Atomic OpenShift Utilities includes
%changelog
+* Wed Jan 18 2017 Scott Dodson <sdodson@redhat.com> 3.5.1-1
+- More reliable wait for master after full host reboot. (dgoodwin@redhat.com)
+- kubelet must have rw to cgroups for pod/qos cgroups to function
+ (decarr@redhat.com)
+- Adding a few updates for python27,35 compatibility (kwoodson@redhat.com)
+- update examples to cover build default/override configuration
+ (bparees@redhat.com)
+- Fix yaml lint in easy-mode playbook (tbielawa@redhat.com)
+- Removed trailing spaces from line #34 (kunallimaye@gmail.com)
+- Install subscription-manager to fix issue-3102 (kunallimaye@gmail.com)
+- Changing formatting for issue#2244 update (kunallimaye@gmail.com)
+- Addressing Travis errors (ewolinet@redhat.com)
+- Adding --verify to generate script. (kwoodson@redhat.com)
+- v1.3 Add RHAMP (sdodson@redhat.com)
+- Update v1.4 content, add api-gateway (sdodson@redhat.com)
+- Add v1.5 content (sdodson@redhat.com)
+- Update example sync script (sdodson@redhat.com)
+- use pod to generate keystores (#14) (jcantrill@users.noreply.github.com)
+- Ensure serial certificate generation for node and master certificates.
+ (abutcher@redhat.com)
+- [Cert Expiry] Add serial numbers, include example PBs, docs
+ (tbielawa@redhat.com)
+- properly set changes when oc apply (jcantril@redhat.com)
+- additional cr fixes (jcantril@redhat.com)
+- metrics fixes for yamlint (jcantril@redhat.com)
+- additional code reviews (jcantril@redhat.com)
+- set replicas to current value so not to disrupt current pods (#13)
+ (jcantrill@users.noreply.github.com)
+- User provided certs pushed from control. vars reorg (#12)
+ (jcantrill@users.noreply.github.com)
+- update vars to allow scaling of components (#9)
+ (jcantrill@users.noreply.github.com)
+- allow definition of cpu/memory limits/resources (#11)
+ (jcantrill@users.noreply.github.com)
+- rename variables to be less extraneous (#10)
+ (jcantrill@users.noreply.github.com)
+- copy admin cert for use in subsequent tasks (#8)
+ (jcantrill@users.noreply.github.com)
+- Add tasks to uninstall metrics (#7) (jcantrill@users.noreply.github.com)
+- Custom certificates (#5) (bbarcaro@redhat.com)
+- prefix vars with metrics role (#4) (jcantrill@users.noreply.github.com)
+- Bruno Barcarol Guimarães work to move metrics to ansible from deployer
+ (jcantril@redhat.com)
+- Adding oc_edit module to lib_openshift. (kwoodson@redhat.com)
+- Create individual serving cert and loopback kubeconfig for additional
+ masters. (abutcher@redhat.com)
+- add configuration for build default+overrides settings (bparees@redhat.com)
+- delete idempotent (ewolinet@redhat.com)
+- additional comments addressed (ewolinet@redhat.com)
+- Updating upgrade_logging to be more idempotent (ewolinet@redhat.com)
+- Using oc_apply task for idempotent (ewolinet@redhat.com)
+- Removing shell module calls and cleaning up changed (ewolinet@redhat.com)
+- lib_openshift modules. This is the first one. oc_route.
+ (kwoodson@redhat.com)
+- Updated modify_yaml with docstring and clarifications (smilner@redhat.com)
+- Rename subrole facts -> init (rhcarvalho@gmail.com)
+- Move Python modules into role (rhcarvalho@gmail.com)
+- Document playbook directories (rhcarvalho@gmail.com)
+- Document bin/cluster tool (rhcarvalho@gmail.com)
+- keys should be lowercase according to the spec (jf.cron0@gmail.com)
+- filter: Removed unused validation calls (smilner@redhat.com)
+- Updated initializer usage in filters (smilner@redhat.com)
+- fix when statement indentation, cast to bool (jf.cron0@gmail.com)
+- add openshift_facts as role dependency (jf.cron0@gmail.com)
+- Added setup.py to flake8 tests (smilner@redhat.com)
+- Do not default registry storage kind to 'nfs' when 'nfs' group exists.
+ (abutcher@redhat.com)
+- Fix inconsistent task name (rhcarvalho@gmail.com)
+- Reduce code duplication using variable (rhcarvalho@gmail.com)
+- Another proposed update to the issue template (tbielawa@redhat.com)
+- Replace custom variables with openshift_facts (rhcarvalho@gmail.com)
+- Catch DBus exceptions on class initialization (rhcarvalho@gmail.com)
+- addressing comments (ewolinet@redhat.com)
+- Move playbook to BYO (rhcarvalho@gmail.com)
+- Fix typo in inventory README.md (lberk@redhat.com)
+- Refactor preflight check into roles (rhcarvalho@gmail.com)
+- Make flake8 (py35) happy on bare except (rhcarvalho@gmail.com)
+- Make callback plugin an always-on aggregate plugin (rhcarvalho@gmail.com)
+- Add RPM checks as an adhoc playbook (rhcarvalho@gmail.com)
+- first swing at release version wording (timbielawa@gmail.com)
+- Correct tox to run on Travis (rteague@redhat.com)
+- Adding ability to systematically modify yaml from ansible.
+ (kwoodson@redhat.com)
+- oo_filters: Moved static methods to functions (smilner@redhat.com)
+- Correct return code comparison for yamllint (rteague@redhat.com)
+- Add a fact to select --evacuate or --drain based on your OCP version
+ (tbielawa@redhat.com)
+- Update branch status (sdodson@redhat.com)
+- rename openshift_metrics to openshift_hosted_metrics (jcantril@redhat.com)
+- Update aws dynamic inventory (lhuard@amadeus.com)
+- improve issue template (sdodson@redhat.com)
+- cleanup: Removed debug prints from tests (smilner@redhat.com)
+- remove debug statement from test (jdetiber@redhat.com)
+- Support openshift_node_port_range for configuring service NodePorts
+ (ccoleman@redhat.com)
+- Workaround for dnf+docker version race condition (smilner@redhat.com)
+- use etcdctl from the container when containerized=True (gscrivan@redhat.com)
+- Partial uninstall (sejug@redhat.com)
+- increase test coverage (jdetiber@redhat.com)
+- Update aws dynamic inventory (lhuard@amadeus.com)
+- update travis to use tox for utils (jdetiber@redhat.com)
+- More toxification (jdetiber@redhat.com)
+- add test for utils to bump coverage (jdetiber@redhat.com)
+- The scaleup subcommand does not support the unattended option
+ (tbielawa@redhat.com)
+- Move role dependencies out of playbooks for openshift_master, openshift_node
+ and openshift_hosted. (abutcher@redhat.com)
+- Remove unused file (rhcarvalho@gmail.com)
+- Remove unused file (rhcarvalho@gmail.com)
+- Remove spurious argument (rhcarvalho@gmail.com)
+- Fixing collision of system.admin cert generation (ewolinet@redhat.com)
+- minor updates for code reviews, remove unused params (jcantril@redhat.com)
+- Updating to use deployer pod to generate JKS chain instead
+ (ewolinet@redhat.com)
+- Creating openshift_logging role for deploying Aggregated Logging without a
+ deployer image (ewolinet@redhat.com)
+- Begin requiring Docker 1.12. (dgoodwin@redhat.com)
+
* Mon Jan 09 2017 Scott Dodson <sdodson@redhat.com> 3.5.0-1
- Update manpage version. (tbielawa@redhat.com)
- Fix openshift_image_tag=latest. (abutcher@redhat.com)
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml
index 4ce815271..84a5a026f 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml
@@ -66,6 +66,10 @@
tags:
- pre_upgrade
+- include: ../../../../common/openshift-master/validate_restart.yml
+ tags:
+ - pre_upgrade
+
- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
index d6af71827..7717c95e4 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
@@ -71,6 +71,10 @@
tags:
- pre_upgrade
+- include: ../../../../common/openshift-master/validate_restart.yml
+ tags:
+ - pre_upgrade
+
- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml
index d6115e7a5..6b69348b7 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml
@@ -6,8 +6,8 @@
tags:
- pre_upgrade
-# Configure the upgrade target for the common upgrade tasks:
-- hosts: l_oo_all_hosts
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: l_oo_all_hosts
tags:
- pre_upgrade
tasks:
@@ -66,6 +66,10 @@
tags:
- pre_upgrade
+- include: ../../../../common/openshift-master/validate_restart.yml
+ tags:
+ - pre_upgrade
+
- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
index 8cde2ac88..92d7c943a 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
@@ -71,6 +71,10 @@
tags:
- pre_upgrade
+- include: ../../../../common/openshift-master/validate_restart.yml
+ tags:
+ - pre_upgrade
+
- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
tags:
- pre_upgrade
@@ -94,5 +98,7 @@
- include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml
- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml
+ vars:
+ master_config_hook: "v3_4/master_config_upgrade.yml"
- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml
diff --git a/playbooks/byo/openshift-master/restart.yml b/playbooks/byo/openshift-master/restart.yml
index 0a163526a..b60807a71 100644
--- a/playbooks/byo/openshift-master/restart.yml
+++ b/playbooks/byo/openshift-master/restart.yml
@@ -15,4 +15,16 @@
tasks:
- include_vars: ../../byo/openshift-cluster/cluster_hosts.yml
-- include: ../../common/openshift-master/restart.yml
+- include: ../../common/openshift-cluster/evaluate_groups.yml
+- include: ../../common/openshift-master/validate_restart.yml
+
+- name: Restart masters
+ hosts: oo_masters_to_config
+ vars:
+ openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
+ serial: 1
+ tasks:
+ - include: restart_hosts.yml
+ when: openshift.common.rolling_restart_mode == 'system'
+ - include: restart_services.yml
+ when: openshift.common.rolling_restart_mode == 'services'
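Editor's note (not part of the commit): which include runs is driven by openshift.common.rolling_restart_mode, which openshift_facts derives from the openshift_rolling_restart_mode inventory variable ('services' restarts only the master services; 'system' reboots each master host, one at a time given serial: 1). A minimal sketch, assuming that variable name, of requesting full host reboots via group vars:

# group_vars/masters.yml (sketch): opt into full host reboots during the rolling restart
openshift_rolling_restart_mode: system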
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates.yml b/playbooks/common/openshift-cluster/redeploy-certificates.yml
index 6e3e04a6b..2383836d4 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates.yml
@@ -108,10 +108,6 @@
| oo_select_keys(groups['oo_etcd_to_config'] | default([]))
| oo_collect('openshift.common.hostname')
| default(none, true) }}"
- openshift_master_hostnames: "{{ hostvars
- | oo_select_keys(groups['oo_masters_to_config'] | default([]))
- | oo_collect('openshift.common.all_hostnames')
- | oo_flatten | unique }}"
openshift_certificates_redeploy: true
- role: openshift_etcd_client_certificates
etcd_certificates_redeploy: true
diff --git a/playbooks/common/openshift-cluster/upgrades/init.yml b/playbooks/common/openshift-cluster/upgrades/init.yml
index 8cac2fb3b..76645ff3f 100644
--- a/playbooks/common/openshift-cluster/upgrades/init.yml
+++ b/playbooks/common/openshift-cluster/upgrades/init.yml
@@ -1,5 +1,6 @@
---
-- hosts: localhost
+- name: Create l_oo_all_hosts group
+ hosts: localhost
connection: local
become: no
gather_facts: no
@@ -10,7 +11,8 @@
groups: l_oo_all_hosts
with_items: "{{ g_all_hosts | default([]) }}"
-- hosts: l_oo_all_hosts
+- name: Include g_*_hosts vars for hosts in group l_oo_all_hosts
+ hosts: l_oo_all_hosts
gather_facts: no
tasks:
- include_vars: ../../../byo/openshift-cluster/cluster_hosts.yml
@@ -46,3 +48,14 @@
when: openshift_docker_log_options is not defined
- include: ../initialize_facts.yml
+
+- name: Ensure clean repo cache in the event repos have been changed manually
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - name: Clean package cache
+ command: "{{ ansible_pkg_mgr }} clean all"
+ when: not openshift.common.is_atomic | bool
+ args:
+ warn: no
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
index 9632626a4..c83923dae 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
@@ -12,10 +12,6 @@
msg: Verify the correct version was found
when: verify_upgrade_version is defined and openshift_version != verify_upgrade_version
- - name: Clean package cache
- command: "{{ ansible_pkg_mgr }} clean all"
- when: not openshift.common.is_atomic | bool
-
- set_fact:
g_new_service_name: "{{ 'origin' if deployment_type =='origin' else 'atomic-openshift' }}"
when: not openshift.common.is_containerized | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index 6950b6166..7f738ea0f 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -30,14 +30,6 @@
- name: Upgrade and backup etcd
include: ./etcd/main.yml
-- name: Upgrade master packages
- hosts: oo_masters_to_config
- roles:
- - openshift_facts
- tasks:
- - include: rpm_upgrade.yml component=master
- when: not openshift.common.is_containerized | bool
-
# Create service signer cert when missing. Service signer certificate
# is added to master config in the master config hook for v3_3.
- name: Determine if service signer cert must be created
@@ -59,14 +51,20 @@
roles:
- openshift_master_facts
-- name: Upgrade master config and systemd units
+- name: Upgrade master
hosts: oo_masters_to_config
+ vars:
+ openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
+ serial: 1
handlers:
- include: ../../../../roles/openshift_master/handlers/main.yml
static: yes
roles:
- openshift_facts
post_tasks:
+ - include: rpm_upgrade.yml component=master
+ when: not openshift.common.is_containerized | bool
+
- include_vars: ../../../../roles/openshift_master_facts/vars/main.yml
- include: upgrade_scheduler.yml
@@ -104,9 +102,12 @@
state: link
when: ca_crt_stat.stat.isreg and not ca_bundle_stat.stat.exists
-- name: Set master update status to complete
- hosts: oo_masters_to_config
- tasks:
+ - include: ../../openshift-master/restart_hosts.yml
+ when: openshift.common.rolling_restart_mode == 'system'
+
+ - include: ../../openshift-master/restart_services.yml
+ when: openshift.common.rolling_restart_mode == 'services'
+
- set_fact:
master_update_complete: True
@@ -128,10 +129,6 @@
msg: "Upgrade cannot continue. The following masters did not finish updating: {{ master_update_failed | join(',') }}"
when: master_update_failed | length > 0
-# We are now ready to restart master services (or entire system
-# depending on openshift_rolling_restart_mode):
-- include: ../../openshift-master/restart.yml
-
###############################################################################
# Reconcile Cluster Roles, Cluster Role Bindings and Security Context Constraints
###############################################################################
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
index 39d64a126..de36fd263 100644
--- a/playbooks/common/openshift-master/config.yml
+++ b/playbooks/common/openshift-master/config.yml
@@ -134,10 +134,6 @@
| oo_select_keys(groups['oo_etcd_to_config'] | default([]))
| oo_collect('openshift.common.hostname')
| default(none, true) }}"
- openshift_master_hostnames: "{{ hostvars
- | oo_select_keys(groups['oo_masters_to_config'] | default([]))
- | oo_collect('openshift.common.all_hostnames')
- | oo_flatten | unique }}"
openshift_master_hosts: "{{ groups.oo_masters_to_config }}"
etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
etcd_cert_subdir: "openshift-master-{{ openshift.common.hostname }}"
diff --git a/playbooks/common/openshift-master/restart_hosts.yml b/playbooks/common/openshift-master/restart_hosts.yml
index ffa23d26a..832301e3d 100644
--- a/playbooks/common/openshift-master/restart_hosts.yml
+++ b/playbooks/common/openshift-master/restart_hosts.yml
@@ -7,12 +7,19 @@
ignore_errors: true
become: yes
-# Ensure the api_port is available.
-- name: Wait for master API to come back online
- become: no
+- name: Wait for master to restart
local_action:
module: wait_for
- host="{{ openshift.common.hostname }}"
+ host="{{ inventory_hostname }}"
state=started
delay=10
- port="{{ openshift.master.api_port }}"
+ become: no
+
+# Now that ssh is back up we can wait for API on the remote system,
+# avoiding some potential connection issues from local system:
+- name: Wait for master API to come back online
+ wait_for:
+ host: "{{ openshift.common.hostname }}"
+ state: started
+ delay: 10
+ port: "{{ openshift.master.api_port }}"
diff --git a/playbooks/common/openshift-master/restart_services.yml b/playbooks/common/openshift-master/restart_services.yml
index b40c32669..508b5a3ac 100644
--- a/playbooks/common/openshift-master/restart_services.yml
+++ b/playbooks/common/openshift-master/restart_services.yml
@@ -8,14 +8,14 @@
service:
name: "{{ openshift.common.service_type }}-master-api"
state: restarted
- when: openshift_master_ha | bool and openshift.master.cluster_method != 'pacemaker'
+ when: openshift_master_ha | bool
- name: Wait for master API to come back online
wait_for:
host: "{{ openshift.common.hostname }}"
state: started
delay: 10
port: "{{ openshift.master.api_port }}"
- when: openshift_master_ha | bool and openshift.master.cluster_method != 'pacemaker'
+ when: openshift_master_ha | bool
- name: Restart master controllers
service:
name: "{{ openshift.common.service_type }}-master-controllers"
@@ -23,4 +23,4 @@
# Ignore errrors since it is possible that type != simple for
# pre-3.1.1 installations.
ignore_errors: true
- when: openshift_master_ha | bool and openshift.master.cluster_method != 'pacemaker'
+ when: openshift_master_ha | bool
diff --git a/playbooks/common/openshift-master/restart.yml b/playbooks/common/openshift-master/validate_restart.yml
index 7b340887a..5dbb21502 100644
--- a/playbooks/common/openshift-master/restart.yml
+++ b/playbooks/common/openshift-master/validate_restart.yml
@@ -1,6 +1,4 @@
---
-- include: ../openshift-cluster/evaluate_groups.yml
-
- name: Validate configuration for rolling restart
hosts: oo_masters_to_config
roles:
@@ -65,14 +63,3 @@
- set_fact:
current_host: "{{ exists.stat.exists }}"
when: openshift.common.rolling_restart_mode == 'system'
-
-- name: Restart masters
- hosts: oo_masters_to_config
- vars:
- openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
- serial: 1
- tasks:
- - include: restart_hosts.yml
- when: openshift.common.rolling_restart_mode == 'system'
- - include: restart_services.yml
- when: openshift.common.rolling_restart_mode == 'services'
diff --git a/roles/lib_openshift/library/oc_edit.py b/roles/lib_openshift/library/oc_edit.py
new file mode 100644
index 000000000..1a361ae20
--- /dev/null
+++ b/roles/lib_openshift/library/oc_edit.py
@@ -0,0 +1,1377 @@
+#!/usr/bin/env python
+# pylint: disable=missing-docstring
+# flake8: noqa: T001
+# ___ ___ _ _ ___ ___ _ _____ ___ ___
+# / __| __| \| | __| _ \ /_\_ _| __| \
+# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
+# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
+# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
+# | |) | (_) | | .` | (_) || | | _|| |) | | | |
+# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
+#
+# Copyright 2016 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+'''
+ OpenShiftCLI class that wraps the oc commands in a subprocess
+'''
+# pylint: disable=too-many-lines
+
+from __future__ import print_function
+import atexit
+import json
+import os
+import re
+import shutil
+import subprocess
+# pylint: disable=import-error
+import ruamel.yaml as yaml
+from ansible.module_utils.basic import AnsibleModule
+
+DOCUMENTATION = '''
+---
+module: oc_edit
+short_description: Modify and idempotently manage openshift objects.
+description:
+ - Modify openshift objects programmatically.
+options:
+ state:
+ description:
+ - Currently present is only supported state.
+ required: true
+ default: present
+ choices: ["present"]
+ aliases: []
+ kubeconfig:
+ description:
+ - The path for the kubeconfig file to use for authentication
+ required: false
+ default: /etc/origin/master/admin.kubeconfig
+ aliases: []
+ debug:
+ description:
+ - Turn on debug output.
+ required: false
+ default: False
+ aliases: []
+ name:
+ description:
+ - Name of the object that is being queried.
+ required: false
+ default: None
+ aliases: []
+ namespace:
+ description:
+ - The namespace where the object lives.
+ required: false
+ default: str
+ aliases: []
+ kind:
+ description:
+ - The kind attribute of the object.
+ required: True
+ default: None
+ choices:
+ - bc
+ - buildconfig
+ - configmaps
+ - dc
+ - deploymentconfig
+ - imagestream
+ - imagestreamtag
+ - is
+ - istag
+ - namespace
+ - project
+ - projects
+ - node
+ - ns
+ - persistentvolume
+ - pv
+ - rc
+ - replicationcontroller
+ - routes
+ - scc
+ - secret
+ - securitycontextconstraints
+ - service
+ - svc
+ aliases: []
+ file_name:
+ description:
+    - The name of the file to edit
+ required: false
+ default: None
+ aliases: []
+ file_format:
+ description:
+ - The format of the file being edited.
+ required: false
+ default: yaml
+ aliases: []
+ content:
+ description:
+ - Content of the file
+ required: false
+ default: None
+ aliases: []
+ force:
+ description:
+ - Whether or not to force the operation
+ required: false
+ default: None
+ aliases: []
+ separator:
+ description:
+ - The separator format for the edit.
+ required: false
+ default: '.'
+ aliases: []
+author:
+- "Kenny Woodson <kwoodson@redhat.com>"
+extends_documentation_fragment: []
+'''
+
+EXAMPLES = '''
+oc_edit:
+ kind: rc
+ name: hawkular-cassandra-rc
+ namespace: openshift-infra
+ content:
+ spec.template.spec.containers[0].resources.limits.memory: 512
+ spec.template.spec.containers[0].resources.requests.memory: 256
+'''
+# noqa: E301,E302
+
+
+class YeditException(Exception):
+ ''' Exception class for Yedit '''
+ pass
+
+
+# pylint: disable=too-many-public-methods
+class Yedit(object):
+ ''' Class to modify yaml files '''
+ re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ com_sep = set(['.', '#', '|', ':'])
+
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ filename=None,
+ content=None,
+ content_type='yaml',
+ separator='.',
+ backup=False):
+ self.content = content
+ self._separator = separator
+ self.filename = filename
+ self.__yaml_dict = content
+ self.content_type = content_type
+ self.backup = backup
+ self.load(content_type=self.content_type)
+ if self.__yaml_dict is None:
+ self.__yaml_dict = {}
+
+ @property
+    def separator(self):
+        ''' getter method for separator '''
+        return self._separator
+
+    @separator.setter
+    def separator(self, inc_sep):
+        ''' setter method for separator '''
+        self._separator = inc_sep
+
+ @property
+ def yaml_dict(self):
+ ''' getter method for yaml_dict '''
+ return self.__yaml_dict
+
+ @yaml_dict.setter
+ def yaml_dict(self, value):
+ ''' setter method for yaml_dict '''
+ self.__yaml_dict = value
+
+ @staticmethod
+ def parse_key(key, sep='.'):
+ '''parse the key allowing the appropriate separator'''
+ common_separators = list(Yedit.com_sep - set([sep]))
+ return re.findall(Yedit.re_key % ''.join(common_separators), key)
+
+ @staticmethod
+ def valid_key(key, sep='.'):
+ '''validate the incoming key'''
+ common_separators = list(Yedit.com_sep - set([sep]))
+ if not re.match(Yedit.re_valid_key % ''.join(common_separators), key):
+ return False
+
+ return True
+
+ @staticmethod
+ def remove_entry(data, key, sep='.'):
+ ''' remove data at location key '''
+ if key == '' and isinstance(data, dict):
+ data.clear()
+ return True
+ elif key == '' and isinstance(data, list):
+ del data[:]
+ return True
+
+ if not (key and Yedit.valid_key(key, sep)) and \
+ isinstance(data, (list, dict)):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes[:-1]:
+ if dict_key and isinstance(data, dict):
+ data = data.get(dict_key, None)
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ # process last index for remove
+ # expected list entry
+ if key_indexes[-1][0]:
+ if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
+ del data[int(key_indexes[-1][0])]
+ return True
+
+ # expected dict entry
+ elif key_indexes[-1][1]:
+ if isinstance(data, dict):
+ del data[key_indexes[-1][1]]
+ return True
+
+ @staticmethod
+ def add_entry(data, key, item=None, sep='.'):
+        ''' Add an item to a dictionary with key notation a.b.c
+            d = {'a': {'b': 'c'}}
+            key = a.b, item = 'd'
+            result: d = {'a': {'b': 'd'}}
+        '''
+ if key == '':
+ pass
+ elif (not (key and Yedit.valid_key(key, sep)) and
+ isinstance(data, (list, dict))):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes[:-1]:
+ if dict_key:
+ if isinstance(data, dict) and dict_key in data and data[dict_key]: # noqa: E501
+ data = data[dict_key]
+ continue
+
+ elif data and not isinstance(data, dict):
+ return None
+
+ data[dict_key] = {}
+ data = data[dict_key]
+
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ if key == '':
+ data = item
+
+ # process last index for add
+ # expected list entry
+ elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
+ data[int(key_indexes[-1][0])] = item
+
+ # expected dict entry
+ elif key_indexes[-1][1] and isinstance(data, dict):
+ data[key_indexes[-1][1]] = item
+
+ return data
+
+ @staticmethod
+ def get_entry(data, key, sep='.'):
+ ''' Get an item from a dictionary with key notation a.b.c
+            d = {'a': {'b': 'c'}}
+ key = a.b
+ return c
+ '''
+ if key == '':
+ pass
+ elif (not (key and Yedit.valid_key(key, sep)) and
+ isinstance(data, (list, dict))):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes:
+ if dict_key and isinstance(data, dict):
+ data = data.get(dict_key, None)
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ return data
+
+ def write(self):
+ ''' write to file '''
+ if not self.filename:
+ raise YeditException('Please specify a filename.')
+
+ if self.backup and self.file_exists():
+ shutil.copy(self.filename, self.filename + '.orig')
+
+ tmp_filename = self.filename + '.yedit'
+ with open(tmp_filename, 'w') as yfd:
+ # pylint: disable=no-member
+ if hasattr(self.yaml_dict, 'fa'):
+ self.yaml_dict.fa.set_block_style()
+
+ yfd.write(yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
+
+ os.rename(tmp_filename, self.filename)
+
+ return (True, self.yaml_dict)
+
+ def read(self):
+ ''' read from file '''
+ # check if it exists
+ if self.filename is None or not self.file_exists():
+ return None
+
+ contents = None
+ with open(self.filename) as yfd:
+ contents = yfd.read()
+
+ return contents
+
+ def file_exists(self):
+ ''' return whether file exists '''
+ if os.path.exists(self.filename):
+ return True
+
+ return False
+
+ def load(self, content_type='yaml'):
+ ''' return yaml file '''
+ contents = self.read()
+
+ if not contents and not self.content:
+ return None
+
+ if self.content:
+ if isinstance(self.content, dict):
+ self.yaml_dict = self.content
+ return self.yaml_dict
+ elif isinstance(self.content, str):
+ contents = self.content
+
+ # check if it is yaml
+ try:
+ if content_type == 'yaml' and contents:
+ self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
+ # pylint: disable=no-member
+ if hasattr(self.yaml_dict, 'fa'):
+ self.yaml_dict.fa.set_block_style()
+ elif content_type == 'json' and contents:
+ self.yaml_dict = json.loads(contents)
+ except yaml.YAMLError as err:
+ # Error loading yaml or json
+ raise YeditException('Problem with loading yaml file. %s' % err)
+
+ return self.yaml_dict
+
+ def get(self, key):
+ ''' get a specified key'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
+ except KeyError:
+ entry = None
+
+ return entry
+
+ def pop(self, path, key_or_item):
+ ''' remove a key, value pair from a dict or an item for a list'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ return (False, self.yaml_dict)
+
+ if isinstance(entry, dict):
+ # pylint: disable=no-member,maybe-no-member
+ if key_or_item in entry:
+ entry.pop(key_or_item)
+ return (True, self.yaml_dict)
+ return (False, self.yaml_dict)
+
+ elif isinstance(entry, list):
+ # pylint: disable=no-member,maybe-no-member
+ ind = None
+ try:
+ ind = entry.index(key_or_item)
+ except ValueError:
+ return (False, self.yaml_dict)
+
+ entry.pop(ind)
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ def delete(self, path):
+ ''' remove path from a dict'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ return (False, self.yaml_dict)
+
+ result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
+ if not result:
+ return (False, self.yaml_dict)
+
+ return (True, self.yaml_dict)
+
+ def exists(self, path, value):
+ ''' check if value exists at path'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if isinstance(entry, list):
+ if value in entry:
+ return True
+ return False
+
+ elif isinstance(entry, dict):
+ if isinstance(value, dict):
+ rval = False
+ for key, val in value.items():
+ if entry[key] != val:
+ rval = False
+ break
+ else:
+ rval = True
+ return rval
+
+ return value in entry
+
+ return entry == value
+
+ def append(self, path, value):
+ '''append value to a list'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ self.put(path, [])
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ if not isinstance(entry, list):
+ return (False, self.yaml_dict)
+
+ # pylint: disable=no-member,maybe-no-member
+ entry.append(value)
+ return (True, self.yaml_dict)
+
+ # pylint: disable=too-many-arguments
+ def update(self, path, value, index=None, curr_value=None):
+ ''' put path, value into a dict '''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if isinstance(entry, dict):
+ # pylint: disable=no-member,maybe-no-member
+ if not isinstance(value, dict):
+ raise YeditException('Cannot replace key, value entry in ' +
+ 'dict with non-dict type. value=[%s] [%s]' % (value, type(value))) # noqa: E501
+
+ entry.update(value)
+ return (True, self.yaml_dict)
+
+ elif isinstance(entry, list):
+ # pylint: disable=no-member,maybe-no-member
+ ind = None
+ if curr_value:
+ try:
+ ind = entry.index(curr_value)
+ except ValueError:
+ return (False, self.yaml_dict)
+
+ elif index is not None:
+ ind = index
+
+ if ind is not None and entry[ind] != value:
+ entry[ind] = value
+ return (True, self.yaml_dict)
+
+ # see if it exists in the list
+ try:
+ ind = entry.index(value)
+ except ValueError:
+ # doesn't exist, append it
+ entry.append(value)
+ return (True, self.yaml_dict)
+
+ # already exists, return
+ if ind is not None:
+ return (False, self.yaml_dict)
+ return (False, self.yaml_dict)
+
+ def put(self, path, value):
+ ''' put path, value into a dict '''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry == value:
+ return (False, self.yaml_dict)
+
+ # deepcopy didn't work
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ # pylint: disable=no-member
+ if hasattr(self.yaml_dict, 'fa'):
+ tmp_copy.fa.set_block_style()
+ result = Yedit.add_entry(tmp_copy, path, value, self.separator)
+ if not result:
+ return (False, self.yaml_dict)
+
+ self.yaml_dict = tmp_copy
+
+ return (True, self.yaml_dict)
+
+ def create(self, path, value):
+ ''' create a yaml file '''
+ if not self.file_exists():
+ # deepcopy didn't work
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False), # noqa: E501
+ yaml.RoundTripLoader)
+ # pylint: disable=no-member
+ if hasattr(self.yaml_dict, 'fa'):
+ tmp_copy.fa.set_block_style()
+ result = Yedit.add_entry(tmp_copy, path, value, self.separator)
+ if result:
+ self.yaml_dict = tmp_copy
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ @staticmethod
+ def get_curr_value(invalue, val_type):
+ '''return the current value'''
+ if invalue is None:
+ return None
+
+ curr_value = invalue
+ if val_type == 'yaml':
+ curr_value = yaml.load(invalue)
+ elif val_type == 'json':
+ curr_value = json.loads(invalue)
+
+ return curr_value
+
+ @staticmethod
+ def parse_value(inc_value, vtype=''):
+ '''determine value type passed'''
+ true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
+ 'on', 'On', 'ON', ]
+ false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
+ 'off', 'Off', 'OFF']
+
+ # It came in as a string but you didn't specify value_type as string
+ # we will convert to bool if it matches any of the above cases
+ if isinstance(inc_value, str) and 'bool' in vtype:
+ if inc_value not in true_bools and inc_value not in false_bools:
+ raise YeditException('Not a boolean type. str=[%s] vtype=[%s]'
+ % (inc_value, vtype))
+ elif isinstance(inc_value, bool) and 'str' in vtype:
+ inc_value = str(inc_value)
+
+ # If vtype is not str then go ahead and attempt to yaml load it.
+ if isinstance(inc_value, str) and 'str' not in vtype:
+ try:
+ inc_value = yaml.load(inc_value)
+ except Exception:
+ raise YeditException('Could not determine type of incoming ' +
+ 'value. value=[%s] vtype=[%s]'
+ % (type(inc_value), vtype))
+
+ return inc_value
+
+ # pylint: disable=too-many-return-statements,too-many-branches
+ @staticmethod
+ def run_ansible(module):
+ '''perform the idempotent crud operations'''
+ yamlfile = Yedit(filename=module.params['src'],
+ backup=module.params['backup'],
+ separator=module.params['separator'])
+
+ if module.params['src']:
+ rval = yamlfile.load()
+
+ if yamlfile.yaml_dict is None and \
+ module.params['state'] != 'present':
+ return {'failed': True,
+ 'msg': 'Error opening file [%s]. Verify that the ' +
+                        'file exists, that it has correct' +
+ ' permissions, and is valid yaml.'}
+
+ if module.params['state'] == 'list':
+ if module.params['content']:
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
+ yamlfile.yaml_dict = content
+
+ if module.params['key']:
+ rval = yamlfile.get(module.params['key']) or {}
+
+ return {'changed': False, 'result': rval, 'state': "list"}
+
+ elif module.params['state'] == 'absent':
+ if module.params['content']:
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
+ yamlfile.yaml_dict = content
+
+ if module.params['update']:
+ rval = yamlfile.pop(module.params['key'],
+ module.params['value'])
+ else:
+ rval = yamlfile.delete(module.params['key'])
+
+ if rval[0] and module.params['src']:
+ yamlfile.write()
+
+ return {'changed': rval[0], 'result': rval[1], 'state': "absent"}
+
+ elif module.params['state'] == 'present':
+ # check if content is different than what is in the file
+ if module.params['content']:
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
+
+ # We had no edits to make and the contents are the same
+ if yamlfile.yaml_dict == content and \
+ module.params['value'] is None:
+ return {'changed': False,
+ 'result': yamlfile.yaml_dict,
+ 'state': "present"}
+
+ yamlfile.yaml_dict = content
+
+ # we were passed a value; parse it
+ if module.params['value']:
+ value = Yedit.parse_value(module.params['value'],
+ module.params['value_type'])
+ key = module.params['key']
+ if module.params['update']:
+ # pylint: disable=line-too-long
+ curr_value = Yedit.get_curr_value(Yedit.parse_value(module.params['curr_value']), # noqa: E501
+ module.params['curr_value_format']) # noqa: E501
+
+ rval = yamlfile.update(key, value, module.params['index'], curr_value) # noqa: E501
+
+ elif module.params['append']:
+ rval = yamlfile.append(key, value)
+ else:
+ rval = yamlfile.put(key, value)
+
+ if rval[0] and module.params['src']:
+ yamlfile.write()
+
+ return {'changed': rval[0],
+ 'result': rval[1], 'state': "present"}
+
+ # no edits to make
+ if module.params['src']:
+ # pylint: disable=redefined-variable-type
+ rval = yamlfile.write()
+ return {'changed': rval[0],
+ 'result': rval[1],
+ 'state': "present"}
+
+    return {'failed': True, 'msg': 'Unknown state passed'}
+# pylint: disable=too-many-lines
+# noqa: E301,E302,E303,T001
+
+
+class OpenShiftCLIError(Exception):
+ '''Exception class for openshiftcli'''
+ pass
+
+
+# pylint: disable=too-few-public-methods
+class OpenShiftCLI(object):
+ ''' Class to wrap the command line tools '''
+ def __init__(self,
+ namespace,
+ kubeconfig='/etc/origin/master/admin.kubeconfig',
+ verbose=False,
+ all_namespaces=False):
+ ''' Constructor for OpenshiftCLI '''
+ self.namespace = namespace
+ self.verbose = verbose
+ self.kubeconfig = kubeconfig
+ self.all_namespaces = all_namespaces
+
+ # Pylint allows only 5 arguments to be passed.
+ # pylint: disable=too-many-arguments
+ def _replace_content(self, resource, rname, content, force=False, sep='.'):
+ ''' replace the current object with the content '''
+ res = self._get(resource, rname)
+ if not res['results']:
+ return res
+
+ fname = '/tmp/%s' % rname
+ yed = Yedit(fname, res['results'][0], separator=sep)
+ changes = []
+ for key, value in content.items():
+ changes.append(yed.put(key, value))
+
+ if any([change[0] for change in changes]):
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self._replace(fname, force)
+
+ return {'returncode': 0, 'updated': False}
+
+ def _replace(self, fname, force=False):
+ '''replace the current object with oc replace'''
+ cmd = ['replace', '-f', fname]
+ if force:
+ cmd.append('--force')
+ return self.openshift_cmd(cmd)
+
+ def _create_from_content(self, rname, content):
+ '''create a temporary file and then call oc create on it'''
+ fname = '/tmp/%s' % rname
+ yed = Yedit(fname, content=content)
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self._create(fname)
+
+ def _create(self, fname):
+ '''call oc create on a filename'''
+ return self.openshift_cmd(['create', '-f', fname])
+
+ def _delete(self, resource, rname, selector=None):
+ '''call oc delete on a resource'''
+ cmd = ['delete', resource, rname]
+ if selector:
+ cmd.append('--selector=%s' % selector)
+
+ return self.openshift_cmd(cmd)
+
+ def _process(self, template_name, create=False, params=None, template_data=None): # noqa: E501
+ '''process a template
+
+ template_name: the name of the template to process
+ create: whether to send to oc create after processing
+ params: the parameters for the template
+ template_data: the incoming template's data; instead of a file
+ '''
+ cmd = ['process']
+ if template_data:
+ cmd.extend(['-f', '-'])
+ else:
+ cmd.append(template_name)
+ if params:
+ param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ cmd.append('-v')
+ cmd.extend(param_str)
+
+ results = self.openshift_cmd(cmd, output=True, input_data=template_data)
+
+ if results['returncode'] != 0 or not create:
+ return results
+
+ fname = '/tmp/%s' % template_name
+ yed = Yedit(fname, results['results'])
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self.openshift_cmd(['create', '-f', fname])
+
+ def _get(self, resource, rname=None, selector=None):
+ '''return a resource by name '''
+ cmd = ['get', resource]
+ if selector:
+ cmd.append('--selector=%s' % selector)
+
+ cmd.extend(['-o', 'json'])
+
+ if rname:
+ cmd.append(rname)
+
+ rval = self.openshift_cmd(cmd, output=True)
+
+        # Ensure results are returned in an array
+ if 'items' in rval:
+ rval['results'] = rval['items']
+ elif not isinstance(rval['results'], list):
+ rval['results'] = [rval['results']]
+
+ return rval
+
+ def _schedulable(self, node=None, selector=None, schedulable=True):
+        ''' perform oadm manage-node schedulable '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector=%s' % selector)
+
+ cmd.append('--schedulable=%s' % schedulable)
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
+
+ def _list_pods(self, node=None, selector=None, pod_selector=None):
+ ''' perform oadm list pods
+
+ node: the node in which to list pods
+ selector: the label selector filter if provided
+ pod_selector: the pod selector filter if provided
+ '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector=%s' % selector)
+
+ if pod_selector:
+ cmd.append('--pod-selector=%s' % pod_selector)
+
+ cmd.extend(['--list-pods', '-o', 'json'])
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
+
+ # pylint: disable=too-many-arguments
+ def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
+ ''' perform oadm manage-node evacuate '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector=%s' % selector)
+
+ if dry_run:
+ cmd.append('--dry-run')
+
+ if pod_selector:
+ cmd.append('--pod-selector=%s' % pod_selector)
+
+ if grace_period:
+ cmd.append('--grace-period=%s' % int(grace_period))
+
+ if force:
+ cmd.append('--force')
+
+ cmd.append('--evacuate')
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
+
+ def _version(self):
+ ''' return the openshift version'''
+ return self.openshift_cmd(['version'], output=True, output_type='raw')
+
+ def _import_image(self, url=None, name=None, tag=None):
+ ''' perform image import '''
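+        # Illustrative sketch (registry/image names are placeholders):
+        # _import_image(url='registry.example.com', name='myapp', tag='v1') builds
+        # ['import-image', 'myapp:v1', '--from=registry.example.com/myapp:v1',
+        #  '-n<namespace>', '--confirm'] and hands it to openshift_cmd.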
+ cmd = ['import-image']
+
+ image = '{0}'.format(name)
+ if tag:
+ image += ':{0}'.format(tag)
+
+ cmd.append(image)
+
+ if url:
+ cmd.append('--from={0}/{1}'.format(url, image))
+
+ cmd.append('-n{0}'.format(self.namespace))
+
+ cmd.append('--confirm')
+ return self.openshift_cmd(cmd)
+
+ # pylint: disable=too-many-arguments,too-many-branches
+ def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
+ '''Base command for oc '''
+ cmds = []
+ if oadm:
+ cmds = ['/usr/bin/oadm']
+ else:
+ cmds = ['/usr/bin/oc']
+
+ if self.all_namespaces:
+ cmds.extend(['--all-namespaces'])
+ elif self.namespace:
+ cmds.extend(['-n', self.namespace])
+
+ cmds.extend(cmd)
+
+ rval = {}
+ results = ''
+ err = None
+
+ if self.verbose:
+ print(' '.join(cmds))
+
+ proc = subprocess.Popen(cmds,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ env={'KUBECONFIG': self.kubeconfig})
+
+ stdout, stderr = proc.communicate(input_data)
+ rval = {"returncode": proc.returncode,
+ "results": results,
+ "cmd": ' '.join(cmds)}
+
+ if proc.returncode == 0:
+ if output:
+ if output_type == 'json':
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as err:
+ if "No JSON object could be decoded" in err.args:
+ err = err.args
+ elif output_type == 'raw':
+ rval['results'] = stdout
+
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if err:
+ rval.update({"err": err,
+ "stderr": stderr,
+ "stdout": stdout,
+ "cmd": cmds})
+
+ else:
+ rval.update({"stderr": stderr,
+ "stdout": stdout,
+ "results": {}})
+
+ return rval
+
+
+class Utils(object):
+ ''' utilities for openshiftcli modules '''
+ @staticmethod
+ def create_file(rname, data, ftype='yaml'):
+ ''' create a file in tmp with name and contents'''
+ path = os.path.join('/tmp', rname)
+ with open(path, 'w') as fds:
+ if ftype == 'yaml':
+ fds.write(yaml.dump(data, Dumper=yaml.RoundTripDumper))
+
+ elif ftype == 'json':
+ fds.write(json.dumps(data))
+ else:
+ fds.write(data)
+
+ # Register cleanup when module is done
+ atexit.register(Utils.cleanup, [path])
+ return path
+
+ @staticmethod
+ def create_files_from_contents(content, content_type=None):
+ '''Turn an array of dict: filename, content into a files array'''
+ if not isinstance(content, list):
+ content = [content]
+ files = []
+ for item in content:
+ path = Utils.create_file(item['path'], item['data'], ftype=content_type)
+ files.append({'name': os.path.basename(path), 'path': path})
+ return files
+
+ @staticmethod
+ def cleanup(files):
+ '''Clean up on exit '''
+ for sfile in files:
+ if os.path.exists(sfile):
+ if os.path.isdir(sfile):
+ shutil.rmtree(sfile)
+ elif os.path.isfile(sfile):
+ os.remove(sfile)
+
+ @staticmethod
+ def exists(results, _name):
+ ''' Check to see if the results include the name '''
+ if not results:
+ return False
+
+ if Utils.find_result(results, _name):
+ return True
+
+ return False
+
+ @staticmethod
+ def find_result(results, _name):
+ ''' Find the specified result by name'''
+ rval = None
+ for result in results:
+ if 'metadata' in result and result['metadata']['name'] == _name:
+ rval = result
+ break
+
+ return rval
+
+ @staticmethod
+ def get_resource_file(sfile, sfile_type='yaml'):
+        ''' return the parsed contents of a resource file '''
+ contents = None
+ with open(sfile) as sfd:
+ contents = sfd.read()
+
+ if sfile_type == 'yaml':
+ contents = yaml.load(contents, yaml.RoundTripLoader)
+ elif sfile_type == 'json':
+ contents = json.loads(contents)
+
+ return contents
+
+ @staticmethod
+ def filter_versions(stdout):
+ ''' filter the oc version output '''
+
+ version_dict = {}
+ version_search = ['oc', 'openshift', 'kubernetes']
+
+ for line in stdout.strip().split('\n'):
+ for term in version_search:
+ if not line:
+ continue
+ if line.startswith(term):
+ version_dict[term] = line.split()[-1]
+
+ # horrible hack to get openshift version in Openshift 3.2
+        # By default, "oc version" in 3.2 does not return an "openshift" version
+ if "openshift" not in version_dict:
+ version_dict["openshift"] = version_dict["oc"]
+
+ return version_dict
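+        # Example (illustrative input): given stdout lines such as
+        #   oc v3.3.0.33
+        #   kubernetes v1.3.0+52492b4
+        # this returns {'oc': 'v3.3.0.33', 'kubernetes': 'v1.3.0+52492b4',
+        # 'openshift': 'v3.3.0.33'}, with 'openshift' falling back to the 'oc' value.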
+
+ @staticmethod
+ def add_custom_versions(versions):
+ ''' create custom versions strings '''
+
+ versions_dict = {}
+
+ for tech, version in versions.items():
+ # clean up "-" from version
+ if "-" in version:
+ version = version.split("-")[0]
+
+ if version.startswith('v'):
+ versions_dict[tech + '_numeric'] = version[1:].split('+')[0]
+ # "v3.3.0.33" is what we have, we want "3.3"
+ versions_dict[tech + '_short'] = version[1:4]
+
+ return versions_dict
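+        # Example (illustrative): {'oc': 'v3.3.0.33'} yields
+        # {'oc_numeric': '3.3.0.33', 'oc_short': '3.3'}.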
+
+ @staticmethod
+ def openshift_installed():
+ ''' check if openshift is installed '''
+ import yum
+
+ yum_base = yum.YumBase()
+ if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
+ return True
+
+ return False
+
+ # Disabling too-many-branches. This is a yaml dictionary comparison function
+ # pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
+ @staticmethod
+ def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
+ ''' Given a user defined definition, compare it with the results given back by our query. '''
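+        # Illustrative: comparing {'spec': {'replicas': 2}} with a fetched definition whose
+        # 'spec' matches returns True ('metadata' and 'status' are ignored); any missing key,
+        # differing list length, or unequal nested value returns False.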
+
+ # Currently these values are autogenerated and we do not need to check them
+ skip = ['metadata', 'status']
+ if skip_keys:
+ skip.extend(skip_keys)
+
+ for key, value in result_def.items():
+ if key in skip:
+ continue
+
+ # Both are lists
+ if isinstance(value, list):
+ if key not in user_def:
+ if debug:
+ print('User data does not have key [%s]' % key)
+ print('User data: %s' % user_def)
+ return False
+
+ if not isinstance(user_def[key], list):
+ if debug:
+ print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key]))
+ return False
+
+ if len(user_def[key]) != len(value):
+ if debug:
+ print("List lengths are not equal.")
+ print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value)))
+ print("user_def: %s" % user_def[key])
+ print("value: %s" % value)
+ return False
+
+ for values in zip(user_def[key], value):
+ if isinstance(values[0], dict) and isinstance(values[1], dict):
+ if debug:
+ print('sending list - list')
+ print(type(values[0]))
+ print(type(values[1]))
+ result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
+ if not result:
+ print('list compare returned false')
+ return False
+
+ elif value != user_def[key]:
+ if debug:
+ print('value should be identical')
+ print(value)
+ print(user_def[key])
+ return False
+
+ # recurse on a dictionary
+ elif isinstance(value, dict):
+ if key not in user_def:
+ if debug:
+ print("user_def does not have key [%s]" % key)
+ return False
+ if not isinstance(user_def[key], dict):
+ if debug:
+ print("dict returned false: not instance of dict")
+ return False
+
+ # before passing ensure keys match
+ api_values = set(value.keys()) - set(skip)
+ user_values = set(user_def[key].keys()) - set(skip)
+ if api_values != user_values:
+ if debug:
+ print("keys are not equal in dict")
+ print(api_values)
+ print(user_values)
+ return False
+
+ result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
+ if not result:
+ if debug:
+ print("dict returned false")
+ print(result)
+ return False
+
+ # Verify each key, value pair is the same
+ else:
+ if key not in user_def or value != user_def[key]:
+ if debug:
+ print("value not equal; user_def does not have key")
+ print(key)
+ print(value)
+ if key in user_def:
+ print(user_def[key])
+ return False
+
+ if debug:
+ print('returning true')
+ return True
+
+
+class OpenShiftCLIConfig(object):
+ '''Generic Config'''
+ def __init__(self, rname, namespace, kubeconfig, options):
+ self.kubeconfig = kubeconfig
+ self.name = rname
+ self.namespace = namespace
+ self._options = options
+
+ @property
+ def config_options(self):
+ ''' return config options '''
+ return self._options
+
+ def to_option_list(self):
+        '''return all options as a list of cli parameters'''
+ return self.stringify()
+
+ def stringify(self):
+        ''' return the options hash as a list of cli params (--key=value) '''
+ rval = []
+ for key, data in self.config_options.items():
+ if data['include'] \
+ and (data['value'] or isinstance(data['value'], int)):
+ rval.append('--%s=%s' % (key.replace('_', '-'), data['value']))
+
+ return rval
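+    # Illustrative sketch: with config_options such as
+    #   {'replicas': {'value': 2, 'include': True},
+    #    'service_account': {'value': '', 'include': True}}
+    # stringify() returns ['--replicas=2']; empty, non-integer values are skipped and
+    # underscores in option names become dashes.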
+
+
+class Edit(OpenShiftCLI):
+ ''' Class to wrap the oc command line tools
+ '''
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ kind,
+ namespace,
+ resource_name=None,
+ kubeconfig='/etc/origin/master/admin.kubeconfig',
+ separator='.',
+ verbose=False):
+ ''' Constructor for OpenshiftOC '''
+ super(Edit, self).__init__(namespace, kubeconfig)
+ self.namespace = namespace
+ self.kind = kind
+ self.name = resource_name
+ self.kubeconfig = kubeconfig
+ self.separator = separator
+ self.verbose = verbose
+
+ def get(self):
+        '''return the object by name '''
+ return self._get(self.kind, self.name)
+
+ def update(self, file_name, content, force=False, content_type='yaml'):
+ '''run update '''
+ if file_name:
+ if content_type == 'yaml':
+ data = yaml.load(open(file_name))
+ elif content_type == 'json':
+ data = json.loads(open(file_name).read())
+
+ changes = []
+ yed = Yedit(filename=file_name, content=data, separator=self.separator)
+ for key, value in content.items():
+ changes.append(yed.put(key, value))
+
+ if any([not change[0] for change in changes]):
+ return {'returncode': 0, 'updated': False}
+
+ yed.write()
+
+ atexit.register(Utils.cleanup, [file_name])
+
+ return self._replace(file_name, force=force)
+
+ return self._replace_content(self.kind, self.name, content, force=force, sep=self.separator)
+
+ @staticmethod
+ def run_ansible(params, check_mode):
+ '''run the ansible idempotent code'''
+
+ ocedit = Edit(params['kind'],
+ params['namespace'],
+ params['name'],
+ kubeconfig=params['kubeconfig'],
+ separator=params['separator'],
+ verbose=params['debug'])
+
+ api_rval = ocedit.get()
+
+        ########
+        # Verify the object exists; oc_edit cannot create it
+        ########
+ if not Utils.exists(api_rval['results'], params['name']):
+ return {"failed": True, 'msg': api_rval}
+
+ ########
+ # Update
+ ########
+ if check_mode:
+ return {'changed': True, 'msg': 'CHECK_MODE: Would have performed edit'}
+
+ api_rval = ocedit.update(params['file_name'],
+ params['content'],
+ params['force'],
+ params['file_format'])
+
+ if api_rval['returncode'] != 0:
+ return {"failed": True, 'msg': api_rval}
+
+ if 'updated' in api_rval and not api_rval['updated']:
+ return {"changed": False, 'results': api_rval, 'state': 'present'}
+
+ # return the created object
+ api_rval = ocedit.get()
+
+ if api_rval['returncode'] != 0:
+ return {"failed": True, 'msg': api_rval}
+
+ return {"changed": True, 'results': api_rval, 'state': 'present'}
+
+
+def main():
+ '''
+ ansible oc module for editing objects
+ '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
+ state=dict(default='present', type='str',
+ choices=['present']),
+ debug=dict(default=False, type='bool'),
+ namespace=dict(default='default', type='str'),
+ name=dict(default=None, required=True, type='str'),
+ kind=dict(required=True,
+ type='str',
+ choices=['dc', 'deploymentconfig',
+ 'rc', 'replicationcontroller',
+ 'svc', 'service',
+ 'scc', 'securitycontextconstraints',
+ 'ns', 'namespace', 'project', 'projects',
+ 'is', 'imagestream',
+ 'istag', 'imagestreamtag',
+ 'bc', 'buildconfig',
+ 'routes',
+ 'node',
+ 'secret',
+ 'pv', 'persistentvolume']),
+ file_name=dict(default=None, type='str'),
+ file_format=dict(default='yaml', type='str'),
+ content=dict(default=None, required=True, type='dict'),
+ force=dict(default=False, type='bool'),
+ separator=dict(default='.', type='str'),
+ ),
+ supports_check_mode=True,
+ )
+
+ rval = Edit.run_ansible(module.params, module.check_mode)
+ if 'failed' in rval:
+ module.fail_json(**rval)
+
+ module.exit_json(**rval)
+
+if __name__ == '__main__':
+ main()
diff --git a/roles/lib_openshift/library/oc_obj.py b/roles/lib_openshift/library/oc_obj.py
new file mode 100644
index 000000000..5b501484b
--- /dev/null
+++ b/roles/lib_openshift/library/oc_obj.py
@@ -0,0 +1,1444 @@
+#!/usr/bin/env python
+# pylint: disable=missing-docstring
+# flake8: noqa: T001
+# ___ ___ _ _ ___ ___ _ _____ ___ ___
+# / __| __| \| | __| _ \ /_\_ _| __| \
+# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
+# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
+# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
+# | |) | (_) | | .` | (_) || | | _|| |) | | | |
+# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
+#
+# Copyright 2016 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+'''
+ OpenShiftCLI class that wraps the oc commands in a subprocess
+'''
+# pylint: disable=too-many-lines
+
+from __future__ import print_function
+import atexit
+import json
+import os
+import re
+import shutil
+import subprocess
+# pylint: disable=import-error
+import ruamel.yaml as yaml
+from ansible.module_utils.basic import AnsibleModule
+
+DOCUMENTATION = '''
+---
+module: oc_obj
+short_description: Generic interface to openshift objects
+description:
+ - Manage openshift objects programmatically.
+options:
+ state:
+ description:
+    - State represents whether to create, modify, delete, or list the object.
+ required: true
+ default: present
+ choices: ["present", "absent", "list"]
+ aliases: []
+ kubeconfig:
+ description:
+ - The path for the kubeconfig file to use for authentication
+ required: false
+ default: /etc/origin/master/admin.kubeconfig
+ aliases: []
+ debug:
+ description:
+ - Turn on debug output.
+ required: false
+ default: False
+ aliases: []
+ name:
+ description:
+ - Name of the object that is being queried.
+ required: false
+ default: None
+ aliases: []
+ namespace:
+ description:
+ - The namespace where the object lives.
+ required: false
+    default: default
+ aliases: []
+  all_namespaces:
+    description:
+    - Whether to query the object across all namespaces rather than a single one.
+ required: false
+ default: false
+ aliases: []
+ kind:
+ description:
+ - The kind attribute of the object. e.g. dc, bc, svc, route
+ required: True
+ default: None
+ aliases: []
+ files:
+ description:
+ - A list of files provided for object
+ required: false
+ default: None
+ aliases: []
+ delete_after:
+ description:
+ - Whether or not to delete the files after processing them.
+ required: false
+ default: false
+ aliases: []
+ content:
+ description:
+ - Content of the object being managed.
+ required: false
+ default: None
+ aliases: []
+ force:
+ description:
+ - Whether or not to force the operation
+ required: false
+ default: None
+ aliases: []
+ selector:
+ description:
+ - Selector that gets added to the query.
+ required: false
+ default: None
+ aliases: []
+author:
+- "Kenny Woodson <kwoodson@redhat.com>"
+extends_documentation_fragment: []
+'''
+
+EXAMPLES = '''
+oc_obj:
+ kind: dc
+ name: router
+ namespace: default
+register: router_output
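+
+# Illustrative sketch (namespace/register names are assumptions, not tested output):
+# list every route in a project and capture the result
+oc_obj:
+  kind: routes
+  namespace: my-project
+  state: list
+register: project_routes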
+'''
+# noqa: E301,E302
+
+
+class YeditException(Exception):
+ ''' Exception class for Yedit '''
+ pass
+
+
+# pylint: disable=too-many-public-methods
+class Yedit(object):
+ ''' Class to modify yaml files '''
+ re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ com_sep = set(['.', '#', '|', ':'])
+
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ filename=None,
+ content=None,
+ content_type='yaml',
+ separator='.',
+ backup=False):
+ self.content = content
+ self._separator = separator
+ self.filename = filename
+ self.__yaml_dict = content
+ self.content_type = content_type
+ self.backup = backup
+ self.load(content_type=self.content_type)
+ if self.__yaml_dict is None:
+ self.__yaml_dict = {}
+
+ @property
+ def separator(self):
+        ''' getter method for separator '''
+ return self._separator
+
+ @separator.setter
+    def separator(self, value):
+        ''' setter method for separator '''
+        self._separator = value
+
+ @property
+ def yaml_dict(self):
+ ''' getter method for yaml_dict '''
+ return self.__yaml_dict
+
+ @yaml_dict.setter
+ def yaml_dict(self, value):
+ ''' setter method for yaml_dict '''
+ self.__yaml_dict = value
+
+ @staticmethod
+ def parse_key(key, sep='.'):
+ '''parse the key allowing the appropriate separator'''
+ common_separators = list(Yedit.com_sep - set([sep]))
+ return re.findall(Yedit.re_key % ''.join(common_separators), key)
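+    # Example (illustrative): with the default '.' separator,
+    # parse_key('a.b[0].c') yields [('', 'a'), ('', 'b'), ('0', ''), ('', 'c')],
+    # i.e. (list index, dict key) tuples consumed by get/add/remove_entry.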
+
+ @staticmethod
+ def valid_key(key, sep='.'):
+ '''validate the incoming key'''
+ common_separators = list(Yedit.com_sep - set([sep]))
+ if not re.match(Yedit.re_valid_key % ''.join(common_separators), key):
+ return False
+
+ return True
+
+ @staticmethod
+ def remove_entry(data, key, sep='.'):
+ ''' remove data at location key '''
+ if key == '' and isinstance(data, dict):
+ data.clear()
+ return True
+ elif key == '' and isinstance(data, list):
+ del data[:]
+ return True
+
+ if not (key and Yedit.valid_key(key, sep)) and \
+ isinstance(data, (list, dict)):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes[:-1]:
+ if dict_key and isinstance(data, dict):
+ data = data.get(dict_key, None)
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ # process last index for remove
+ # expected list entry
+ if key_indexes[-1][0]:
+ if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
+ del data[int(key_indexes[-1][0])]
+ return True
+
+ # expected dict entry
+ elif key_indexes[-1][1]:
+ if isinstance(data, dict):
+ del data[key_indexes[-1][1]]
+ return True
+
+ @staticmethod
+ def add_entry(data, key, item=None, sep='.'):
+        ''' Add an item to a dictionary at the location given by key notation a.b.c
+            d = {'a': {'b': 'c'}}
+            key = a.b
+            item = 'd'
+            result: d = {'a': {'b': 'd'}}
+        '''
+ if key == '':
+ pass
+ elif (not (key and Yedit.valid_key(key, sep)) and
+ isinstance(data, (list, dict))):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes[:-1]:
+ if dict_key:
+ if isinstance(data, dict) and dict_key in data and data[dict_key]: # noqa: E501
+ data = data[dict_key]
+ continue
+
+ elif data and not isinstance(data, dict):
+ return None
+
+ data[dict_key] = {}
+ data = data[dict_key]
+
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ if key == '':
+ data = item
+
+ # process last index for add
+ # expected list entry
+ elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
+ data[int(key_indexes[-1][0])] = item
+
+ # expected dict entry
+ elif key_indexes[-1][1] and isinstance(data, dict):
+ data[key_indexes[-1][1]] = item
+
+ return data
+
+ @staticmethod
+ def get_entry(data, key, sep='.'):
+ ''' Get an item from a dictionary with key notation a.b.c
+            d = {'a': {'b': 'c'}}
+ key = a.b
+ return c
+ '''
+ if key == '':
+ pass
+ elif (not (key and Yedit.valid_key(key, sep)) and
+ isinstance(data, (list, dict))):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes:
+ if dict_key and isinstance(data, dict):
+ data = data.get(dict_key, None)
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ return data
+
+ def write(self):
+ ''' write to file '''
+ if not self.filename:
+ raise YeditException('Please specify a filename.')
+
+ if self.backup and self.file_exists():
+ shutil.copy(self.filename, self.filename + '.orig')
+
+ tmp_filename = self.filename + '.yedit'
+ with open(tmp_filename, 'w') as yfd:
+ # pylint: disable=no-member
+ if hasattr(self.yaml_dict, 'fa'):
+ self.yaml_dict.fa.set_block_style()
+
+ yfd.write(yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
+
+ os.rename(tmp_filename, self.filename)
+
+ return (True, self.yaml_dict)
+
+ def read(self):
+ ''' read from file '''
+ # check if it exists
+ if self.filename is None or not self.file_exists():
+ return None
+
+ contents = None
+ with open(self.filename) as yfd:
+ contents = yfd.read()
+
+ return contents
+
+ def file_exists(self):
+ ''' return whether file exists '''
+ if os.path.exists(self.filename):
+ return True
+
+ return False
+
+ def load(self, content_type='yaml'):
+ ''' return yaml file '''
+ contents = self.read()
+
+ if not contents and not self.content:
+ return None
+
+ if self.content:
+ if isinstance(self.content, dict):
+ self.yaml_dict = self.content
+ return self.yaml_dict
+ elif isinstance(self.content, str):
+ contents = self.content
+
+ # check if it is yaml
+ try:
+ if content_type == 'yaml' and contents:
+ self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
+ # pylint: disable=no-member
+ if hasattr(self.yaml_dict, 'fa'):
+ self.yaml_dict.fa.set_block_style()
+ elif content_type == 'json' and contents:
+ self.yaml_dict = json.loads(contents)
+ except yaml.YAMLError as err:
+ # Error loading yaml or json
+ raise YeditException('Problem with loading yaml file. %s' % err)
+
+ return self.yaml_dict
+
+ def get(self, key):
+ ''' get a specified key'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
+ except KeyError:
+ entry = None
+
+ return entry
+
+ def pop(self, path, key_or_item):
+ ''' remove a key, value pair from a dict or an item for a list'''
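+        # Illustrative: with yaml_dict {'a': {'b': ['x', 'y']}}, pop('a.b', 'x') removes
+        # 'x' from the list and returns (True, {'a': {'b': ['y']}}); a missing path
+        # returns (False, yaml_dict) unchanged.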
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ return (False, self.yaml_dict)
+
+ if isinstance(entry, dict):
+ # pylint: disable=no-member,maybe-no-member
+ if key_or_item in entry:
+ entry.pop(key_or_item)
+ return (True, self.yaml_dict)
+ return (False, self.yaml_dict)
+
+ elif isinstance(entry, list):
+ # pylint: disable=no-member,maybe-no-member
+ ind = None
+ try:
+ ind = entry.index(key_or_item)
+ except ValueError:
+ return (False, self.yaml_dict)
+
+ entry.pop(ind)
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ def delete(self, path):
+ ''' remove path from a dict'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ return (False, self.yaml_dict)
+
+ result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
+ if not result:
+ return (False, self.yaml_dict)
+
+ return (True, self.yaml_dict)
+
+ def exists(self, path, value):
+ ''' check if value exists at path'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if isinstance(entry, list):
+ if value in entry:
+ return True
+ return False
+
+ elif isinstance(entry, dict):
+ if isinstance(value, dict):
+ rval = False
+ for key, val in value.items():
+ if entry[key] != val:
+ rval = False
+ break
+ else:
+ rval = True
+ return rval
+
+ return value in entry
+
+ return entry == value
+
+ def append(self, path, value):
+ '''append value to a list'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ self.put(path, [])
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ if not isinstance(entry, list):
+ return (False, self.yaml_dict)
+
+ # pylint: disable=no-member,maybe-no-member
+ entry.append(value)
+ return (True, self.yaml_dict)
+
+ # pylint: disable=too-many-arguments
+ def update(self, path, value, index=None, curr_value=None):
+        ''' update the entry at path: merge into a dict or replace/append a list item '''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if isinstance(entry, dict):
+ # pylint: disable=no-member,maybe-no-member
+ if not isinstance(value, dict):
+ raise YeditException('Cannot replace key, value entry in ' +
+ 'dict with non-dict type. value=[%s] [%s]' % (value, type(value))) # noqa: E501
+
+ entry.update(value)
+ return (True, self.yaml_dict)
+
+ elif isinstance(entry, list):
+ # pylint: disable=no-member,maybe-no-member
+ ind = None
+ if curr_value:
+ try:
+ ind = entry.index(curr_value)
+ except ValueError:
+ return (False, self.yaml_dict)
+
+ elif index is not None:
+ ind = index
+
+ if ind is not None and entry[ind] != value:
+ entry[ind] = value
+ return (True, self.yaml_dict)
+
+ # see if it exists in the list
+ try:
+ ind = entry.index(value)
+ except ValueError:
+ # doesn't exist, append it
+ entry.append(value)
+ return (True, self.yaml_dict)
+
+ # already exists, return
+ if ind is not None:
+ return (False, self.yaml_dict)
+ return (False, self.yaml_dict)
+
+ def put(self, path, value):
+ ''' put path, value into a dict '''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry == value:
+ return (False, self.yaml_dict)
+
+ # deepcopy didn't work
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ # pylint: disable=no-member
+ if hasattr(self.yaml_dict, 'fa'):
+ tmp_copy.fa.set_block_style()
+ result = Yedit.add_entry(tmp_copy, path, value, self.separator)
+ if not result:
+ return (False, self.yaml_dict)
+
+ self.yaml_dict = tmp_copy
+
+ return (True, self.yaml_dict)
+
+ def create(self, path, value):
+ ''' create a yaml file '''
+ if not self.file_exists():
+ # deepcopy didn't work
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False), # noqa: E501
+ yaml.RoundTripLoader)
+ # pylint: disable=no-member
+ if hasattr(self.yaml_dict, 'fa'):
+ tmp_copy.fa.set_block_style()
+ result = Yedit.add_entry(tmp_copy, path, value, self.separator)
+ if result:
+ self.yaml_dict = tmp_copy
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ @staticmethod
+ def get_curr_value(invalue, val_type):
+ '''return the current value'''
+ if invalue is None:
+ return None
+
+ curr_value = invalue
+ if val_type == 'yaml':
+ curr_value = yaml.load(invalue)
+ elif val_type == 'json':
+ curr_value = json.loads(invalue)
+
+ return curr_value
+
+ @staticmethod
+ def parse_value(inc_value, vtype=''):
+ '''determine value type passed'''
+ true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
+ 'on', 'On', 'ON', ]
+ false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
+ 'off', 'Off', 'OFF']
+
+ # It came in as a string but you didn't specify value_type as string
+ # we will convert to bool if it matches any of the above cases
+ if isinstance(inc_value, str) and 'bool' in vtype:
+ if inc_value not in true_bools and inc_value not in false_bools:
+ raise YeditException('Not a boolean type. str=[%s] vtype=[%s]'
+ % (inc_value, vtype))
+ elif isinstance(inc_value, bool) and 'str' in vtype:
+ inc_value = str(inc_value)
+
+ # If vtype is not str then go ahead and attempt to yaml load it.
+ if isinstance(inc_value, str) and 'str' not in vtype:
+ try:
+ inc_value = yaml.load(inc_value)
+ except Exception:
+ raise YeditException('Could not determine type of incoming ' +
+ 'value. value=[%s] vtype=[%s]'
+ % (type(inc_value), vtype))
+
+ return inc_value
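+        # Illustrative: parse_value('true', vtype='bool') comes back as the boolean True,
+        # while parse_value('3', vtype='str') is left as the literal string '3'.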
+
+ # pylint: disable=too-many-return-statements,too-many-branches
+ @staticmethod
+ def run_ansible(module):
+ '''perform the idempotent crud operations'''
+ yamlfile = Yedit(filename=module.params['src'],
+ backup=module.params['backup'],
+ separator=module.params['separator'])
+
+ if module.params['src']:
+ rval = yamlfile.load()
+
+ if yamlfile.yaml_dict is None and \
+ module.params['state'] != 'present':
+ return {'failed': True,
+                        'msg': 'Error opening file [%s]. Verify that the '
+                               'file exists, that it has the correct '
+                               'permissions, and is valid yaml.' % module.params['src']}
+
+ if module.params['state'] == 'list':
+ if module.params['content']:
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
+ yamlfile.yaml_dict = content
+
+ if module.params['key']:
+ rval = yamlfile.get(module.params['key']) or {}
+
+ return {'changed': False, 'result': rval, 'state': "list"}
+
+ elif module.params['state'] == 'absent':
+ if module.params['content']:
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
+ yamlfile.yaml_dict = content
+
+ if module.params['update']:
+ rval = yamlfile.pop(module.params['key'],
+ module.params['value'])
+ else:
+ rval = yamlfile.delete(module.params['key'])
+
+ if rval[0] and module.params['src']:
+ yamlfile.write()
+
+ return {'changed': rval[0], 'result': rval[1], 'state': "absent"}
+
+ elif module.params['state'] == 'present':
+ # check if content is different than what is in the file
+ if module.params['content']:
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
+
+ # We had no edits to make and the contents are the same
+ if yamlfile.yaml_dict == content and \
+ module.params['value'] is None:
+ return {'changed': False,
+ 'result': yamlfile.yaml_dict,
+ 'state': "present"}
+
+ yamlfile.yaml_dict = content
+
+ # we were passed a value; parse it
+ if module.params['value']:
+ value = Yedit.parse_value(module.params['value'],
+ module.params['value_type'])
+ key = module.params['key']
+ if module.params['update']:
+ # pylint: disable=line-too-long
+ curr_value = Yedit.get_curr_value(Yedit.parse_value(module.params['curr_value']), # noqa: E501
+ module.params['curr_value_format']) # noqa: E501
+
+ rval = yamlfile.update(key, value, module.params['index'], curr_value) # noqa: E501
+
+ elif module.params['append']:
+ rval = yamlfile.append(key, value)
+ else:
+ rval = yamlfile.put(key, value)
+
+ if rval[0] and module.params['src']:
+ yamlfile.write()
+
+ return {'changed': rval[0],
+ 'result': rval[1], 'state': "present"}
+
+ # no edits to make
+ if module.params['src']:
+ # pylint: disable=redefined-variable-type
+ rval = yamlfile.write()
+ return {'changed': rval[0],
+ 'result': rval[1],
+ 'state': "present"}
+
+        return {'failed': True, 'msg': 'Unknown state passed'}
+# pylint: disable=too-many-lines
+# noqa: E301,E302,E303,T001
+
+
+class OpenShiftCLIError(Exception):
+ '''Exception class for openshiftcli'''
+ pass
+
+
+# pylint: disable=too-few-public-methods
+class OpenShiftCLI(object):
+ ''' Class to wrap the command line tools '''
+ def __init__(self,
+ namespace,
+ kubeconfig='/etc/origin/master/admin.kubeconfig',
+ verbose=False,
+ all_namespaces=False):
+ ''' Constructor for OpenshiftCLI '''
+ self.namespace = namespace
+ self.verbose = verbose
+ self.kubeconfig = kubeconfig
+ self.all_namespaces = all_namespaces
+
+ # Pylint allows only 5 arguments to be passed.
+ # pylint: disable=too-many-arguments
+ def _replace_content(self, resource, rname, content, force=False, sep='.'):
+ ''' replace the current object with the content '''
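+        # Illustrative sketch: _replace_content('dc', 'router', {'spec.replicas': 3})
+        # fetches the live object, applies the key/value edits through Yedit and,
+        # if anything changed, writes the result to /tmp/router and runs
+        # 'oc replace -f /tmp/router'.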
+ res = self._get(resource, rname)
+ if not res['results']:
+ return res
+
+ fname = '/tmp/%s' % rname
+ yed = Yedit(fname, res['results'][0], separator=sep)
+ changes = []
+ for key, value in content.items():
+ changes.append(yed.put(key, value))
+
+ if any([change[0] for change in changes]):
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self._replace(fname, force)
+
+ return {'returncode': 0, 'updated': False}
+
+ def _replace(self, fname, force=False):
+ '''replace the current object with oc replace'''
+ cmd = ['replace', '-f', fname]
+ if force:
+ cmd.append('--force')
+ return self.openshift_cmd(cmd)
+
+ def _create_from_content(self, rname, content):
+ '''create a temporary file and then call oc create on it'''
+ fname = '/tmp/%s' % rname
+ yed = Yedit(fname, content=content)
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self._create(fname)
+
+ def _create(self, fname):
+ '''call oc create on a filename'''
+ return self.openshift_cmd(['create', '-f', fname])
+
+ def _delete(self, resource, rname, selector=None):
+ '''call oc delete on a resource'''
+ cmd = ['delete', resource, rname]
+ if selector:
+ cmd.append('--selector=%s' % selector)
+
+ return self.openshift_cmd(cmd)
+
+ def _process(self, template_name, create=False, params=None, template_data=None): # noqa: E501
+ '''process a template
+
+ template_name: the name of the template to process
+ create: whether to send to oc create after processing
+ params: the parameters for the template
+ template_data: the incoming template's data; instead of a file
+ '''
+ cmd = ['process']
+ if template_data:
+ cmd.extend(['-f', '-'])
+ else:
+ cmd.append(template_name)
+ if params:
+ param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ cmd.append('-v')
+ cmd.extend(param_str)
+
+ results = self.openshift_cmd(cmd, output=True, input_data=template_data)
+
+ if results['returncode'] != 0 or not create:
+ return results
+
+ fname = '/tmp/%s' % template_name
+ yed = Yedit(fname, results['results'])
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self.openshift_cmd(['create', '-f', fname])
+
+ def _get(self, resource, rname=None, selector=None):
+ '''return a resource by name '''
+ cmd = ['get', resource]
+ if selector:
+ cmd.append('--selector=%s' % selector)
+
+ cmd.extend(['-o', 'json'])
+
+ if rname:
+ cmd.append(rname)
+
+ rval = self.openshift_cmd(cmd, output=True)
+
+        # Ensure results are returned in an array
+ if 'items' in rval:
+ rval['results'] = rval['items']
+ elif not isinstance(rval['results'], list):
+ rval['results'] = [rval['results']]
+
+ return rval
+
+ def _schedulable(self, node=None, selector=None, schedulable=True):
+        ''' perform oadm manage-node --schedulable '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector=%s' % selector)
+
+ cmd.append('--schedulable=%s' % schedulable)
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
+
+ def _list_pods(self, node=None, selector=None, pod_selector=None):
+ ''' perform oadm list pods
+
+ node: the node in which to list pods
+ selector: the label selector filter if provided
+ pod_selector: the pod selector filter if provided
+ '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector=%s' % selector)
+
+ if pod_selector:
+ cmd.append('--pod-selector=%s' % pod_selector)
+
+ cmd.extend(['--list-pods', '-o', 'json'])
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
+
+ # pylint: disable=too-many-arguments
+ def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
+ ''' perform oadm manage-node evacuate '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector=%s' % selector)
+
+ if dry_run:
+ cmd.append('--dry-run')
+
+ if pod_selector:
+ cmd.append('--pod-selector=%s' % pod_selector)
+
+ if grace_period:
+ cmd.append('--grace-period=%s' % int(grace_period))
+
+ if force:
+ cmd.append('--force')
+
+ cmd.append('--evacuate')
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
+
+ def _version(self):
+ ''' return the openshift version'''
+ return self.openshift_cmd(['version'], output=True, output_type='raw')
+
+ def _import_image(self, url=None, name=None, tag=None):
+ ''' perform image import '''
+ cmd = ['import-image']
+
+ image = '{0}'.format(name)
+ if tag:
+ image += ':{0}'.format(tag)
+
+ cmd.append(image)
+
+ if url:
+ cmd.append('--from={0}/{1}'.format(url, image))
+
+ cmd.append('-n{0}'.format(self.namespace))
+
+ cmd.append('--confirm')
+ return self.openshift_cmd(cmd)
+
+ # pylint: disable=too-many-arguments,too-many-branches
+ def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
+ '''Base command for oc '''
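+        # Illustrative: openshift_cmd(['get', 'routes', '-o', 'json']) executes roughly
+        # '/usr/bin/oc -n <namespace> get routes -o json' (or with '--all-namespaces'),
+        # using the kubeconfig passed to the constructor via the KUBECONFIG env var.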
+ cmds = []
+ if oadm:
+ cmds = ['/usr/bin/oadm']
+ else:
+ cmds = ['/usr/bin/oc']
+
+ if self.all_namespaces:
+ cmds.extend(['--all-namespaces'])
+ elif self.namespace:
+ cmds.extend(['-n', self.namespace])
+
+ cmds.extend(cmd)
+
+ rval = {}
+ results = ''
+ err = None
+
+ if self.verbose:
+ print(' '.join(cmds))
+
+ proc = subprocess.Popen(cmds,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ env={'KUBECONFIG': self.kubeconfig})
+
+ stdout, stderr = proc.communicate(input_data)
+ rval = {"returncode": proc.returncode,
+ "results": results,
+ "cmd": ' '.join(cmds)}
+
+ if proc.returncode == 0:
+ if output:
+ if output_type == 'json':
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as err:
+ if "No JSON object could be decoded" in err.args:
+ err = err.args
+ elif output_type == 'raw':
+ rval['results'] = stdout
+
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if err:
+ rval.update({"err": err,
+ "stderr": stderr,
+ "stdout": stdout,
+ "cmd": cmds})
+
+ else:
+ rval.update({"stderr": stderr,
+ "stdout": stdout,
+ "results": {}})
+
+ return rval
+
+
+class Utils(object):
+ ''' utilities for openshiftcli modules '''
+ @staticmethod
+ def create_file(rname, data, ftype='yaml'):
+ ''' create a file in tmp with name and contents'''
+ path = os.path.join('/tmp', rname)
+ with open(path, 'w') as fds:
+ if ftype == 'yaml':
+ fds.write(yaml.dump(data, Dumper=yaml.RoundTripDumper))
+
+ elif ftype == 'json':
+ fds.write(json.dumps(data))
+ else:
+ fds.write(data)
+
+ # Register cleanup when module is done
+ atexit.register(Utils.cleanup, [path])
+ return path
+
+ @staticmethod
+ def create_files_from_contents(content, content_type=None):
+ '''Turn an array of dict: filename, content into a files array'''
+ if not isinstance(content, list):
+ content = [content]
+ files = []
+ for item in content:
+ path = Utils.create_file(item['path'], item['data'], ftype=content_type)
+ files.append({'name': os.path.basename(path), 'path': path})
+ return files
+
+ @staticmethod
+ def cleanup(files):
+ '''Clean up on exit '''
+ for sfile in files:
+ if os.path.exists(sfile):
+ if os.path.isdir(sfile):
+ shutil.rmtree(sfile)
+ elif os.path.isfile(sfile):
+ os.remove(sfile)
+
+ @staticmethod
+ def exists(results, _name):
+ ''' Check to see if the results include the name '''
+ if not results:
+ return False
+
+ if Utils.find_result(results, _name):
+ return True
+
+ return False
+
+ @staticmethod
+ def find_result(results, _name):
+ ''' Find the specified result by name'''
+ rval = None
+ for result in results:
+ if 'metadata' in result and result['metadata']['name'] == _name:
+ rval = result
+ break
+
+ return rval
+
+ @staticmethod
+        ''' return the parsed contents of a resource file '''
+ ''' return the service file '''
+ contents = None
+ with open(sfile) as sfd:
+ contents = sfd.read()
+
+ if sfile_type == 'yaml':
+ contents = yaml.load(contents, yaml.RoundTripLoader)
+ elif sfile_type == 'json':
+ contents = json.loads(contents)
+
+ return contents
+
+ @staticmethod
+ def filter_versions(stdout):
+ ''' filter the oc version output '''
+
+ version_dict = {}
+ version_search = ['oc', 'openshift', 'kubernetes']
+
+ for line in stdout.strip().split('\n'):
+ for term in version_search:
+ if not line:
+ continue
+ if line.startswith(term):
+ version_dict[term] = line.split()[-1]
+
+ # horrible hack to get openshift version in Openshift 3.2
+        # By default, "oc version" in 3.2 does not return an "openshift" version
+ if "openshift" not in version_dict:
+ version_dict["openshift"] = version_dict["oc"]
+
+ return version_dict
+
+ @staticmethod
+ def add_custom_versions(versions):
+ ''' create custom versions strings '''
+
+ versions_dict = {}
+
+ for tech, version in versions.items():
+ # clean up "-" from version
+ if "-" in version:
+ version = version.split("-")[0]
+
+ if version.startswith('v'):
+ versions_dict[tech + '_numeric'] = version[1:].split('+')[0]
+ # "v3.3.0.33" is what we have, we want "3.3"
+ versions_dict[tech + '_short'] = version[1:4]
+
+ return versions_dict
+
+ @staticmethod
+ def openshift_installed():
+ ''' check if openshift is installed '''
+ import yum
+
+ yum_base = yum.YumBase()
+ if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
+ return True
+
+ return False
+
+ # Disabling too-many-branches. This is a yaml dictionary comparison function
+ # pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
+ @staticmethod
+ def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
+ ''' Given a user defined definition, compare it with the results given back by our query. '''
+
+ # Currently these values are autogenerated and we do not need to check them
+ skip = ['metadata', 'status']
+ if skip_keys:
+ skip.extend(skip_keys)
+
+ for key, value in result_def.items():
+ if key in skip:
+ continue
+
+ # Both are lists
+ if isinstance(value, list):
+ if key not in user_def:
+ if debug:
+ print('User data does not have key [%s]' % key)
+ print('User data: %s' % user_def)
+ return False
+
+ if not isinstance(user_def[key], list):
+ if debug:
+ print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key]))
+ return False
+
+ if len(user_def[key]) != len(value):
+ if debug:
+ print("List lengths are not equal.")
+ print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value)))
+ print("user_def: %s" % user_def[key])
+ print("value: %s" % value)
+ return False
+
+ for values in zip(user_def[key], value):
+ if isinstance(values[0], dict) and isinstance(values[1], dict):
+ if debug:
+ print('sending list - list')
+ print(type(values[0]))
+ print(type(values[1]))
+ result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
+ if not result:
+ print('list compare returned false')
+ return False
+
+ elif value != user_def[key]:
+ if debug:
+ print('value should be identical')
+ print(value)
+ print(user_def[key])
+ return False
+
+ # recurse on a dictionary
+ elif isinstance(value, dict):
+ if key not in user_def:
+ if debug:
+ print("user_def does not have key [%s]" % key)
+ return False
+ if not isinstance(user_def[key], dict):
+ if debug:
+ print("dict returned false: not instance of dict")
+ return False
+
+ # before passing ensure keys match
+ api_values = set(value.keys()) - set(skip)
+ user_values = set(user_def[key].keys()) - set(skip)
+ if api_values != user_values:
+ if debug:
+ print("keys are not equal in dict")
+ print(api_values)
+ print(user_values)
+ return False
+
+ result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
+ if not result:
+ if debug:
+ print("dict returned false")
+ print(result)
+ return False
+
+ # Verify each key, value pair is the same
+ else:
+ if key not in user_def or value != user_def[key]:
+ if debug:
+ print("value not equal; user_def does not have key")
+ print(key)
+ print(value)
+ if key in user_def:
+ print(user_def[key])
+ return False
+
+ if debug:
+ print('returning true')
+ return True
+
+
+class OpenShiftCLIConfig(object):
+ '''Generic Config'''
+ def __init__(self, rname, namespace, kubeconfig, options):
+ self.kubeconfig = kubeconfig
+ self.name = rname
+ self.namespace = namespace
+ self._options = options
+
+ @property
+ def config_options(self):
+ ''' return config options '''
+ return self._options
+
+ def to_option_list(self):
+        '''return all options as a list of cli parameters'''
+ return self.stringify()
+
+ def stringify(self):
+        ''' return the options hash as a list of cli params (--key=value) '''
+ rval = []
+ for key, data in self.config_options.items():
+ if data['include'] \
+ and (data['value'] or isinstance(data['value'], int)):
+ rval.append('--%s=%s' % (key.replace('_', '-'), data['value']))
+
+ return rval
+
+
+# pylint: disable=too-many-instance-attributes
+class OCObject(OpenShiftCLI):
+ ''' Class to wrap the oc command line tools '''
+
+ # pylint allows 5. we need 6
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ kind,
+ namespace,
+ rname=None,
+ selector=None,
+ kubeconfig='/etc/origin/master/admin.kubeconfig',
+ verbose=False,
+ all_namespaces=False):
+ ''' Constructor for OpenshiftOC '''
+ super(OCObject, self).__init__(namespace, kubeconfig,
+ all_namespaces=all_namespaces)
+ self.kind = kind
+ self.namespace = namespace
+ self.name = rname
+ self.selector = selector
+ self.kubeconfig = kubeconfig
+ self.verbose = verbose
+
+ def get(self):
+ '''return a kind by name '''
+ results = self._get(self.kind, rname=self.name, selector=self.selector)
+ if results['returncode'] != 0 and 'stderr' in results and \
+ '\"%s\" not found' % self.name in results['stderr']:
+ results['returncode'] = 0
+
+ return results
+
+ def delete(self):
+        '''delete the object by name '''
+ return self._delete(self.kind, self.name)
+
+ def create(self, files=None, content=None):
+ '''
+ Create a config
+
+        NOTE: This creates the first file OR the first content.
+ TODO: Handle all files and content passed in
+ '''
+ if files:
+ return self._create(files[0])
+
+ content['data'] = yaml.dump(content['data'])
+ content_file = Utils.create_files_from_contents(content)[0]
+
+ return self._create(content_file['path'])
+
+ # pylint: disable=too-many-function-args
+ def update(self, files=None, content=None, force=False):
+ '''update a current openshift object
+
+ This receives a list of file names or content
+ and takes the first and calls replace.
+
+ TODO: take an entire list
+ '''
+ if files:
+ return self._replace(files[0], force)
+
+ if content and 'data' in content:
+ content = content['data']
+
+ return self.update_content(content, force)
+
+ def update_content(self, content, force=False):
+ '''update an object through using the content param'''
+ return self._replace_content(self.kind, self.name, content, force=force)
+
+ def needs_update(self, files=None, content=None, content_type='yaml'):
+ ''' check to see if we need to update '''
+ objects = self.get()
+ if objects['returncode'] != 0:
+ return objects
+
+ # pylint: disable=no-member
+ data = None
+ if files:
+ data = Utils.get_resource_file(files[0], content_type)
+ elif content and 'data' in content:
+ data = content['data']
+ else:
+ data = content
+
+ # if equal then no need. So not equal is True
+ return not Utils.check_def_equal(data, objects['results'][0], skip_keys=None, debug=False)
+
+ # pylint: disable=too-many-return-statements,too-many-branches
+ @staticmethod
+ def run_ansible(params, check_mode=False):
+ '''perform the ansible idempotent code'''
+
+ ocobj = OCObject(params['kind'],
+ params['namespace'],
+ params['name'],
+ params['selector'],
+ kubeconfig=params['kubeconfig'],
+ verbose=params['debug'],
+ all_namespaces=params['all_namespaces'])
+
+ state = params['state']
+
+ api_rval = ocobj.get()
+
+ #####
+ # Get
+ #####
+ if state == 'list':
+ return {'changed': False, 'results': api_rval, 'state': 'list'}
+
+ if not params['name']:
+ return {'failed': True, 'msg': 'Please specify a name when state is absent|present.'} # noqa: E501
+
+ ########
+ # Delete
+ ########
+ if state == 'absent':
+ if not Utils.exists(api_rval['results'], params['name']):
+ return {'changed': False, 'state': 'absent'}
+
+ if check_mode:
+ return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a delete'}
+
+ api_rval = ocobj.delete()
+
+ return {'changed': True, 'results': api_rval, 'state': 'absent'}
+
+ if state == 'present':
+ ########
+ # Create
+ ########
+ if not Utils.exists(api_rval['results'], params['name']):
+
+ if check_mode:
+ return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a create'}
+
+ # Create it here
+ api_rval = ocobj.create(params['files'], params['content'])
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ # return the created object
+ api_rval = ocobj.get()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ # Remove files
+ if params['files'] and params['delete_after']:
+ Utils.cleanup(params['files'])
+
+ return {'changed': True, 'results': api_rval, 'state': "present"}
+
+ ########
+ # Update
+ ########
+ # if a file path is passed, use it.
+ update = ocobj.needs_update(params['files'], params['content'])
+ if not isinstance(update, bool):
+ return {'failed': True, 'msg': update}
+
+ # No changes
+ if not update:
+ if params['files'] and params['delete_after']:
+ Utils.cleanup(params['files'])
+
+ return {'changed': False, 'results': api_rval['results'][0], 'state': "present"}
+
+ if check_mode:
+ return {'changed': True, 'msg': 'CHECK_MODE: Would have performed an update.'}
+
+ api_rval = ocobj.update(params['files'],
+ params['content'],
+ params['force'])
+
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ # return the created object
+ api_rval = ocobj.get()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, 'state': "present"}
+
+# pylint: disable=too-many-branches
+def main():
+ '''
+ ansible oc module for services
+ '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
+ state=dict(default='present', type='str',
+ choices=['present', 'absent', 'list']),
+ debug=dict(default=False, type='bool'),
+ namespace=dict(default='default', type='str'),
+            all_namespaces=dict(default=False, type='bool'),
+ name=dict(default=None, type='str'),
+ files=dict(default=None, type='list'),
+ kind=dict(required=True, type='str'),
+ delete_after=dict(default=False, type='bool'),
+ content=dict(default=None, type='dict'),
+ force=dict(default=False, type='bool'),
+ selector=dict(default=None, type='str'),
+ ),
+ mutually_exclusive=[["content", "files"]],
+
+ supports_check_mode=True,
+ )
+ rval = OCObject.run_ansible(module.params, module.check_mode)
+ if 'failed' in rval:
+ module.fail_json(**rval)
+
+ module.exit_json(**rval)
+
+if __name__ == '__main__':
+ main()
diff --git a/roles/lib_openshift/library/oc_route.py b/roles/lib_openshift/library/oc_route.py
new file mode 100644
index 000000000..19c7462ea
--- /dev/null
+++ b/roles/lib_openshift/library/oc_route.py
@@ -0,0 +1,1614 @@
+#!/usr/bin/env python
+# pylint: disable=missing-docstring
+# flake8: noqa: T001
+# ___ ___ _ _ ___ ___ _ _____ ___ ___
+# / __| __| \| | __| _ \ /_\_ _| __| \
+# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
+# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
+# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
+# | |) | (_) | | .` | (_) || | | _|| |) | | | |
+# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
+#
+# Copyright 2016 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+'''
+ OpenShiftCLI class that wraps the oc commands in a subprocess
+'''
+# pylint: disable=too-many-lines
+
+from __future__ import print_function
+import atexit
+import json
+import os
+import re
+import shutil
+import subprocess
+# pylint: disable=import-error
+import ruamel.yaml as yaml
+from ansible.module_utils.basic import AnsibleModule
+
+DOCUMENTATION = '''
+---
+module: oc_route
+short_description: Create, modify, and idempotently manage openshift routes.
+description:
+ - Manage openshift route objects programmatically.
+options:
+ state:
+ description:
+ - State represents whether to create, modify, delete, or list
+ required: true
+ default: present
+ choices: ["present", "absent", "list"]
+ aliases: []
+ kubeconfig:
+ description:
+ - The path for the kubeconfig file to use for authentication
+ required: false
+ default: /etc/origin/master/admin.kubeconfig
+ aliases: []
+ debug:
+ description:
+ - Turn on debug output.
+ required: false
+ default: False
+ aliases: []
+ name:
+ description:
+ - Name of the object that is being queried.
+ required: false
+ default: None
+ aliases: []
+ namespace:
+ description:
+ - The namespace where the object lives.
+ required: false
+    default: default
+ aliases: []
+ tls_termination:
+ description:
+ - The options for termination. e.g. reencrypt
+ required: false
+ default: None
+ aliases: []
+ dest_cacert_path:
+ description:
+ - The path to the dest_cacert
+ required: false
+ default: None
+ aliases: []
+ cacert_path:
+ description:
+ - The path to the cacert
+ required: false
+ default: None
+ aliases: []
+ cert_path:
+ description:
+ - The path to the cert
+ required: false
+ default: None
+ aliases: []
+ key_path:
+ description:
+ - The path to the key
+ required: false
+ default: None
+ aliases: []
+ dest_cacert_content:
+ description:
+ - The dest_cacert content
+ required: false
+ default: None
+ aliases: []
+ cacert_content:
+ description:
+ - The cacert content
+ required: false
+ default: None
+ aliases: []
+ cert_content:
+ description:
+ - The cert content
+ required: false
+ default: None
+ aliases: []
+ service_name:
+ description:
+ - The name of the service that this route points to.
+ required: false
+ default: None
+ aliases: []
+ host:
+ description:
+ - The host that the route will use. e.g. myapp.x.y.z
+ required: false
+ default: None
+ aliases: []
+author:
+- "Kenny Woodson <kwoodson@redhat.com>"
+extends_documentation_fragment: []
+'''
+
+EXAMPLES = '''
+- name: Configure certificates for reencrypt route
+ oc_route:
+ name: myapproute
+ namespace: awesomeapp
+    cert_path: "/etc/origin/master/named_certificates/myapp_cert"
+    key_path: "/etc/origin/master/named_certificates/myapp_key"
+    cacert_path: "/etc/origin/master/named_certificates/myapp_cacert"
+ dest_cacert_content: "{{ dest_cacert_content }}"
+ service_name: myapp_php
+ host: myapp.awesomeapp.openshift.com
+ tls_termination: reencrypt
+  run_once: true
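+
+# Illustrative sketch (assumes the module's list state; names are placeholders):
+- name: Query an existing route
+  oc_route:
+    name: myapproute
+    namespace: awesomeapp
+    state: list
+  register: route_out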
+'''
+# noqa: E301,E302
+
+
+class YeditException(Exception):
+ ''' Exception class for Yedit '''
+ pass
+
+
+# pylint: disable=too-many-public-methods
+class Yedit(object):
+ ''' Class to modify yaml files '''
+ re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ com_sep = set(['.', '#', '|', ':'])
+
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ filename=None,
+ content=None,
+ content_type='yaml',
+ separator='.',
+ backup=False):
+ self.content = content
+ self._separator = separator
+ self.filename = filename
+ self.__yaml_dict = content
+ self.content_type = content_type
+ self.backup = backup
+ self.load(content_type=self.content_type)
+ if self.__yaml_dict is None:
+ self.__yaml_dict = {}
+
+ @property
+ def separator(self):
+        ''' getter method for separator '''
+ return self._separator
+
+ @separator.setter
+    def separator(self, value):
+        ''' setter method for separator '''
+        self._separator = value
+
+ @property
+ def yaml_dict(self):
+ ''' getter method for yaml_dict '''
+ return self.__yaml_dict
+
+ @yaml_dict.setter
+ def yaml_dict(self, value):
+ ''' setter method for yaml_dict '''
+ self.__yaml_dict = value
+
+ @staticmethod
+ def parse_key(key, sep='.'):
+ '''parse the key allowing the appropriate separator'''
+ common_separators = list(Yedit.com_sep - set([sep]))
+ return re.findall(Yedit.re_key % ''.join(common_separators), key)
+
+ @staticmethod
+ def valid_key(key, sep='.'):
+ '''validate the incoming key'''
+ common_separators = list(Yedit.com_sep - set([sep]))
+ if not re.match(Yedit.re_valid_key % ''.join(common_separators), key):
+ return False
+
+ return True
+
+ @staticmethod
+ def remove_entry(data, key, sep='.'):
+ ''' remove data at location key '''
+ if key == '' and isinstance(data, dict):
+ data.clear()
+ return True
+ elif key == '' and isinstance(data, list):
+ del data[:]
+ return True
+
+ if not (key and Yedit.valid_key(key, sep)) and \
+ isinstance(data, (list, dict)):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes[:-1]:
+ if dict_key and isinstance(data, dict):
+ data = data.get(dict_key, None)
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ # process last index for remove
+ # expected list entry
+ if key_indexes[-1][0]:
+ if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
+ del data[int(key_indexes[-1][0])]
+ return True
+
+ # expected dict entry
+ elif key_indexes[-1][1]:
+ if isinstance(data, dict):
+ del data[key_indexes[-1][1]]
+ return True
+
+ @staticmethod
+ def add_entry(data, key, item=None, sep='.'):
+ ''' Add an item to a dictionary with key notation a.b.c
+ d = {'a': {'b': 'c'}}
+ key = a.b
+ item = 'd' -> d becomes {'a': {'b': 'd'}}
+ '''
+ if key == '':
+ pass
+ elif (not (key and Yedit.valid_key(key, sep)) and
+ isinstance(data, (list, dict))):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes[:-1]:
+ if dict_key:
+ if isinstance(data, dict) and dict_key in data and data[dict_key]: # noqa: E501
+ data = data[dict_key]
+ continue
+
+ elif data and not isinstance(data, dict):
+ return None
+
+ data[dict_key] = {}
+ data = data[dict_key]
+
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ if key == '':
+ data = item
+
+ # process last index for add
+ # expected list entry
+ elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
+ data[int(key_indexes[-1][0])] = item
+
+ # expected dict entry
+ elif key_indexes[-1][1] and isinstance(data, dict):
+ data[key_indexes[-1][1]] = item
+
+ return data
+
+ @staticmethod
+ def get_entry(data, key, sep='.'):
+ ''' Get an item from a dictionary with key notation a.b.c
+ d = {'a': {'b': 'c'}}
+ key = a.b
+ return c
+ '''
+ if key == '':
+ pass
+ elif (not (key and Yedit.valid_key(key, sep)) and
+ isinstance(data, (list, dict))):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes:
+ if dict_key and isinstance(data, dict):
+ data = data.get(dict_key, None)
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ return data
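+
+ # get_entry sketch (illustrative): a dotted path walks dicts and list indexes,
+ # and a missing path yields None rather than raising.
+ # Yedit.get_entry({'a': {'b': ['c', 'd']}}, 'a.b[1]') -> 'd'
+ # Yedit.get_entry({'a': {'b': ['c', 'd']}}, 'a.x') -> None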
+
+ def write(self):
+ ''' write to file '''
+ if not self.filename:
+ raise YeditException('Please specify a filename.')
+
+ if self.backup and self.file_exists():
+ shutil.copy(self.filename, self.filename + '.orig')
+
+ tmp_filename = self.filename + '.yedit'
+ with open(tmp_filename, 'w') as yfd:
+ # pylint: disable=no-member
+ if hasattr(self.yaml_dict, 'fa'):
+ self.yaml_dict.fa.set_block_style()
+
+ yfd.write(yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
+
+ os.rename(tmp_filename, self.filename)
+
+ return (True, self.yaml_dict)
+
+ def read(self):
+ ''' read from file '''
+ # check if it exists
+ if self.filename is None or not self.file_exists():
+ return None
+
+ contents = None
+ with open(self.filename) as yfd:
+ contents = yfd.read()
+
+ return contents
+
+ def file_exists(self):
+ ''' return whether file exists '''
+ if os.path.exists(self.filename):
+ return True
+
+ return False
+
+ def load(self, content_type='yaml'):
+ ''' return yaml file '''
+ contents = self.read()
+
+ if not contents and not self.content:
+ return None
+
+ if self.content:
+ if isinstance(self.content, dict):
+ self.yaml_dict = self.content
+ return self.yaml_dict
+ elif isinstance(self.content, str):
+ contents = self.content
+
+ # check if it is yaml
+ try:
+ if content_type == 'yaml' and contents:
+ self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
+ # pylint: disable=no-member
+ if hasattr(self.yaml_dict, 'fa'):
+ self.yaml_dict.fa.set_block_style()
+ elif content_type == 'json' and contents:
+ self.yaml_dict = json.loads(contents)
+ except yaml.YAMLError as err:
+ # Error loading yaml or json
+ raise YeditException('Problem with loading yaml file. %s' % err)
+
+ return self.yaml_dict
+
+ def get(self, key):
+ ''' get a specified key'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
+ except KeyError:
+ entry = None
+
+ return entry
+
+ def pop(self, path, key_or_item):
+ ''' remove a key, value pair from a dict or an item for a list'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ return (False, self.yaml_dict)
+
+ if isinstance(entry, dict):
+ # pylint: disable=no-member,maybe-no-member
+ if key_or_item in entry:
+ entry.pop(key_or_item)
+ return (True, self.yaml_dict)
+ return (False, self.yaml_dict)
+
+ elif isinstance(entry, list):
+ # pylint: disable=no-member,maybe-no-member
+ ind = None
+ try:
+ ind = entry.index(key_or_item)
+ except ValueError:
+ return (False, self.yaml_dict)
+
+ entry.pop(ind)
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ def delete(self, path):
+ ''' remove path from a dict'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ return (False, self.yaml_dict)
+
+ result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
+ if not result:
+ return (False, self.yaml_dict)
+
+ return (True, self.yaml_dict)
+
+ def exists(self, path, value):
+ ''' check if value exists at path'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if isinstance(entry, list):
+ if value in entry:
+ return True
+ return False
+
+ elif isinstance(entry, dict):
+ if isinstance(value, dict):
+ rval = False
+ for key, val in value.items():
+ if entry[key] != val:
+ rval = False
+ break
+ else:
+ rval = True
+ return rval
+
+ return value in entry
+
+ return entry == value
+
+ def append(self, path, value):
+ '''append value to a list'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ self.put(path, [])
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ if not isinstance(entry, list):
+ return (False, self.yaml_dict)
+
+ # pylint: disable=no-member,maybe-no-member
+ entry.append(value)
+ return (True, self.yaml_dict)
+
+ # pylint: disable=too-many-arguments
+ def update(self, path, value, index=None, curr_value=None):
+ ''' put path, value into a dict '''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if isinstance(entry, dict):
+ # pylint: disable=no-member,maybe-no-member
+ if not isinstance(value, dict):
+ raise YeditException('Cannot replace key, value entry in ' +
+ 'dict with non-dict type. value=[%s] [%s]' % (value, type(value))) # noqa: E501
+
+ entry.update(value)
+ return (True, self.yaml_dict)
+
+ elif isinstance(entry, list):
+ # pylint: disable=no-member,maybe-no-member
+ ind = None
+ if curr_value:
+ try:
+ ind = entry.index(curr_value)
+ except ValueError:
+ return (False, self.yaml_dict)
+
+ elif index is not None:
+ ind = index
+
+ if ind is not None and entry[ind] != value:
+ entry[ind] = value
+ return (True, self.yaml_dict)
+
+ # see if it exists in the list
+ try:
+ ind = entry.index(value)
+ except ValueError:
+ # doesn't exist, append it
+ entry.append(value)
+ return (True, self.yaml_dict)
+
+ # already exists, return
+ if ind is not None:
+ return (False, self.yaml_dict)
+ return (False, self.yaml_dict)
+
+ def put(self, path, value):
+ ''' put path, value into a dict '''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry == value:
+ return (False, self.yaml_dict)
+
+ # deepcopy didn't work
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ # pylint: disable=no-member
+ if hasattr(self.yaml_dict, 'fa'):
+ tmp_copy.fa.set_block_style()
+ result = Yedit.add_entry(tmp_copy, path, value, self.separator)
+ if not result:
+ return (False, self.yaml_dict)
+
+ self.yaml_dict = tmp_copy
+
+ return (True, self.yaml_dict)
+
+ def create(self, path, value):
+ ''' create a yaml file '''
+ if not self.file_exists():
+ # deepcopy didn't work
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False), # noqa: E501
+ yaml.RoundTripLoader)
+ # pylint: disable=no-member
+ if hasattr(self.yaml_dict, 'fa'):
+ tmp_copy.fa.set_block_style()
+ result = Yedit.add_entry(tmp_copy, path, value, self.separator)
+ if result:
+ self.yaml_dict = tmp_copy
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
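+
+ # Minimal in-memory usage sketch (no backing file; values are illustrative):
+ # yed = Yedit(content={'a': {'b': 'c'}})
+ # yed.get('a.b') -> 'c'
+ # yed.put('a.d', 'e') -> (True, {'a': {'b': 'c', 'd': 'e'}})
+ # yed.delete('a.b') -> (True, {'a': {'d': 'e'}})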
+
+ @staticmethod
+ def get_curr_value(invalue, val_type):
+ '''return the current value'''
+ if invalue is None:
+ return None
+
+ curr_value = invalue
+ if val_type == 'yaml':
+ curr_value = yaml.load(invalue)
+ elif val_type == 'json':
+ curr_value = json.loads(invalue)
+
+ return curr_value
+
+ @staticmethod
+ def parse_value(inc_value, vtype=''):
+ '''determine value type passed'''
+ true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
+ 'on', 'On', 'ON', ]
+ false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
+ 'off', 'Off', 'OFF']
+
+ # It came in as a string but you didn't specify value_type as string
+ # we will convert to bool if it matches any of the above cases
+ if isinstance(inc_value, str) and 'bool' in vtype:
+ if inc_value not in true_bools and inc_value not in false_bools:
+ raise YeditException('Not a boolean type. str=[%s] vtype=[%s]'
+ % (inc_value, vtype))
+ elif isinstance(inc_value, bool) and 'str' in vtype:
+ inc_value = str(inc_value)
+
+ # If vtype is not str then go ahead and attempt to yaml load it.
+ if isinstance(inc_value, str) and 'str' not in vtype:
+ try:
+ inc_value = yaml.load(inc_value)
+ except Exception:
+ raise YeditException('Could not determine type of incoming ' +
+ 'value. value=[%s] vtype=[%s]'
+ % (type(inc_value), vtype))
+
+ return inc_value
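+
+ # parse_value sketch (illustrative):
+ # Yedit.parse_value('true', vtype='bool') -> True
+ # Yedit.parse_value('123') -> 123
+ # Yedit.parse_value('123', vtype='str') -> '123' (left as a string)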
+
+ # pylint: disable=too-many-return-statements,too-many-branches
+ @staticmethod
+ def run_ansible(module):
+ '''perform the idempotent crud operations'''
+ yamlfile = Yedit(filename=module.params['src'],
+ backup=module.params['backup'],
+ separator=module.params['separator'])
+
+ if module.params['src']:
+ rval = yamlfile.load()
+
+ if yamlfile.yaml_dict is None and \
+ module.params['state'] != 'present':
+ return {'failed': True,
+ 'msg': ('Error opening file [%s]. Verify that the '
+ 'file exists, that it has correct '
+ 'permissions, and is valid yaml.') % module.params['src']}
+
+ if module.params['state'] == 'list':
+ if module.params['content']:
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
+ yamlfile.yaml_dict = content
+
+ if module.params['key']:
+ rval = yamlfile.get(module.params['key']) or {}
+
+ return {'changed': False, 'result': rval, 'state': "list"}
+
+ elif module.params['state'] == 'absent':
+ if module.params['content']:
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
+ yamlfile.yaml_dict = content
+
+ if module.params['update']:
+ rval = yamlfile.pop(module.params['key'],
+ module.params['value'])
+ else:
+ rval = yamlfile.delete(module.params['key'])
+
+ if rval[0] and module.params['src']:
+ yamlfile.write()
+
+ return {'changed': rval[0], 'result': rval[1], 'state': "absent"}
+
+ elif module.params['state'] == 'present':
+ # check if content is different than what is in the file
+ if module.params['content']:
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
+
+ # We had no edits to make and the contents are the same
+ if yamlfile.yaml_dict == content and \
+ module.params['value'] is None:
+ return {'changed': False,
+ 'result': yamlfile.yaml_dict,
+ 'state': "present"}
+
+ yamlfile.yaml_dict = content
+
+ # we were passed a value; parse it
+ if module.params['value']:
+ value = Yedit.parse_value(module.params['value'],
+ module.params['value_type'])
+ key = module.params['key']
+ if module.params['update']:
+ # pylint: disable=line-too-long
+ curr_value = Yedit.get_curr_value(Yedit.parse_value(module.params['curr_value']), # noqa: E501
+ module.params['curr_value_format']) # noqa: E501
+
+ rval = yamlfile.update(key, value, module.params['index'], curr_value) # noqa: E501
+
+ elif module.params['append']:
+ rval = yamlfile.append(key, value)
+ else:
+ rval = yamlfile.put(key, value)
+
+ if rval[0] and module.params['src']:
+ yamlfile.write()
+
+ return {'changed': rval[0],
+ 'result': rval[1], 'state': "present"}
+
+ # no edits to make
+ if module.params['src']:
+ # pylint: disable=redefined-variable-type
+ rval = yamlfile.write()
+ return {'changed': rval[0],
+ 'result': rval[1],
+ 'state': "present"}
+
+ return {'failed': True, 'msg': 'Unknown state passed'}
+# pylint: disable=too-many-lines
+# noqa: E301,E302,E303,T001
+
+
+class OpenShiftCLIError(Exception):
+ '''Exception class for openshiftcli'''
+ pass
+
+
+# pylint: disable=too-few-public-methods
+class OpenShiftCLI(object):
+ ''' Class to wrap the command line tools '''
+ def __init__(self,
+ namespace,
+ kubeconfig='/etc/origin/master/admin.kubeconfig',
+ verbose=False,
+ all_namespaces=False):
+ ''' Constructor for OpenshiftCLI '''
+ self.namespace = namespace
+ self.verbose = verbose
+ self.kubeconfig = kubeconfig
+ self.all_namespaces = all_namespaces
+
+ # Pylint allows only 5 arguments to be passed.
+ # pylint: disable=too-many-arguments
+ def _replace_content(self, resource, rname, content, force=False, sep='.'):
+ ''' replace the current object with the content '''
+ res = self._get(resource, rname)
+ if not res['results']:
+ return res
+
+ fname = '/tmp/%s' % rname
+ yed = Yedit(fname, res['results'][0], separator=sep)
+ changes = []
+ for key, value in content.items():
+ changes.append(yed.put(key, value))
+
+ if any([change[0] for change in changes]):
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self._replace(fname, force)
+
+ return {'returncode': 0, 'updated': False}
+
+ def _replace(self, fname, force=False):
+ '''replace the current object with oc replace'''
+ cmd = ['replace', '-f', fname]
+ if force:
+ cmd.append('--force')
+ return self.openshift_cmd(cmd)
+
+ def _create_from_content(self, rname, content):
+ '''create a temporary file and then call oc create on it'''
+ fname = '/tmp/%s' % rname
+ yed = Yedit(fname, content=content)
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self._create(fname)
+
+ def _create(self, fname):
+ '''call oc create on a filename'''
+ return self.openshift_cmd(['create', '-f', fname])
+
+ def _delete(self, resource, rname, selector=None):
+ '''call oc delete on a resource'''
+ cmd = ['delete', resource, rname]
+ if selector:
+ cmd.append('--selector=%s' % selector)
+
+ return self.openshift_cmd(cmd)
+
+ def _process(self, template_name, create=False, params=None, template_data=None): # noqa: E501
+ '''process a template
+
+ template_name: the name of the template to process
+ create: whether to send to oc create after processing
+ params: the parameters for the template
+ template_data: the incoming template's data; instead of a file
+ '''
+ cmd = ['process']
+ if template_data:
+ cmd.extend(['-f', '-'])
+ else:
+ cmd.append(template_name)
+ if params:
+ param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ cmd.append('-v')
+ cmd.extend(param_str)
+
+ results = self.openshift_cmd(cmd, output=True, input_data=template_data)
+
+ if results['returncode'] != 0 or not create:
+ return results
+
+ fname = '/tmp/%s' % template_name
+ yed = Yedit(fname, results['results'])
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self.openshift_cmd(['create', '-f', fname])
+
+ def _get(self, resource, rname=None, selector=None):
+ '''return a resource by name '''
+ cmd = ['get', resource]
+ if selector:
+ cmd.append('--selector=%s' % selector)
+
+ cmd.extend(['-o', 'json'])
+
+ if rname:
+ cmd.append(rname)
+
+ rval = self.openshift_cmd(cmd, output=True)
+
+ # Ensure results are returned in an array
+ if 'items' in rval:
+ rval['results'] = rval['items']
+ elif not isinstance(rval['results'], list):
+ rval['results'] = [rval['results']]
+
+ return rval
+
+ def _schedulable(self, node=None, selector=None, schedulable=True):
+ ''' perform oadm manage-node schedulable '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector=%s' % selector)
+
+ cmd.append('--schedulable=%s' % schedulable)
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
+
+ def _list_pods(self, node=None, selector=None, pod_selector=None):
+ ''' perform oadm list pods
+
+ node: the node in which to list pods
+ selector: the label selector filter if provided
+ pod_selector: the pod selector filter if provided
+ '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector=%s' % selector)
+
+ if pod_selector:
+ cmd.append('--pod-selector=%s' % pod_selector)
+
+ cmd.extend(['--list-pods', '-o', 'json'])
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
+
+ # pylint: disable=too-many-arguments
+ def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
+ ''' perform oadm manage-node evacuate '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector=%s' % selector)
+
+ if dry_run:
+ cmd.append('--dry-run')
+
+ if pod_selector:
+ cmd.append('--pod-selector=%s' % pod_selector)
+
+ if grace_period:
+ cmd.append('--grace-period=%s' % int(grace_period))
+
+ if force:
+ cmd.append('--force')
+
+ cmd.append('--evacuate')
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
+
+ def _version(self):
+ ''' return the openshift version'''
+ return self.openshift_cmd(['version'], output=True, output_type='raw')
+
+ def _import_image(self, url=None, name=None, tag=None):
+ ''' perform image import '''
+ cmd = ['import-image']
+
+ image = '{0}'.format(name)
+ if tag:
+ image += ':{0}'.format(tag)
+
+ cmd.append(image)
+
+ if url:
+ cmd.append('--from={0}/{1}'.format(url, image))
+
+ cmd.append('-n{0}'.format(self.namespace))
+
+ cmd.append('--confirm')
+ return self.openshift_cmd(cmd)
+
+ # pylint: disable=too-many-arguments,too-many-branches
+ def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
+ '''Base command for oc '''
+ cmds = []
+ if oadm:
+ cmds = ['/usr/bin/oadm']
+ else:
+ cmds = ['/usr/bin/oc']
+
+ if self.all_namespaces:
+ cmds.extend(['--all-namespaces'])
+ elif self.namespace:
+ cmds.extend(['-n', self.namespace])
+
+ cmds.extend(cmd)
+
+ rval = {}
+ results = ''
+ err = None
+
+ if self.verbose:
+ print(' '.join(cmds))
+
+ proc = subprocess.Popen(cmds,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ env={'KUBECONFIG': self.kubeconfig})
+
+ stdout, stderr = proc.communicate(input_data)
+ rval = {"returncode": proc.returncode,
+ "results": results,
+ "cmd": ' '.join(cmds)}
+
+ if proc.returncode == 0:
+ if output:
+ if output_type == 'json':
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as err:
+ if "No JSON object could be decoded" in err.args:
+ err = err.args
+ elif output_type == 'raw':
+ rval['results'] = stdout
+
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if err:
+ rval.update({"err": err,
+ "stderr": stderr,
+ "stdout": stdout,
+ "cmd": cmds})
+
+ else:
+ rval.update({"stderr": stderr,
+ "stdout": stdout,
+ "results": {}})
+
+ return rval
+
+
+class Utils(object):
+ ''' utilities for openshiftcli modules '''
+ @staticmethod
+ def create_file(rname, data, ftype='yaml'):
+ ''' create a file in tmp with name and contents'''
+ path = os.path.join('/tmp', rname)
+ with open(path, 'w') as fds:
+ if ftype == 'yaml':
+ fds.write(yaml.dump(data, Dumper=yaml.RoundTripDumper))
+
+ elif ftype == 'json':
+ fds.write(json.dumps(data))
+ else:
+ fds.write(data)
+
+ # Register cleanup when module is done
+ atexit.register(Utils.cleanup, [path])
+ return path
+
+ @staticmethod
+ def create_files_from_contents(content, content_type=None):
+ '''Turn an array of dict: filename, content into a files array'''
+ if not isinstance(content, list):
+ content = [content]
+ files = []
+ for item in content:
+ path = Utils.create_file(item['path'], item['data'], ftype=content_type)
+ files.append({'name': os.path.basename(path), 'path': path})
+ return files
+
+ @staticmethod
+ def cleanup(files):
+ '''Clean up on exit '''
+ for sfile in files:
+ if os.path.exists(sfile):
+ if os.path.isdir(sfile):
+ shutil.rmtree(sfile)
+ elif os.path.isfile(sfile):
+ os.remove(sfile)
+
+ @staticmethod
+ def exists(results, _name):
+ ''' Check to see if the results include the name '''
+ if not results:
+ return False
+
+ if Utils.find_result(results, _name):
+ return True
+
+ return False
+
+ @staticmethod
+ def find_result(results, _name):
+ ''' Find the specified result by name'''
+ rval = None
+ for result in results:
+ if 'metadata' in result and result['metadata']['name'] == _name:
+ rval = result
+ break
+
+ return rval
+
+ @staticmethod
+ def get_resource_file(sfile, sfile_type='yaml'):
+ ''' return the service file '''
+ contents = None
+ with open(sfile) as sfd:
+ contents = sfd.read()
+
+ if sfile_type == 'yaml':
+ contents = yaml.load(contents, yaml.RoundTripLoader)
+ elif sfile_type == 'json':
+ contents = json.loads(contents)
+
+ return contents
+
+ @staticmethod
+ def filter_versions(stdout):
+ ''' filter the oc version output '''
+
+ version_dict = {}
+ version_search = ['oc', 'openshift', 'kubernetes']
+
+ for line in stdout.strip().split('\n'):
+ for term in version_search:
+ if not line:
+ continue
+ if line.startswith(term):
+ version_dict[term] = line.split()[-1]
+
+ # horrible hack to get the openshift version in OpenShift 3.2;
+ # by default "oc version" in 3.2 does not return an "openshift" version
+ if "openshift" not in version_dict:
+ version_dict["openshift"] = version_dict["oc"]
+
+ return version_dict
+
+ @staticmethod
+ def add_custom_versions(versions):
+ ''' create custom versions strings '''
+
+ versions_dict = {}
+
+ for tech, version in versions.items():
+ # clean up "-" from version
+ if "-" in version:
+ version = version.split("-")[0]
+
+ if version.startswith('v'):
+ versions_dict[tech + '_numeric'] = version[1:].split('+')[0]
+ # "v3.3.0.33" is what we have, we want "3.3"
+ versions_dict[tech + '_short'] = version[1:4]
+
+ return versions_dict
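+
+ # Version-parsing sketch (illustrative `oc version` output and values):
+ # filter_versions('oc v3.3.0.33\nkubernetes v1.3.0+52492b4')
+ # -> {'oc': 'v3.3.0.33', 'kubernetes': 'v1.3.0+52492b4', 'openshift': 'v3.3.0.33'}
+ # add_custom_versions({'oc': 'v3.3.0.33'})
+ # -> {'oc_numeric': '3.3.0.33', 'oc_short': '3.3'}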
+
+ @staticmethod
+ def openshift_installed():
+ ''' check if openshift is installed '''
+ import yum
+
+ yum_base = yum.YumBase()
+ if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
+ return True
+
+ return False
+
+ # Disabling too-many-branches. This is a yaml dictionary comparison function
+ # pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
+ @staticmethod
+ def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
+ ''' Given a user defined definition, compare it with the results given back by our query. '''
+
+ # Currently these values are autogenerated and we do not need to check them
+ skip = ['metadata', 'status']
+ if skip_keys:
+ skip.extend(skip_keys)
+
+ for key, value in result_def.items():
+ if key in skip:
+ continue
+
+ # Both are lists
+ if isinstance(value, list):
+ if key not in user_def:
+ if debug:
+ print('User data does not have key [%s]' % key)
+ print('User data: %s' % user_def)
+ return False
+
+ if not isinstance(user_def[key], list):
+ if debug:
+ print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key]))
+ return False
+
+ if len(user_def[key]) != len(value):
+ if debug:
+ print("List lengths are not equal.")
+ print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value)))
+ print("user_def: %s" % user_def[key])
+ print("value: %s" % value)
+ return False
+
+ for values in zip(user_def[key], value):
+ if isinstance(values[0], dict) and isinstance(values[1], dict):
+ if debug:
+ print('sending list - list')
+ print(type(values[0]))
+ print(type(values[1]))
+ result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
+ if not result:
+ print('list compare returned false')
+ return False
+
+ elif value != user_def[key]:
+ if debug:
+ print('value should be identical')
+ print(value)
+ print(user_def[key])
+ return False
+
+ # recurse on a dictionary
+ elif isinstance(value, dict):
+ if key not in user_def:
+ if debug:
+ print("user_def does not have key [%s]" % key)
+ return False
+ if not isinstance(user_def[key], dict):
+ if debug:
+ print("dict returned false: not instance of dict")
+ return False
+
+ # before passing ensure keys match
+ api_values = set(value.keys()) - set(skip)
+ user_values = set(user_def[key].keys()) - set(skip)
+ if api_values != user_values:
+ if debug:
+ print("keys are not equal in dict")
+ print(api_values)
+ print(user_values)
+ return False
+
+ result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
+ if not result:
+ if debug:
+ print("dict returned false")
+ print(result)
+ return False
+
+ # Verify each key, value pair is the same
+ else:
+ if key not in user_def or value != user_def[key]:
+ if debug:
+ print("value not equal; user_def does not have key")
+ print(key)
+ print(value)
+ if key in user_def:
+ print(user_def[key])
+ return False
+
+ if debug:
+ print('returning true')
+ return True
+
+
+class OpenShiftCLIConfig(object):
+ '''Generic Config'''
+ def __init__(self, rname, namespace, kubeconfig, options):
+ self.kubeconfig = kubeconfig
+ self.name = rname
+ self.namespace = namespace
+ self._options = options
+
+ @property
+ def config_options(self):
+ ''' return config options '''
+ return self._options
+
+ def to_option_list(self):
+ '''return all options as a list of cli parameters'''
+ return self.stringify()
+
+ def stringify(self):
+ ''' return the options hash as a list of cli params '''
+ rval = []
+ for key, data in self.config_options.items():
+ if data['include'] \
+ and (data['value'] or isinstance(data['value'], int)):
+ rval.append('--%s=%s' % (key.replace('_', '-'), data['value']))
+
+ return rval
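+
+ # stringify sketch (hypothetical options dict; illustrative only):
+ # opts = {'replicas': {'value': 3, 'include': True},
+ #         'selector': {'value': None, 'include': True}}
+ # OpenShiftCLIConfig('name', 'ns', '/path/kubeconfig', opts).stringify()
+ # -> ['--replicas=3']   (unset values are skipped, underscores become dashes)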
+
+# noqa: E302,E301
+
+
+# pylint: disable=too-many-instance-attributes
+class RouteConfig(object):
+ ''' Handle route options '''
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ sname,
+ namespace,
+ kubeconfig,
+ destcacert=None,
+ cacert=None,
+ cert=None,
+ key=None,
+ host=None,
+ tls_termination=None,
+ service_name=None,
+ wildcard_policy=None,
+ weight=None):
+ ''' constructor for handling route options '''
+ self.kubeconfig = kubeconfig
+ self.name = sname
+ self.namespace = namespace
+ self.host = host
+ self.tls_termination = tls_termination
+ self.destcacert = destcacert
+ self.cacert = cacert
+ self.cert = cert
+ self.key = key
+ self.service_name = service_name
+ self.data = {}
+ self.wildcard_policy = wildcard_policy
+ if wildcard_policy is None:
+ self.wildcard_policy = 'None'
+ self.weight = weight
+ if weight is None:
+ self.weight = 100
+
+ self.create_dict()
+
+ def create_dict(self):
+ ''' return a service as a dict '''
+ self.data['apiVersion'] = 'v1'
+ self.data['kind'] = 'Route'
+ self.data['metadata'] = {}
+ self.data['metadata']['name'] = self.name
+ self.data['metadata']['namespace'] = self.namespace
+ self.data['spec'] = {}
+
+ self.data['spec']['host'] = self.host
+
+ if self.tls_termination:
+ self.data['spec']['tls'] = {}
+
+ if self.tls_termination == 'reencrypt':
+ self.data['spec']['tls']['destinationCACertificate'] = self.destcacert
+ self.data['spec']['tls']['key'] = self.key
+ self.data['spec']['tls']['caCertificate'] = self.cacert
+ self.data['spec']['tls']['certificate'] = self.cert
+ self.data['spec']['tls']['termination'] = self.tls_termination
+
+ self.data['spec']['to'] = {'kind': 'Service',
+ 'name': self.service_name,
+ 'weight': self.weight}
+
+ self.data['spec']['wildcardPolicy'] = self.wildcard_policy
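+
+ # Sketch of the dict built above for a reencrypt route, with the defaults
+ # applied when weight and wildcard_policy are not given (values illustrative):
+ # {'apiVersion': 'v1', 'kind': 'Route',
+ #  'metadata': {'name': ..., 'namespace': ...},
+ #  'spec': {'host': ...,
+ #           'tls': {'destinationCACertificate': ..., 'key': ..., 'caCertificate': ...,
+ #                   'certificate': ..., 'termination': 'reencrypt'},
+ #           'to': {'kind': 'Service', 'name': ..., 'weight': 100},
+ #           'wildcardPolicy': 'None'}}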
+
+# pylint: disable=too-many-instance-attributes,too-many-public-methods
+class Route(Yedit):
+ ''' Class to wrap the oc command line tools '''
+ wildcard_policy = "spec.wildcardPolicy"
+ host_path = "spec.host"
+ service_path = "spec.to.name"
+ weight_path = "spec.to.weight"
+ cert_path = "spec.tls.certificate"
+ cacert_path = "spec.tls.caCertificate"
+ destcacert_path = "spec.tls.destinationCACertificate"
+ termination_path = "spec.tls.termination"
+ key_path = "spec.tls.key"
+ kind = 'route'
+
+ def __init__(self, content):
+ '''Route constructor'''
+ super(Route, self).__init__(content=content)
+
+ def get_destcacert(self):
+ ''' return cert '''
+ return self.get(Route.destcacert_path)
+
+ def get_cert(self):
+ ''' return cert '''
+ return self.get(Route.cert_path)
+
+ def get_key(self):
+ ''' return key '''
+ return self.get(Route.key_path)
+
+ def get_cacert(self):
+ ''' return cacert '''
+ return self.get(Route.cacert_path)
+
+ def get_service(self):
+ ''' return service name '''
+ return self.get(Route.service_path)
+
+ def get_weight(self):
+ ''' return service weight '''
+ return self.get(Route.weight_path)
+
+ def get_termination(self):
+ ''' return tls termination'''
+ return self.get(Route.termination_path)
+
+ def get_host(self):
+ ''' return host '''
+ return self.get(Route.host_path)
+
+ def get_wildcard_policy(self):
+ ''' return wildcardPolicy '''
+ return self.get(Route.wildcard_policy)
+
+
+# pylint: disable=too-many-instance-attributes
+class OCRoute(OpenShiftCLI):
+ ''' Class to wrap the oc command line tools '''
+ kind = 'route'
+
+ def __init__(self,
+ config,
+ verbose=False):
+ ''' Constructor for OCRoute '''
+ super(OCRoute, self).__init__(config.namespace, config.kubeconfig)
+ self.config = config
+ self.namespace = config.namespace
+ self._route = None
+
+ @property
+ def route(self):
+ ''' property function for route'''
+ if not self._route:
+ self.get()
+ return self._route
+
+ @route.setter
+ def route(self, data):
+ ''' setter function for route '''
+ self._route = data
+
+ def exists(self):
+ ''' return whether a route exists '''
+ if self.route:
+ return True
+
+ return False
+
+ def get(self):
+ '''return route information '''
+ result = self._get(self.kind, self.config.name)
+ if result['returncode'] == 0:
+ self.route = Route(content=result['results'][0])
+ elif 'routes \"%s\" not found' % self.config.name in result['stderr']:
+ result['returncode'] = 0
+ result['results'] = [{}]
+
+ return result
+
+ def delete(self):
+ '''delete the object'''
+ return self._delete(self.kind, self.config.name)
+
+ def create(self):
+ '''create the object'''
+ return self._create_from_content(self.config.name, self.config.data)
+
+ def update(self):
+ '''update the object'''
+ # need to update the tls information and the service name
+ return self._replace_content(self.kind, self.config.name, self.config.data)
+
+ def needs_update(self):
+ ''' verify an update is needed '''
+ skip = []
+ return not Utils.check_def_equal(self.config.data, self.route.yaml_dict, skip_keys=skip, debug=True)
+
+ # pylint: disable=too-many-return-statements,too-many-branches
+ @staticmethod
+ def run_ansible(params, files, check_mode=False):
+ ''' run the idempotent ansible code
+
+ params comes from the ansible portion for this module
+ files: a dictionary for the certificates
+ {'cert': {'path': '',
+ 'content': '',
+ 'value': ''
+ }
+ }
+ check_mode: whether the module is running in check mode (module.check_mode)
+ '''
+
+ rconfig = RouteConfig(params['name'],
+ params['namespace'],
+ params['kubeconfig'],
+ files['destcacert']['value'],
+ files['cacert']['value'],
+ files['cert']['value'],
+ files['key']['value'],
+ params['host'],
+ params['tls_termination'],
+ params['service_name'],
+ params['wildcard_policy'],
+ params['weight'])
+
+ oc_route = OCRoute(rconfig, verbose=params['debug'])
+
+ state = params['state']
+
+ api_rval = oc_route.get()
+
+ #####
+ # Get
+ #####
+ if state == 'list':
+ return {'changed': False,
+ 'results': api_rval['results'],
+ 'state': 'list'}
+
+ ########
+ # Delete
+ ########
+ if state == 'absent':
+ if oc_route.exists():
+
+ if check_mode:
+ return {'changed': False, 'msg': 'CHECK_MODE: Would have performed a delete.'} # noqa: E501
+
+ api_rval = oc_route.delete()
+
+ return {'changed': True, 'results': api_rval, 'state': "absent"} # noqa: E501
+ return {'changed': False, 'state': 'absent'}
+
+ if state == 'present':
+ ########
+ # Create
+ ########
+ if not oc_route.exists():
+
+ if check_mode:
+ return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a create.'} # noqa: E501
+
+ # Create it here
+ api_rval = oc_route.create()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval, 'state': "present"} # noqa: E501
+
+ # return the created object
+ api_rval = oc_route.get()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval, 'state': "present"} # noqa: E501
+
+ return {'changed': True, 'results': api_rval, 'state': "present"} # noqa: E501
+
+ ########
+ # Update
+ ########
+ if oc_route.needs_update():
+
+ if check_mode:
+ return {'changed': True, 'msg': 'CHECK_MODE: Would have performed an update.'} # noqa: E501
+
+ api_rval = oc_route.update()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval, 'state': "present"} # noqa: E501
+
+ # return the created object
+ api_rval = oc_route.get()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval, 'state': "present"} # noqa: E501
+
+ return {'changed': True, 'results': api_rval, 'state': "present"} # noqa: E501
+
+ return {'changed': False, 'results': api_rval, 'state': "present"}
+
+ # catch all
+ return {'failed': True, 'msg': "Unknown state passed"}
+
+
+def get_cert_data(path, content):
+ '''get the data for a particular value'''
+ if not path and not content:
+ return None
+
+ rval = None
+ if path and os.path.exists(path) and os.access(path, os.R_OK):
+ rval = open(path).read()
+ elif content:
+ rval = content
+
+ return rval
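+
+# get_cert_data sketch (hypothetical path and content; illustrative only):
+# get_cert_data('/etc/origin/master/ca.crt', None) -> the file's contents, if readable
+# get_cert_data(None, '-----BEGIN CERTIFICATE-----...') -> the content string as given
+# get_cert_data(None, None) -> None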
+
+
+# pylint: disable=too-many-branches
+def main():
+ '''
+ ansible oc module for route
+ '''
+ module = AnsibleModule(
+ argument_spec=dict(
+ kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
+ state=dict(default='present', type='str',
+ choices=['present', 'absent', 'list']),
+ debug=dict(default=False, type='bool'),
+ name=dict(default=None, required=True, type='str'),
+ namespace=dict(default=None, required=True, type='str'),
+ tls_termination=dict(default=None, type='str'),
+ dest_cacert_path=dict(default=None, type='str'),
+ cacert_path=dict(default=None, type='str'),
+ cert_path=dict(default=None, type='str'),
+ key_path=dict(default=None, type='str'),
+ dest_cacert_content=dict(default=None, type='str'),
+ cacert_content=dict(default=None, type='str'),
+ cert_content=dict(default=None, type='str'),
+ key_content=dict(default=None, type='str'),
+ service_name=dict(default=None, type='str'),
+ host=dict(default=None, type='str'),
+ wildcard_policy=dict(default=None, type='str'),
+ weight=dict(default=None, type='int'),
+ ),
+ mutually_exclusive=[('dest_cacert_path', 'dest_cacert_content'),
+ ('cacert_path', 'cacert_content'),
+ ('cert_path', 'cert_content'),
+ ('key_path', 'key_content'), ],
+ supports_check_mode=True,
+ )
+ files = {'destcacert': {'path': module.params['dest_cacert_path'],
+ 'content': module.params['dest_cacert_content'],
+ 'value': None, },
+ 'cacert': {'path': module.params['cacert_path'],
+ 'content': module.params['cacert_content'],
+ 'value': None, },
+ 'cert': {'path': module.params['cert_path'],
+ 'content': module.params['cert_content'],
+ 'value': None, },
+ 'key': {'path': module.params['key_path'],
+ 'content': module.params['key_content'],
+ 'value': None, }, }
+
+ if module.params['tls_termination']:
+ for key, option in files.items():
+ if key == 'destcacert' and module.params['tls_termination'] != 'reencrypt':
+ continue
+
+ option['value'] = get_cert_data(option['path'], option['content'])
+
+ if not option['value']:
+ module.fail_json(msg='Verify that you pass a value for %s' % key)
+
+ results = OCRoute.run_ansible(module.params, files, module.check_mode)
+
+ if 'failed' in results:
+ module.fail_json(**results)
+
+ module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/roles/lib_openshift/library/oc_version.py b/roles/lib_openshift/library/oc_version.py
new file mode 100644
index 000000000..197a0a947
--- /dev/null
+++ b/roles/lib_openshift/library/oc_version.py
@@ -0,0 +1,1232 @@
+#!/usr/bin/env python
+# pylint: disable=missing-docstring
+# flake8: noqa: T001
+# ___ ___ _ _ ___ ___ _ _____ ___ ___
+# / __| __| \| | __| _ \ /_\_ _| __| \
+# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
+# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
+# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
+# | |) | (_) | | .` | (_) || | | _|| |) | | | |
+# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
+#
+# Copyright 2016 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+'''
+ OpenShiftCLI class that wraps the oc commands in a subprocess
+'''
+# pylint: disable=too-many-lines
+
+from __future__ import print_function
+import atexit
+import json
+import os
+import re
+import shutil
+import subprocess
+# pylint: disable=import-error
+import ruamel.yaml as yaml
+from ansible.module_utils.basic import AnsibleModule
+
+DOCUMENTATION = '''
+---
+module: oc_version
+short_description: Return the current OpenShift version
+description:
+ - Return the installed OpenShift version, as reported by `oc version`.
+options:
+ state:
+ description:
+ - Currently list is only supported state.
+ required: true
+ default: list
+ choices: ["list"]
+ aliases: []
+ kubeconfig:
+ description:
+ - The path for the kubeconfig file to use for authentication
+ required: false
+ default: /etc/origin/master/admin.kubeconfig
+ aliases: []
+ debug:
+ description:
+ - Turn on debug output.
+ required: false
+ default: False
+ aliases: []
+author:
+- "Kenny Woodson <kwoodson@redhat.com>"
+extends_documentation_fragment: []
+'''
+
+EXAMPLES = '''
+- name: get oc version
+ oc_version:
+ register: oc_version
+'''
+# noqa: E301,E302
+
+
+class YeditException(Exception):
+ ''' Exception class for Yedit '''
+ pass
+
+
+# pylint: disable=too-many-public-methods
+class Yedit(object):
+ ''' Class to modify yaml files '''
+ re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ com_sep = set(['.', '#', '|', ':'])
+
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ filename=None,
+ content=None,
+ content_type='yaml',
+ separator='.',
+ backup=False):
+ self.content = content
+ self._separator = separator
+ self.filename = filename
+ self.__yaml_dict = content
+ self.content_type = content_type
+ self.backup = backup
+ self.load(content_type=self.content_type)
+ if self.__yaml_dict is None:
+ self.__yaml_dict = {}
+
+ @property
+ def separator(self):
+ ''' getter method for separator '''
+ return self._separator
+
+ @separator.setter
+ def separator(self, value):
+ ''' setter method for separator '''
+ self._separator = value
+
+ @property
+ def yaml_dict(self):
+ ''' getter method for yaml_dict '''
+ return self.__yaml_dict
+
+ @yaml_dict.setter
+ def yaml_dict(self, value):
+ ''' setter method for yaml_dict '''
+ self.__yaml_dict = value
+
+ @staticmethod
+ def parse_key(key, sep='.'):
+ '''parse the key allowing the appropriate separator'''
+ common_separators = list(Yedit.com_sep - set([sep]))
+ return re.findall(Yedit.re_key % ''.join(common_separators), key)
+
+ @staticmethod
+ def valid_key(key, sep='.'):
+ '''validate the incoming key'''
+ common_separators = list(Yedit.com_sep - set([sep]))
+ if not re.match(Yedit.re_valid_key % ''.join(common_separators), key):
+ return False
+
+ return True
+
+ @staticmethod
+ def remove_entry(data, key, sep='.'):
+ ''' remove data at location key '''
+ if key == '' and isinstance(data, dict):
+ data.clear()
+ return True
+ elif key == '' and isinstance(data, list):
+ del data[:]
+ return True
+
+ if not (key and Yedit.valid_key(key, sep)) and \
+ isinstance(data, (list, dict)):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes[:-1]:
+ if dict_key and isinstance(data, dict):
+ data = data.get(dict_key, None)
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ # process last index for remove
+ # expected list entry
+ if key_indexes[-1][0]:
+ if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
+ del data[int(key_indexes[-1][0])]
+ return True
+
+ # expected dict entry
+ elif key_indexes[-1][1]:
+ if isinstance(data, dict):
+ del data[key_indexes[-1][1]]
+ return True
+
+ @staticmethod
+ def add_entry(data, key, item=None, sep='.'):
+ ''' Add an item to a dictionary with key notation a.b.c
+ d = {'a': {'b': 'c'}}
+ key = a.b
+ item = 'd' -> d becomes {'a': {'b': 'd'}}
+ '''
+ if key == '':
+ pass
+ elif (not (key and Yedit.valid_key(key, sep)) and
+ isinstance(data, (list, dict))):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes[:-1]:
+ if dict_key:
+ if isinstance(data, dict) and dict_key in data and data[dict_key]: # noqa: E501
+ data = data[dict_key]
+ continue
+
+ elif data and not isinstance(data, dict):
+ return None
+
+ data[dict_key] = {}
+ data = data[dict_key]
+
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ if key == '':
+ data = item
+
+ # process last index for add
+ # expected list entry
+ elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
+ data[int(key_indexes[-1][0])] = item
+
+ # expected dict entry
+ elif key_indexes[-1][1] and isinstance(data, dict):
+ data[key_indexes[-1][1]] = item
+
+ return data
+
+ @staticmethod
+ def get_entry(data, key, sep='.'):
+ ''' Get an item from a dictionary with key notation a.b.c
+ d = {'a': {'b': 'c'}}
+ key = a.b
+ return c
+ '''
+ if key == '':
+ pass
+ elif (not (key and Yedit.valid_key(key, sep)) and
+ isinstance(data, (list, dict))):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes:
+ if dict_key and isinstance(data, dict):
+ data = data.get(dict_key, None)
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ return data
+
+ def write(self):
+ ''' write to file '''
+ if not self.filename:
+ raise YeditException('Please specify a filename.')
+
+ if self.backup and self.file_exists():
+ shutil.copy(self.filename, self.filename + '.orig')
+
+ tmp_filename = self.filename + '.yedit'
+ with open(tmp_filename, 'w') as yfd:
+ # pylint: disable=no-member
+ if hasattr(self.yaml_dict, 'fa'):
+ self.yaml_dict.fa.set_block_style()
+
+ yfd.write(yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
+
+ os.rename(tmp_filename, self.filename)
+
+ return (True, self.yaml_dict)
+
+ def read(self):
+ ''' read from file '''
+ # check if it exists
+ if self.filename is None or not self.file_exists():
+ return None
+
+ contents = None
+ with open(self.filename) as yfd:
+ contents = yfd.read()
+
+ return contents
+
+ def file_exists(self):
+ ''' return whether file exists '''
+ if os.path.exists(self.filename):
+ return True
+
+ return False
+
+ def load(self, content_type='yaml'):
+ ''' return yaml file '''
+ contents = self.read()
+
+ if not contents and not self.content:
+ return None
+
+ if self.content:
+ if isinstance(self.content, dict):
+ self.yaml_dict = self.content
+ return self.yaml_dict
+ elif isinstance(self.content, str):
+ contents = self.content
+
+ # check if it is yaml
+ try:
+ if content_type == 'yaml' and contents:
+ self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
+ # pylint: disable=no-member
+ if hasattr(self.yaml_dict, 'fa'):
+ self.yaml_dict.fa.set_block_style()
+ elif content_type == 'json' and contents:
+ self.yaml_dict = json.loads(contents)
+ except yaml.YAMLError as err:
+ # Error loading yaml or json
+ raise YeditException('Problem with loading yaml file. %s' % err)
+
+ return self.yaml_dict
+
+ def get(self, key):
+ ''' get a specified key'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
+ except KeyError:
+ entry = None
+
+ return entry
+
+ def pop(self, path, key_or_item):
+ ''' remove a key, value pair from a dict or an item for a list'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ return (False, self.yaml_dict)
+
+ if isinstance(entry, dict):
+ # pylint: disable=no-member,maybe-no-member
+ if key_or_item in entry:
+ entry.pop(key_or_item)
+ return (True, self.yaml_dict)
+ return (False, self.yaml_dict)
+
+ elif isinstance(entry, list):
+ # pylint: disable=no-member,maybe-no-member
+ ind = None
+ try:
+ ind = entry.index(key_or_item)
+ except ValueError:
+ return (False, self.yaml_dict)
+
+ entry.pop(ind)
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ def delete(self, path):
+ ''' remove path from a dict'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ return (False, self.yaml_dict)
+
+ result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
+ if not result:
+ return (False, self.yaml_dict)
+
+ return (True, self.yaml_dict)
+
+ def exists(self, path, value):
+ ''' check if value exists at path'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if isinstance(entry, list):
+ if value in entry:
+ return True
+ return False
+
+ elif isinstance(entry, dict):
+ if isinstance(value, dict):
+ rval = False
+ for key, val in value.items():
+ if entry[key] != val:
+ rval = False
+ break
+ else:
+ rval = True
+ return rval
+
+ return value in entry
+
+ return entry == value
+
+ def append(self, path, value):
+ '''append value to a list'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ self.put(path, [])
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ if not isinstance(entry, list):
+ return (False, self.yaml_dict)
+
+ # pylint: disable=no-member,maybe-no-member
+ entry.append(value)
+ return (True, self.yaml_dict)
+
+ # pylint: disable=too-many-arguments
+ def update(self, path, value, index=None, curr_value=None):
+ ''' put path, value into a dict '''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if isinstance(entry, dict):
+ # pylint: disable=no-member,maybe-no-member
+ if not isinstance(value, dict):
+ raise YeditException('Cannot replace key, value entry in ' +
+ 'dict with non-dict type. value=[%s] [%s]' % (value, type(value))) # noqa: E501
+
+ entry.update(value)
+ return (True, self.yaml_dict)
+
+ elif isinstance(entry, list):
+ # pylint: disable=no-member,maybe-no-member
+ ind = None
+ if curr_value:
+ try:
+ ind = entry.index(curr_value)
+ except ValueError:
+ return (False, self.yaml_dict)
+
+ elif index is not None:
+ ind = index
+
+ if ind is not None and entry[ind] != value:
+ entry[ind] = value
+ return (True, self.yaml_dict)
+
+ # see if it exists in the list
+ try:
+ ind = entry.index(value)
+ except ValueError:
+ # doesn't exist, append it
+ entry.append(value)
+ return (True, self.yaml_dict)
+
+ # already exists, return
+ if ind is not None:
+ return (False, self.yaml_dict)
+ return (False, self.yaml_dict)
+
+ def put(self, path, value):
+ ''' put path, value into a dict '''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry == value:
+ return (False, self.yaml_dict)
+
+ # deepcopy didn't work
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ # pylint: disable=no-member
+ if hasattr(self.yaml_dict, 'fa'):
+ tmp_copy.fa.set_block_style()
+ result = Yedit.add_entry(tmp_copy, path, value, self.separator)
+ if not result:
+ return (False, self.yaml_dict)
+
+ self.yaml_dict = tmp_copy
+
+ return (True, self.yaml_dict)
+
+ def create(self, path, value):
+ ''' create a yaml file '''
+ if not self.file_exists():
+ # deepcopy didn't work
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False), # noqa: E501
+ yaml.RoundTripLoader)
+ # pylint: disable=no-member
+ if hasattr(self.yaml_dict, 'fa'):
+ tmp_copy.fa.set_block_style()
+ result = Yedit.add_entry(tmp_copy, path, value, self.separator)
+ if result:
+ self.yaml_dict = tmp_copy
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ @staticmethod
+ def get_curr_value(invalue, val_type):
+ '''return the current value'''
+ if invalue is None:
+ return None
+
+ curr_value = invalue
+ if val_type == 'yaml':
+ curr_value = yaml.load(invalue)
+ elif val_type == 'json':
+ curr_value = json.loads(invalue)
+
+ return curr_value
+
+ @staticmethod
+ def parse_value(inc_value, vtype=''):
+ '''determine value type passed'''
+ true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
+ 'on', 'On', 'ON', ]
+ false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
+ 'off', 'Off', 'OFF']
+
+ # It came in as a string but you didn't specify value_type as string
+ # we will convert to bool if it matches any of the above cases
+ if isinstance(inc_value, str) and 'bool' in vtype:
+ if inc_value not in true_bools and inc_value not in false_bools:
+ raise YeditException('Not a boolean type. str=[%s] vtype=[%s]'
+ % (inc_value, vtype))
+ elif isinstance(inc_value, bool) and 'str' in vtype:
+ inc_value = str(inc_value)
+
+ # If vtype is not str then go ahead and attempt to yaml load it.
+ if isinstance(inc_value, str) and 'str' not in vtype:
+ try:
+ inc_value = yaml.load(inc_value)
+ except Exception:
+ raise YeditException('Could not determine type of incoming ' +
+ 'value. value=[%s] vtype=[%s]'
+ % (type(inc_value), vtype))
+
+ return inc_value
+
+ # pylint: disable=too-many-return-statements,too-many-branches
+ @staticmethod
+ def run_ansible(module):
+ '''perform the idempotent crud operations'''
+ yamlfile = Yedit(filename=module.params['src'],
+ backup=module.params['backup'],
+ separator=module.params['separator'])
+
+ if module.params['src']:
+ rval = yamlfile.load()
+
+ if yamlfile.yaml_dict is None and \
+ module.params['state'] != 'present':
+ return {'failed': True,
+ 'msg': ('Error opening file [%s]. Verify that the '
+ 'file exists, that it has correct '
+ 'permissions, and is valid yaml.') % module.params['src']}
+
+ if module.params['state'] == 'list':
+ if module.params['content']:
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
+ yamlfile.yaml_dict = content
+
+ if module.params['key']:
+ rval = yamlfile.get(module.params['key']) or {}
+
+ return {'changed': False, 'result': rval, 'state': "list"}
+
+ elif module.params['state'] == 'absent':
+ if module.params['content']:
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
+ yamlfile.yaml_dict = content
+
+ if module.params['update']:
+ rval = yamlfile.pop(module.params['key'],
+ module.params['value'])
+ else:
+ rval = yamlfile.delete(module.params['key'])
+
+ if rval[0] and module.params['src']:
+ yamlfile.write()
+
+ return {'changed': rval[0], 'result': rval[1], 'state': "absent"}
+
+ elif module.params['state'] == 'present':
+ # check if content is different than what is in the file
+ if module.params['content']:
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
+
+ # We had no edits to make and the contents are the same
+ if yamlfile.yaml_dict == content and \
+ module.params['value'] is None:
+ return {'changed': False,
+ 'result': yamlfile.yaml_dict,
+ 'state': "present"}
+
+ yamlfile.yaml_dict = content
+
+ # we were passed a value; parse it
+ if module.params['value']:
+ value = Yedit.parse_value(module.params['value'],
+ module.params['value_type'])
+ key = module.params['key']
+ if module.params['update']:
+ # pylint: disable=line-too-long
+ curr_value = Yedit.get_curr_value(Yedit.parse_value(module.params['curr_value']), # noqa: E501
+ module.params['curr_value_format']) # noqa: E501
+
+ rval = yamlfile.update(key, value, module.params['index'], curr_value) # noqa: E501
+
+ elif module.params['append']:
+ rval = yamlfile.append(key, value)
+ else:
+ rval = yamlfile.put(key, value)
+
+ if rval[0] and module.params['src']:
+ yamlfile.write()
+
+ return {'changed': rval[0],
+ 'result': rval[1], 'state': "present"}
+
+ # no edits to make
+ if module.params['src']:
+ # pylint: disable=redefined-variable-type
+ rval = yamlfile.write()
+ return {'changed': rval[0],
+ 'result': rval[1],
+ 'state': "present"}
+
+ return {'failed': True, 'msg': 'Unknown state passed'}
+# pylint: disable=too-many-lines
+# noqa: E301,E302,E303,T001
+
+
+class OpenShiftCLIError(Exception):
+ '''Exception class for openshiftcli'''
+ pass
+
+
+# pylint: disable=too-few-public-methods
+class OpenShiftCLI(object):
+ ''' Class to wrap the command line tools '''
+ def __init__(self,
+ namespace,
+ kubeconfig='/etc/origin/master/admin.kubeconfig',
+ verbose=False,
+ all_namespaces=False):
+ ''' Constructor for OpenshiftCLI '''
+ self.namespace = namespace
+ self.verbose = verbose
+ self.kubeconfig = kubeconfig
+ self.all_namespaces = all_namespaces
+
+ # Pylint allows only 5 arguments to be passed.
+ # pylint: disable=too-many-arguments
+ def _replace_content(self, resource, rname, content, force=False, sep='.'):
+ ''' replace the current object with the content '''
+ res = self._get(resource, rname)
+ if not res['results']:
+ return res
+
+ fname = '/tmp/%s' % rname
+ yed = Yedit(fname, res['results'][0], separator=sep)
+ changes = []
+ for key, value in content.items():
+ changes.append(yed.put(key, value))
+
+ if any([change[0] for change in changes]):
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self._replace(fname, force)
+
+ return {'returncode': 0, 'updated': False}
+
+ def _replace(self, fname, force=False):
+ '''replace the current object with oc replace'''
+ cmd = ['replace', '-f', fname]
+ if force:
+ cmd.append('--force')
+ return self.openshift_cmd(cmd)
+
+ def _create_from_content(self, rname, content):
+ '''create a temporary file and then call oc create on it'''
+ fname = '/tmp/%s' % rname
+ yed = Yedit(fname, content=content)
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self._create(fname)
+
+ def _create(self, fname):
+ '''call oc create on a filename'''
+ return self.openshift_cmd(['create', '-f', fname])
+
+ def _delete(self, resource, rname, selector=None):
+ '''call oc delete on a resource'''
+ cmd = ['delete', resource, rname]
+ if selector:
+ cmd.append('--selector=%s' % selector)
+
+ return self.openshift_cmd(cmd)
+
+ def _process(self, template_name, create=False, params=None, template_data=None): # noqa: E501
+ '''process a template
+
+ template_name: the name of the template to process
+ create: whether to send to oc create after processing
+ params: the parameters for the template
+ template_data: the incoming template's data; instead of a file
+ '''
+ cmd = ['process']
+ if template_data:
+ cmd.extend(['-f', '-'])
+ else:
+ cmd.append(template_name)
+ if params:
+ param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ cmd.append('-v')
+ cmd.extend(param_str)
+
+ results = self.openshift_cmd(cmd, output=True, input_data=template_data)
+
+ if results['returncode'] != 0 or not create:
+ return results
+
+ fname = '/tmp/%s' % template_name
+ yed = Yedit(fname, results['results'])
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self.openshift_cmd(['create', '-f', fname])
+
+ def _get(self, resource, rname=None, selector=None):
+ '''return a resource by name '''
+ cmd = ['get', resource]
+ if selector:
+ cmd.append('--selector=%s' % selector)
+
+ cmd.extend(['-o', 'json'])
+
+ if rname:
+ cmd.append(rname)
+
+ rval = self.openshift_cmd(cmd, output=True)
+
+ # Ensure results are returned in an array
+ if 'items' in rval:
+ rval['results'] = rval['items']
+ elif not isinstance(rval['results'], list):
+ rval['results'] = [rval['results']]
+
+ return rval
+
+ def _schedulable(self, node=None, selector=None, schedulable=True):
+ ''' perform oadm manage-node schedulable '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector=%s' % selector)
+
+ cmd.append('--schedulable=%s' % schedulable)
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
+
+ def _list_pods(self, node=None, selector=None, pod_selector=None):
+ ''' perform oadm list pods
+
+ node: the node in which to list pods
+ selector: the label selector filter if provided
+ pod_selector: the pod selector filter if provided
+ '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector=%s' % selector)
+
+ if pod_selector:
+ cmd.append('--pod-selector=%s' % pod_selector)
+
+ cmd.extend(['--list-pods', '-o', 'json'])
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
+
+ # pylint: disable=too-many-arguments
+ def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
+ ''' perform oadm manage-node evacuate '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector=%s' % selector)
+
+ if dry_run:
+ cmd.append('--dry-run')
+
+ if pod_selector:
+ cmd.append('--pod-selector=%s' % pod_selector)
+
+ if grace_period:
+ cmd.append('--grace-period=%s' % int(grace_period))
+
+ if force:
+ cmd.append('--force')
+
+ cmd.append('--evacuate')
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
+
+ def _version(self):
+ ''' return the openshift version'''
+ return self.openshift_cmd(['version'], output=True, output_type='raw')
+
+ def _import_image(self, url=None, name=None, tag=None):
+ ''' perform image import '''
+ cmd = ['import-image']
+
+ image = '{0}'.format(name)
+ if tag:
+ image += ':{0}'.format(tag)
+
+ cmd.append(image)
+
+ if url:
+ cmd.append('--from={0}/{1}'.format(url, image))
+
+ cmd.append('-n{0}'.format(self.namespace))
+
+ cmd.append('--confirm')
+ return self.openshift_cmd(cmd)
+
+ # pylint: disable=too-many-arguments,too-many-branches
+ def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
+ '''Base command for oc '''
+ cmds = []
+ if oadm:
+ cmds = ['/usr/bin/oadm']
+ else:
+ cmds = ['/usr/bin/oc']
+
+ if self.all_namespaces:
+ cmds.extend(['--all-namespaces'])
+ elif self.namespace:
+ cmds.extend(['-n', self.namespace])
+
+ cmds.extend(cmd)
+
+ rval = {}
+ results = ''
+ err = None
+
+ if self.verbose:
+ print(' '.join(cmds))
+
+ proc = subprocess.Popen(cmds,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ env={'KUBECONFIG': self.kubeconfig})
+
+ stdout, stderr = proc.communicate(input_data)
+ rval = {"returncode": proc.returncode,
+ "results": results,
+ "cmd": ' '.join(cmds)}
+
+ if proc.returncode == 0:
+ if output:
+ if output_type == 'json':
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as err:
+ if "No JSON object could be decoded" in err.args:
+ err = err.args
+ elif output_type == 'raw':
+ rval['results'] = stdout
+
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if err:
+ rval.update({"err": err,
+ "stderr": stderr,
+ "stdout": stdout,
+ "cmd": cmds})
+
+ else:
+ rval.update({"stderr": stderr,
+ "stdout": stdout,
+ "results": {}})
+
+ return rval
+
+
+class Utils(object):
+ ''' utilities for openshiftcli modules '''
+ @staticmethod
+ def create_file(rname, data, ftype='yaml'):
+ ''' create a file in tmp with name and contents'''
+ path = os.path.join('/tmp', rname)
+ with open(path, 'w') as fds:
+ if ftype == 'yaml':
+ fds.write(yaml.dump(data, Dumper=yaml.RoundTripDumper))
+
+ elif ftype == 'json':
+ fds.write(json.dumps(data))
+ else:
+ fds.write(data)
+
+ # Register cleanup when module is done
+ atexit.register(Utils.cleanup, [path])
+ return path
+
+ @staticmethod
+ def create_files_from_contents(content, content_type=None):
+ '''Turn an array of dict: filename, content into a files array'''
+ if not isinstance(content, list):
+ content = [content]
+ files = []
+ for item in content:
+ path = Utils.create_file(item['path'], item['data'], ftype=content_type)
+ files.append({'name': os.path.basename(path), 'path': path})
+ return files
+
+ @staticmethod
+ def cleanup(files):
+ '''Clean up on exit '''
+ for sfile in files:
+ if os.path.exists(sfile):
+ if os.path.isdir(sfile):
+ shutil.rmtree(sfile)
+ elif os.path.isfile(sfile):
+ os.remove(sfile)
+
+ @staticmethod
+ def exists(results, _name):
+ ''' Check to see if the results include the name '''
+ if not results:
+ return False
+
+ if Utils.find_result(results, _name):
+ return True
+
+ return False
+
+ @staticmethod
+ def find_result(results, _name):
+ ''' Find the specified result by name'''
+ rval = None
+ for result in results:
+ if 'metadata' in result and result['metadata']['name'] == _name:
+ rval = result
+ break
+
+ return rval
+
+ @staticmethod
+ def get_resource_file(sfile, sfile_type='yaml'):
+ ''' return the service file '''
+ contents = None
+ with open(sfile) as sfd:
+ contents = sfd.read()
+
+ if sfile_type == 'yaml':
+ contents = yaml.load(contents, yaml.RoundTripLoader)
+ elif sfile_type == 'json':
+ contents = json.loads(contents)
+
+ return contents
+
+ @staticmethod
+ def filter_versions(stdout):
+ ''' filter the oc version output '''
+
+ version_dict = {}
+ version_search = ['oc', 'openshift', 'kubernetes']
+
+ for line in stdout.strip().split('\n'):
+ for term in version_search:
+ if not line:
+ continue
+ if line.startswith(term):
+ version_dict[term] = line.split()[-1]
+
+ # horrible hack to get openshift version in Openshift 3.2
+ # By default, "oc version" in 3.2 does not return an "openshift" version
+ if "openshift" not in version_dict:
+ version_dict["openshift"] = version_dict["oc"]
+
+ return version_dict
+
+ @staticmethod
+ def add_custom_versions(versions):
+ ''' create custom versions strings '''
+
+ versions_dict = {}
+
+ for tech, version in versions.items():
+ # clean up "-" from version
+ if "-" in version:
+ version = version.split("-")[0]
+
+ if version.startswith('v'):
+ versions_dict[tech + '_numeric'] = version[1:].split('+')[0]
+ # "v3.3.0.33" is what we have, we want "3.3"
+ versions_dict[tech + '_short'] = version[1:4]
+
+ return versions_dict
+
+ @staticmethod
+ def openshift_installed():
+ ''' check if openshift is installed '''
+ import yum
+
+ yum_base = yum.YumBase()
+ if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
+ return True
+
+ return False
+
+ # Disabling too-many-branches. This is a yaml dictionary comparison function
+ # pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
+ @staticmethod
+ def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
+ ''' Given a user defined definition, compare it with the results given back by our query. '''
+
+ # Currently these values are autogenerated and we do not need to check them
+ skip = ['metadata', 'status']
+ if skip_keys:
+ skip.extend(skip_keys)
+
+ for key, value in result_def.items():
+ if key in skip:
+ continue
+
+ # Both are lists
+ if isinstance(value, list):
+ if key not in user_def:
+ if debug:
+ print('User data does not have key [%s]' % key)
+ print('User data: %s' % user_def)
+ return False
+
+ if not isinstance(user_def[key], list):
+ if debug:
+ print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key]))
+ return False
+
+ if len(user_def[key]) != len(value):
+ if debug:
+ print("List lengths are not equal.")
+ print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value)))
+ print("user_def: %s" % user_def[key])
+ print("value: %s" % value)
+ return False
+
+ for values in zip(user_def[key], value):
+ if isinstance(values[0], dict) and isinstance(values[1], dict):
+ if debug:
+ print('sending list - list')
+ print(type(values[0]))
+ print(type(values[1]))
+ result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
+ if not result:
+ print('list compare returned false')
+ return False
+
+ elif value != user_def[key]:
+ if debug:
+ print('value should be identical')
+ print(value)
+ print(user_def[key])
+ return False
+
+ # recurse on a dictionary
+ elif isinstance(value, dict):
+ if key not in user_def:
+ if debug:
+ print("user_def does not have key [%s]" % key)
+ return False
+ if not isinstance(user_def[key], dict):
+ if debug:
+ print("dict returned false: not instance of dict")
+ return False
+
+ # before passing ensure keys match
+ api_values = set(value.keys()) - set(skip)
+ user_values = set(user_def[key].keys()) - set(skip)
+ if api_values != user_values:
+ if debug:
+ print("keys are not equal in dict")
+ print(api_values)
+ print(user_values)
+ return False
+
+ result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
+ if not result:
+ if debug:
+ print("dict returned false")
+ print(result)
+ return False
+
+ # Verify each key, value pair is the same
+ else:
+ if key not in user_def or value != user_def[key]:
+ if debug:
+ print("value not equal; user_def does not have key")
+ print(key)
+ print(value)
+ if key in user_def:
+ print(user_def[key])
+ return False
+
+ if debug:
+ print('returning true')
+ return True
+
+
+class OpenShiftCLIConfig(object):
+ '''Generic Config'''
+ def __init__(self, rname, namespace, kubeconfig, options):
+ self.kubeconfig = kubeconfig
+ self.name = rname
+ self.namespace = namespace
+ self._options = options
+
+ @property
+ def config_options(self):
+ ''' return config options '''
+ return self._options
+
+ def to_option_list(self):
+ '''return all options as a string'''
+ return self.stringify()
+
+ def stringify(self):
+ ''' return the options hash as cli params in a list '''
+ rval = []
+ for key, data in self.config_options.items():
+ if data['include'] \
+ and (data['value'] or isinstance(data['value'], int)):
+ rval.append('--%s=%s' % (key.replace('_', '-'), data['value']))
+
+ return rval
+
+
+
+# pylint: disable=too-many-instance-attributes
+class OCVersion(OpenShiftCLI):
+ ''' Class to wrap the oc command line tools '''
+ # pylint allows 5
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ config,
+ debug):
+ ''' Constructor for OCVersion '''
+ super(OCVersion, self).__init__(None, config)
+ self.debug = debug
+
+ def get(self):
+ '''get and return version information '''
+
+ results = {}
+
+ version_results = self._version()
+
+ if version_results['returncode'] == 0:
+ filtered_vers = Utils.filter_versions(version_results['results'])
+ custom_vers = Utils.add_custom_versions(filtered_vers)
+
+ results['returncode'] = version_results['returncode']
+ results.update(filtered_vers)
+ results.update(custom_vers)
+
+ return results
+
+ raise OpenShiftCLIError('Problem detecting openshift version.')
+
+ @staticmethod
+ def run_ansible(params):
+ '''run the idempotent ansible code'''
+ oc_version = OCVersion(params['kubeconfig'], params['debug'])
+
+ if params['state'] == 'list':
+
+ #pylint: disable=protected-access
+ result = oc_version.get()
+ return {'state': params['state'],
+ 'results': result,
+ 'changed': False}
+
+def main():
+ ''' ansible oc module for version '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
+ state=dict(default='list', type='str',
+ choices=['list']),
+ debug=dict(default=False, type='bool'),
+ ),
+ supports_check_mode=True,
+ )
+
+ rval = OCVersion.run_ansible(module.params)
+ if 'failed' in rval:
+ module.fail_json(**rval)
+
+
+ module.exit_json(**rval)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/roles/lib_openshift/src/ansible/oc_edit.py b/roles/lib_openshift/src/ansible/oc_edit.py
new file mode 100644
index 000000000..5c5954747
--- /dev/null
+++ b/roles/lib_openshift/src/ansible/oc_edit.py
@@ -0,0 +1,48 @@
+# pylint: skip-file
+# flake8: noqa
+
+
+def main():
+ '''
+ ansible oc module for editing objects
+ '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
+ state=dict(default='present', type='str',
+ choices=['present']),
+ debug=dict(default=False, type='bool'),
+ namespace=dict(default='default', type='str'),
+ name=dict(default=None, required=True, type='str'),
+ kind=dict(required=True,
+ type='str',
+ choices=['dc', 'deploymentconfig',
+ 'rc', 'replicationcontroller',
+ 'svc', 'service',
+ 'scc', 'securitycontextconstraints',
+ 'ns', 'namespace', 'project', 'projects',
+ 'is', 'imagestream',
+ 'istag', 'imagestreamtag',
+ 'bc', 'buildconfig',
+ 'routes',
+ 'node',
+ 'secret',
+ 'pv', 'persistentvolume']),
+ file_name=dict(default=None, type='str'),
+ file_format=dict(default='yaml', type='str'),
+ content=dict(default=None, required=True, type='dict'),
+ force=dict(default=False, type='bool'),
+ separator=dict(default='.', type='str'),
+ ),
+ supports_check_mode=True,
+ )
+
+ rval = Edit.run_ansible(module.params, module.check_mode)
+ if 'failed' in rval:
+ module.fail_json(**rval)
+
+ module.exit_json(**rval)
+
+if __name__ == '__main__':
+ main()
diff --git a/roles/lib_openshift/src/ansible/oc_obj.py b/roles/lib_openshift/src/ansible/oc_obj.py
new file mode 100644
index 000000000..701740e4f
--- /dev/null
+++ b/roles/lib_openshift/src/ansible/oc_obj.py
@@ -0,0 +1,37 @@
+# pylint: skip-file
+# flake8: noqa
+
+# pylint: disable=too-many-branches
+def main():
+ '''
+ ansible oc module for services
+ '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
+ state=dict(default='present', type='str',
+ choices=['present', 'absent', 'list']),
+ debug=dict(default=False, type='bool'),
+ namespace=dict(default='default', type='str'),
+ all_namespaces=dict(default=False, type='bool'),
+ name=dict(default=None, type='str'),
+ files=dict(default=None, type='list'),
+ kind=dict(required=True, type='str'),
+ delete_after=dict(default=False, type='bool'),
+ content=dict(default=None, type='dict'),
+ force=dict(default=False, type='bool'),
+ selector=dict(default=None, type='str'),
+ ),
+ mutually_exclusive=[["content", "files"]],
+
+ supports_check_mode=True,
+ )
+ rval = OCObject.run_ansible(module.params, module.check_mode)
+ if 'failed' in rval:
+ module.fail_json(**rval)
+
+ module.exit_json(**rval)
+
+if __name__ == '__main__':
+ main()
diff --git a/roles/lib_openshift/src/ansible/oc_route.py b/roles/lib_openshift/src/ansible/oc_route.py
new file mode 100644
index 000000000..c87e6738f
--- /dev/null
+++ b/roles/lib_openshift/src/ansible/oc_route.py
@@ -0,0 +1,84 @@
+# pylint: skip-file
+# flake8: noqa
+
+
+def get_cert_data(path, content):
+ '''get the data for a particular value'''
+ if not path and not content:
+ return None
+
+ rval = None
+ if path and os.path.exists(path) and os.access(path, os.R_OK):
+ rval = open(path).read()
+ elif content:
+ rval = content
+
+ return rval
+
+
+# pylint: disable=too-many-branches
+def main():
+ '''
+ ansible oc module for route
+ '''
+ module = AnsibleModule(
+ argument_spec=dict(
+ kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
+ state=dict(default='present', type='str',
+ choices=['present', 'absent', 'list']),
+ debug=dict(default=False, type='bool'),
+ name=dict(default=None, required=True, type='str'),
+ namespace=dict(default=None, required=True, type='str'),
+ tls_termination=dict(default=None, type='str'),
+ dest_cacert_path=dict(default=None, type='str'),
+ cacert_path=dict(default=None, type='str'),
+ cert_path=dict(default=None, type='str'),
+ key_path=dict(default=None, type='str'),
+ dest_cacert_content=dict(default=None, type='str'),
+ cacert_content=dict(default=None, type='str'),
+ cert_content=dict(default=None, type='str'),
+ key_content=dict(default=None, type='str'),
+ service_name=dict(default=None, type='str'),
+ host=dict(default=None, type='str'),
+ wildcard_policy=dict(default=None, type='str'),
+ weight=dict(default=None, type='int'),
+ ),
+ mutually_exclusive=[('dest_cacert_path', 'dest_cacert_content'),
+ ('cacert_path', 'cacert_content'),
+ ('cert_path', 'cert_content'),
+ ('key_path', 'key_content'), ],
+ supports_check_mode=True,
+ )
+ files = {'destcacert': {'path': module.params['dest_cacert_path'],
+ 'content': module.params['dest_cacert_content'],
+ 'value': None, },
+ 'cacert': {'path': module.params['cacert_path'],
+ 'content': module.params['cacert_content'],
+ 'value': None, },
+ 'cert': {'path': module.params['cert_path'],
+ 'content': module.params['cert_content'],
+ 'value': None, },
+ 'key': {'path': module.params['key_path'],
+ 'content': module.params['key_content'],
+ 'value': None, }, }
+
+ if module.params['tls_termination']:
+ for key, option in files.items():
+ if key == 'destcacert' and module.params['tls_termination'] != 'reencrypt':
+ continue
+
+ option['value'] = get_cert_data(option['path'], option['content'])
+
+ if not option['value']:
+ module.fail_json(msg='Verify that you pass a value for %s' % key)
+
+ results = OCRoute.run_ansible(module.params, files, module.check_mode)
+
+ if 'failed' in results:
+ module.fail_json(**results)
+
+ module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/roles/lib_openshift/src/ansible/oc_version.py b/roles/lib_openshift/src/ansible/oc_version.py
new file mode 100644
index 000000000..57ef849ca
--- /dev/null
+++ b/roles/lib_openshift/src/ansible/oc_version.py
@@ -0,0 +1,26 @@
+# pylint: skip-file
+# flake8: noqa
+
+def main():
+ ''' ansible oc module for version '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
+ state=dict(default='list', type='str',
+ choices=['list']),
+ debug=dict(default=False, type='bool'),
+ ),
+ supports_check_mode=True,
+ )
+
+ rval = OCVersion.run_ansible(module.params)
+ if 'failed' in rval:
+ module.fail_json(**rval)
+
+
+ module.exit_json(**rval)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/roles/lib_openshift/src/class/oc_edit.py b/roles/lib_openshift/src/class/oc_edit.py
new file mode 100644
index 000000000..0734e2085
--- /dev/null
+++ b/roles/lib_openshift/src/class/oc_edit.py
@@ -0,0 +1,94 @@
+# pylint: skip-file
+# flake8: noqa
+
+class Edit(OpenShiftCLI):
+ ''' Class to wrap the oc command line tools
+ '''
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ kind,
+ namespace,
+ resource_name=None,
+ kubeconfig='/etc/origin/master/admin.kubeconfig',
+ separator='.',
+ verbose=False):
+ ''' Constructor for Edit '''
+ super(Edit, self).__init__(namespace, kubeconfig)
+ self.namespace = namespace
+ self.kind = kind
+ self.name = resource_name
+ self.kubeconfig = kubeconfig
+ self.separator = separator
+ self.verbose = verbose
+
+ def get(self):
+ '''return the object by name '''
+ return self._get(self.kind, self.name)
+
+ def update(self, file_name, content, force=False, content_type='yaml'):
+ '''run update '''
+ if file_name:
+ if content_type == 'yaml':
+ data = yaml.load(open(file_name))
+ elif content_type == 'json':
+ data = json.loads(open(file_name).read())
+
+ changes = []
+ yed = Yedit(filename=file_name, content=data, separator=self.separator)
+ for key, value in content.items():
+ changes.append(yed.put(key, value))
+
+ if any([not change[0] for change in changes]):
+ return {'returncode': 0, 'updated': False}
+
+ yed.write()
+
+ atexit.register(Utils.cleanup, [file_name])
+
+ return self._replace(file_name, force=force)
+
+ return self._replace_content(self.kind, self.name, content, force=force, sep=self.separator)
+
+ @staticmethod
+ def run_ansible(params, check_mode):
+ '''run the ansible idempotent code'''
+
+ ocedit = Edit(params['kind'],
+ params['namespace'],
+ params['name'],
+ kubeconfig=params['kubeconfig'],
+ separator=params['separator'],
+ verbose=params['debug'])
+
+ api_rval = ocedit.get()
+
+ ########
+ # Create
+ ########
+ if not Utils.exists(api_rval['results'], params['name']):
+ return {"failed": True, 'msg': api_rval}
+
+ ########
+ # Update
+ ########
+ if check_mode:
+ return {'changed': True, 'msg': 'CHECK_MODE: Would have performed edit'}
+
+ api_rval = ocedit.update(params['file_name'],
+ params['content'],
+ params['force'],
+ params['file_format'])
+
+ if api_rval['returncode'] != 0:
+ return {"failed": True, 'msg': api_rval}
+
+ if 'updated' in api_rval and not api_rval['updated']:
+ return {"changed": False, 'results': api_rval, 'state': 'present'}
+
+ # return the created object
+ api_rval = ocedit.get()
+
+ if api_rval['returncode'] != 0:
+ return {"failed": True, 'msg': api_rval}
+
+ return {"changed": True, 'results': api_rval, 'state': 'present'}
diff --git a/roles/lib_openshift/src/class/oc_obj.py b/roles/lib_openshift/src/class/oc_obj.py
new file mode 100644
index 000000000..9d0b8e45b
--- /dev/null
+++ b/roles/lib_openshift/src/class/oc_obj.py
@@ -0,0 +1,193 @@
+# pylint: skip-file
+# flake8: noqa
+
+# pylint: disable=too-many-instance-attributes
+class OCObject(OpenShiftCLI):
+ ''' Class to wrap the oc command line tools '''
+
+ # pylint allows 5. we need 6
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ kind,
+ namespace,
+ rname=None,
+ selector=None,
+ kubeconfig='/etc/origin/master/admin.kubeconfig',
+ verbose=False,
+ all_namespaces=False):
+ ''' Constructor for OCObject '''
+ super(OCObject, self).__init__(namespace, kubeconfig,
+ all_namespaces=all_namespaces)
+ self.kind = kind
+ self.namespace = namespace
+ self.name = rname
+ self.selector = selector
+ self.kubeconfig = kubeconfig
+ self.verbose = verbose
+
+ def get(self):
+ '''return a kind by name '''
+ results = self._get(self.kind, rname=self.name, selector=self.selector)
+ if results['returncode'] != 0 and 'stderr' in results and \
+ '\"%s\" not found' % self.name in results['stderr']:
+ results['returncode'] = 0
+
+ return results
+
+ def delete(self):
+ '''delete the object '''
+ return self._delete(self.kind, self.name)
+
+ def create(self, files=None, content=None):
+ '''
+ Create a config
+
+ NOTE: This creates the first file OR the first content.
+ TODO: Handle all files and content passed in
+ '''
+ if files:
+ return self._create(files[0])
+
+ content['data'] = yaml.dump(content['data'])
+ content_file = Utils.create_files_from_contents(content)[0]
+
+ return self._create(content_file['path'])
+
+ # pylint: disable=too-many-function-args
+ def update(self, files=None, content=None, force=False):
+ '''update a current openshift object
+
+ This receives a list of file names or content
+ and takes the first and calls replace.
+
+ TODO: take an entire list
+ '''
+ if files:
+ return self._replace(files[0], force)
+
+ if content and 'data' in content:
+ content = content['data']
+
+ return self.update_content(content, force)
+
+ def update_content(self, content, force=False):
+ '''update an object through using the content param'''
+ return self._replace_content(self.kind, self.name, content, force=force)
+
+ def needs_update(self, files=None, content=None, content_type='yaml'):
+ ''' check to see if we need to update '''
+ objects = self.get()
+ if objects['returncode'] != 0:
+ return objects
+
+ # pylint: disable=no-member
+ data = None
+ if files:
+ data = Utils.get_resource_file(files[0], content_type)
+ elif content and 'data' in content:
+ data = content['data']
+ else:
+ data = content
+
+ # if equal then no need. So not equal is True
+ return not Utils.check_def_equal(data, objects['results'][0], skip_keys=None, debug=False)
+
+ # pylint: disable=too-many-return-statements,too-many-branches
+ @staticmethod
+ def run_ansible(params, check_mode=False):
+ '''perform the ansible idempotent code'''
+
+ ocobj = OCObject(params['kind'],
+ params['namespace'],
+ params['name'],
+ params['selector'],
+ kubeconfig=params['kubeconfig'],
+ verbose=params['debug'],
+ all_namespaces=params['all_namespaces'])
+
+ state = params['state']
+
+ api_rval = ocobj.get()
+
+ #####
+ # Get
+ #####
+ if state == 'list':
+ return {'changed': False, 'results': api_rval, 'state': 'list'}
+
+ if not params['name']:
+ return {'failed': True, 'msg': 'Please specify a name when state is absent|present.'} # noqa: E501
+
+ ########
+ # Delete
+ ########
+ if state == 'absent':
+ if not Utils.exists(api_rval['results'], params['name']):
+ return {'changed': False, 'state': 'absent'}
+
+ if check_mode:
+ return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a delete'}
+
+ api_rval = ocobj.delete()
+
+ return {'changed': True, 'results': api_rval, 'state': 'absent'}
+
+ if state == 'present':
+ ########
+ # Create
+ ########
+ if not Utils.exists(api_rval['results'], params['name']):
+
+ if check_mode:
+ return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a create'}
+
+ # Create it here
+ api_rval = ocobj.create(params['files'], params['content'])
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ # return the created object
+ api_rval = ocobj.get()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ # Remove files
+ if params['files'] and params['delete_after']:
+ Utils.cleanup(params['files'])
+
+ return {'changed': True, 'results': api_rval, 'state': "present"}
+
+ ########
+ # Update
+ ########
+ # if a file path is passed, use it.
+ update = ocobj.needs_update(params['files'], params['content'])
+ if not isinstance(update, bool):
+ return {'failed': True, 'msg': update}
+
+ # No changes
+ if not update:
+ if params['files'] and params['delete_after']:
+ Utils.cleanup(params['files'])
+
+ return {'changed': False, 'results': api_rval['results'][0], 'state': "present"}
+
+ if check_mode:
+ return {'changed': True, 'msg': 'CHECK_MODE: Would have performed an update.'}
+
+ api_rval = ocobj.update(params['files'],
+ params['content'],
+ params['force'])
+
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ # return the created object
+ api_rval = ocobj.get()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, 'state': "present"}
diff --git a/roles/lib_openshift/src/class/oc_route.py b/roles/lib_openshift/src/class/oc_route.py
new file mode 100644
index 000000000..42af2c01c
--- /dev/null
+++ b/roles/lib_openshift/src/class/oc_route.py
@@ -0,0 +1,170 @@
+# pylint: skip-file
+# flake8: noqa
+
+
+# pylint: disable=too-many-instance-attributes
+class OCRoute(OpenShiftCLI):
+ ''' Class to wrap the oc command line tools '''
+ kind = 'route'
+
+ def __init__(self,
+ config,
+ verbose=False):
+ ''' Constructor for OCRoute '''
+ super(OCRoute, self).__init__(config.namespace, config.kubeconfig)
+ self.config = config
+ self.namespace = config.namespace
+ self._route = None
+
+ @property
+ def route(self):
+ ''' property function for route'''
+ if not self._route:
+ self.get()
+ return self._route
+
+ @route.setter
+ def route(self, data):
+ ''' setter function for route '''
+ self._route = data
+
+ def exists(self):
+ ''' return whether a route exists '''
+ if self.route:
+ return True
+
+ return False
+
+ def get(self):
+ '''return route information '''
+ result = self._get(self.kind, self.config.name)
+ if result['returncode'] == 0:
+ self.route = Route(content=result['results'][0])
+ elif 'routes \"%s\" not found' % self.config.name in result['stderr']:
+ result['returncode'] = 0
+ result['results'] = [{}]
+
+ return result
+
+ def delete(self):
+ '''delete the object'''
+ return self._delete(self.kind, self.config.name)
+
+ def create(self):
+ '''create the object'''
+ return self._create_from_content(self.config.name, self.config.data)
+
+ def update(self):
+ '''update the object'''
+ # need to update the tls information and the service name
+ return self._replace_content(self.kind, self.config.name, self.config.data)
+
+ def needs_update(self):
+ ''' verify an update is needed '''
+ skip = []
+ return not Utils.check_def_equal(self.config.data, self.route.yaml_dict, skip_keys=skip, debug=True)
+
+ # pylint: disable=too-many-return-statements,too-many-branches
+ @staticmethod
+ def run_ansible(params, files, check_mode=False):
+ ''' run the idempotent ansible code
+
+ params comes from the ansible portion for this module
+ files: a dictionary for the certificates
+ {'cert': {'path': '',
+ 'content': '',
+ 'value': ''
+ }
+ }
+ check_mode: does the module support check mode. (module.check_mode)
+ '''
+
+ rconfig = RouteConfig(params['name'],
+ params['namespace'],
+ params['kubeconfig'],
+ files['destcacert']['value'],
+ files['cacert']['value'],
+ files['cert']['value'],
+ files['key']['value'],
+ params['host'],
+ params['tls_termination'],
+ params['service_name'],
+ params['wildcard_policy'],
+ params['weight'])
+
+ oc_route = OCRoute(rconfig, verbose=params['debug'])
+
+ state = params['state']
+
+ api_rval = oc_route.get()
+
+ #####
+ # Get
+ #####
+ if state == 'list':
+ return {'changed': False,
+ 'results': api_rval['results'],
+ 'state': 'list'}
+
+ ########
+ # Delete
+ ########
+ if state == 'absent':
+ if oc_route.exists():
+
+ if check_mode:
+ return {'changed': False, 'msg': 'CHECK_MODE: Would have performed a delete.'} # noqa: E501
+
+ api_rval = oc_route.delete()
+
+ return {'changed': True, 'results': api_rval, 'state': "absent"} # noqa: E501
+ return {'changed': False, 'state': 'absent'}
+
+ if state == 'present':
+ ########
+ # Create
+ ########
+ if not oc_route.exists():
+
+ if check_mode:
+ return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a create.'} # noqa: E501
+
+ # Create it here
+ api_rval = oc_route.create()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval, 'state': "present"} # noqa: E501
+
+ # return the created object
+ api_rval = oc_route.get()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval, 'state': "present"} # noqa: E501
+
+ return {'changed': True, 'results': api_rval, 'state': "present"} # noqa: E501
+
+ ########
+ # Update
+ ########
+ if oc_route.needs_update():
+
+ if check_mode:
+ return {'changed': True, 'msg': 'CHECK_MODE: Would have performed an update.'} # noqa: E501
+
+ api_rval = oc_route.update()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval, 'state': "present"} # noqa: E501
+
+ # return the created object
+ api_rval = oc_route.get()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval, 'state': "present"} # noqa: E501
+
+ return {'changed': True, 'results': api_rval, 'state': "present"} # noqa: E501
+
+ return {'changed': False, 'results': api_rval, 'state': "present"}
+
+ # catch all
+ return {'failed': True, 'msg': "Unknown State passed"}
diff --git a/roles/lib_openshift/src/class/oc_version.py b/roles/lib_openshift/src/class/oc_version.py
new file mode 100644
index 000000000..7f8c721d8
--- /dev/null
+++ b/roles/lib_openshift/src/class/oc_version.py
@@ -0,0 +1,47 @@
+# flake8: noqa
+# pylint: skip-file
+
+
+# pylint: disable=too-many-instance-attributes
+class OCVersion(OpenShiftCLI):
+ ''' Class to wrap the oc command line tools '''
+ # pylint allows 5
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ config,
+ debug):
+ ''' Constructor for OCVersion '''
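+ # note: the 'config' argument is the kubeconfig path; it is passed through
+ # to OpenShiftCLI as kubeconfig (run_ansible below supplies params['kubeconfig'])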
+ super(OCVersion, self).__init__(None, config)
+ self.debug = debug
+
+ def get(self):
+ '''get and return version information '''
+
+ results = {}
+
+ version_results = self._version()
+
+ if version_results['returncode'] == 0:
+ filtered_vers = Utils.filter_versions(version_results['results'])
+ custom_vers = Utils.add_custom_versions(filtered_vers)
+
+ results['returncode'] = version_results['returncode']
+ results.update(filtered_vers)
+ results.update(custom_vers)
+
+ return results
+
+ raise OpenShiftCLIError('Problem detecting openshift version.')
+
+ @staticmethod
+ def run_ansible(params):
+ '''run the idempotent ansible code'''
+ oc_version = OCVersion(params['kubeconfig'], params['debug'])
+
+ if params['state'] == 'list':
+
+ #pylint: disable=protected-access
+ result = oc_version.get()
+ return {'state': params['state'],
+ 'results': result,
+ 'changed': False}
diff --git a/roles/lib_openshift/src/doc/edit b/roles/lib_openshift/src/doc/edit
new file mode 100644
index 000000000..212d88f65
--- /dev/null
+++ b/roles/lib_openshift/src/doc/edit
@@ -0,0 +1,116 @@
+# flake8: noqa
+# pylint: skip-file
+
+DOCUMENTATION = '''
+---
+module: oc_edit
+short_description: Modify and idempotently manage openshift objects.
+description:
+ - Modify openshift objects programmatically.
+options:
+ state:
+ description:
+ - Currently present is only supported state.
+ required: true
+ default: present
+ choices: ["present"]
+ aliases: []
+ kubeconfig:
+ description:
+ - The path for the kubeconfig file to use for authentication
+ required: false
+ default: /etc/origin/master/admin.kubeconfig
+ aliases: []
+ debug:
+ description:
+ - Turn on debug output.
+ required: false
+ default: False
+ aliases: []
+ name:
+ description:
+ - Name of the object that is being queried.
+ required: true
+ default: None
+ aliases: []
+ namespace:
+ description:
+ - The namespace where the object lives.
+ required: false
+ default: default
+ aliases: []
+ kind:
+ description:
+ - The kind attribute of the object.
+ required: True
+ default: None
+ choices:
+ - bc
+ - buildconfig
+ - configmaps
+ - dc
+ - deploymentconfig
+ - imagestream
+ - imagestreamtag
+ - is
+ - istag
+ - namespace
+ - project
+ - projects
+ - node
+ - ns
+ - persistentvolume
+ - pv
+ - rc
+ - replicationcontroller
+ - routes
+ - scc
+ - secret
+ - securitycontextconstraints
+ - service
+ - svc
+ aliases: []
+ file_name:
+ description:
+ - The file name in which to edit
+ required: false
+ default: None
+ aliases: []
+ file_format:
+ description:
+ - The format of the file being edited.
+ required: false
+ default: yaml
+ aliases: []
+ content:
+ description:
+ - Content of the file
+ required: true
+ default: None
+ aliases: []
+ force:
+ description:
+ - Whether or not to force the operation
+ required: false
+ default: False
+ aliases: []
+ separator:
+ description:
+ - The separator format for the edit.
+ required: false
+ default: '.'
+ aliases: []
+author:
+- "Kenny Woodson <kwoodson@redhat.com>"
+extends_documentation_fragment: []
+'''
+
+EXAMPLES = '''
+oc_edit:
+ kind: rc
+ name: hawkular-cassandra-rc
+ namespace: openshift-infra
+ content:
+ spec.template.spec.containers[0].resources.limits.memory: 512
+ spec.template.spec.containers[0].resources.requests.memory: 256
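+
+# Illustrative sketch only; the deploymentconfig name and replica count are hypothetical.
+oc_edit:
+ kind: dc
+ name: my-app
+ namespace: my-project
+ content:
+ spec.replicas: 3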
+'''
diff --git a/roles/lib_openshift/src/doc/generated b/roles/lib_openshift/src/doc/generated
new file mode 100644
index 000000000..b55d18cff
--- /dev/null
+++ b/roles/lib_openshift/src/doc/generated
@@ -0,0 +1,10 @@
+#!/usr/bin/env python
+# pylint: disable=missing-docstring
+# flake8: noqa: T001
+# ___ ___ _ _ ___ ___ _ _____ ___ ___
+# / __| __| \| | __| _ \ /_\_ _| __| \
+# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
+# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
+# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
+# | |) | (_) | | .` | (_) || | | _|| |) | | | |
+# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
diff --git a/roles/lib_openshift/src/doc/license b/roles/lib_openshift/src/doc/license
new file mode 100644
index 000000000..717bb7f17
--- /dev/null
+++ b/roles/lib_openshift/src/doc/license
@@ -0,0 +1,16 @@
+#
+# Copyright 2016 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
diff --git a/roles/lib_openshift/src/doc/obj b/roles/lib_openshift/src/doc/obj
new file mode 100644
index 000000000..e44843eb3
--- /dev/null
+++ b/roles/lib_openshift/src/doc/obj
@@ -0,0 +1,95 @@
+# flake8: noqa
+# pylint: skip-file
+
+DOCUMENTATION = '''
+---
+module: oc_obj
+short_description: Generic interface to openshift objects
+description:
+ - Manage openshift objects programmatically.
+options:
+ state:
+ description:
+ - Currently present is only supported state.
+ required: true
+ default: present
+ choices: ["present", "absent", "list"]
+ aliases: []
+ kubeconfig:
+ description:
+ - The path for the kubeconfig file to use for authentication
+ required: false
+ default: /etc/origin/master/admin.kubeconfig
+ aliases: []
+ debug:
+ description:
+ - Turn on debug output.
+ required: false
+ default: False
+ aliases: []
+ name:
+ description:
+ - Name of the object that is being queried.
+ required: false
+ default: None
+ aliases: []
+ namespace:
+ description:
+ - The namespace where the object lives.
+ required: false
+ default: default
+ aliases: []
+ all_namespaces:
+ description:
+ - Whether to query the object across all namespaces.
+ required: false
+ default: false
+ aliases: []
+ kind:
+ description:
+ - The kind attribute of the object. e.g. dc, bc, svc, route
+ required: True
+ default: None
+ aliases: []
+ files:
+ description:
+ - A list of files provided for object
+ required: false
+ default: None
+ aliases: []
+ delete_after:
+ description:
+ - Whether or not to delete the files after processing them.
+ required: false
+ default: false
+ aliases: []
+ content:
+ description:
+ - Content of the object being managed.
+ required: false
+ default: None
+ aliases: []
+ force:
+ description:
+ - Whether or not to force the operation
+ required: false
+ default: False
+ aliases: []
+ selector:
+ description:
+ - Selector that gets added to the query.
+ required: false
+ default: None
+ aliases: []
+author:
+- "Kenny Woodson <kwoodson@redhat.com>"
+extends_documentation_fragment: []
+'''
+
+EXAMPLES = '''
+oc_obj:
+ kind: dc
+ name: router
+ namespace: default
+register: router_output
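+
+# Illustrative sketch only; lists routes across all namespaces, no object name needed.
+oc_obj:
+ state: list
+ kind: routes
+ all_namespaces: True
+register: route_list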
+'''
diff --git a/roles/lib_openshift/src/doc/route b/roles/lib_openshift/src/doc/route
new file mode 100644
index 000000000..1797d4d33
--- /dev/null
+++ b/roles/lib_openshift/src/doc/route
@@ -0,0 +1,120 @@
+# flake8: noqa
+# pylint: skip-file
+
+DOCUMENTATION = '''
+---
+module: oc_route
+short_description: Create, modify, and idempotently manage openshift routes.
+description:
+ - Manage openshift route objects programmatically.
+options:
+ state:
+ description:
+ - State represents whether to create, modify, delete, or list
+ required: true
+ default: present
+ choices: ["present", "absent", "list"]
+ aliases: []
+ kubeconfig:
+ description:
+ - The path for the kubeconfig file to use for authentication
+ required: false
+ default: /etc/origin/master/admin.kubeconfig
+ aliases: []
+ debug:
+ description:
+ - Turn on debug output.
+ required: false
+ default: False
+ aliases: []
+ name:
+ description:
+ - Name of the object that is being queried.
+ required: true
+ default: None
+ aliases: []
+ namespace:
+ description:
+ - The namespace where the object lives.
+ required: true
+ default: None
+ aliases: []
+ tls_termination:
+ description:
+ - The options for termination. e.g. reencrypt
+ required: false
+ default: None
+ aliases: []
+ dest_cacert_path:
+ description:
+ - The path to the dest_cacert
+ required: false
+ default: None
+ aliases: []
+ cacert_path:
+ description:
+ - The path to the cacert
+ required: false
+ default: None
+ aliases: []
+ cert_path:
+ description:
+ - The path to the cert
+ required: false
+ default: None
+ aliases: []
+ key_path:
+ description:
+ - The path to the key
+ required: false
+ default: None
+ aliases: []
+ dest_cacert_content:
+ description:
+ - The dest_cacert content
+ required: false
+ default: None
+ aliases: []
+ cacert_content:
+ description:
+ - The cacert content
+ required: false
+ default: None
+ aliases: []
+ cert_content:
+ description:
+ - The cert content
+ required: false
+ default: None
+ aliases: []
+ service_name:
+ description:
+ - The name of the service that this route points to.
+ required: false
+ default: None
+ aliases: []
+ host:
+ description:
+ - The host that the route will use. e.g. myapp.x.y.z
+ required: false
+ default: None
+ aliases: []
+author:
+- "Kenny Woodson <kwoodson@redhat.com>"
+extends_documentation_fragment: []
+'''
+
+EXAMPLES = '''
+- name: Configure certificates for reencrypt route
+ oc_route:
+ name: myapproute
+ namespace: awesomeapp
+ cert_path: "/etc/origin/master/named_certificates/myapp_cert"
+ key_path: "/etc/origin/master/named_certificates/myapp_key"
+ cacert_path: "/etc/origin/master/named_certificates/myapp_cacert"
+ dest_cacert_content: "{{ dest_cacert_content }}"
+ service_name: myapp_php
+ host: myapp.awesomeapp.openshift.com
+ tls_termination: reencrypt
+ run_once: true
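+
+# Illustrative sketch only; the service and host names here are hypothetical.
+- name: Create a route without TLS termination
+ oc_route:
+ name: frontend
+ namespace: frontend
+ service_name: frontend-svc
+ host: frontend.apps.example.com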
+'''
diff --git a/roles/lib_openshift/src/doc/version b/roles/lib_openshift/src/doc/version
new file mode 100644
index 000000000..c0fdd53e7
--- /dev/null
+++ b/roles/lib_openshift/src/doc/version
@@ -0,0 +1,40 @@
+# flake8: noqa
+# pylint: skip-file
+
+DOCUMENTATION = '''
+---
+module: oc_version
+short_description: Return the current openshift version
+description:
+ - Return the installed openshift version, as reported by `oc version`.
+options:
+ state:
+ description:
+ - Currently list is only supported state.
+ required: true
+ default: list
+ choices: ["list"]
+ aliases: []
+ kubeconfig:
+ description:
+ - The path for the kubeconfig file to use for authentication
+ required: false
+ default: /etc/origin/master/admin.kubeconfig
+ aliases: []
+ debug:
+ description:
+ - Turn on debug output.
+ required: false
+ default: False
+ aliases: []
+author:
+- "Kenny Woodson <kwoodson@redhat.com>"
+extends_documentation_fragment: []
+'''
+
+EXAMPLES = '''
+- name: get oc version
+ oc_version:
+ register: oc_version
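+
+# Illustrative follow-up, assuming an 'oc' line is present in the `oc version` output.
+- debug:
+ msg: "oc short version is {{ oc_version.results.oc_short }}"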
+'''
diff --git a/roles/lib_openshift/src/generate.py b/roles/lib_openshift/src/generate.py
new file mode 100755
index 000000000..6daade108
--- /dev/null
+++ b/roles/lib_openshift/src/generate.py
@@ -0,0 +1,75 @@
+#!/usr/bin/env python
+'''
+ Generate the openshift-ansible/roles/lib_openshift_cli/library/ modules.
+'''
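+# Typical usage (a sketch): `./generate.py` rewrites the modules under ../library/
+# from sources.yml; `./generate.py --verify` fails if ../library/ has drifted.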
+
+import argparse
+import os
+import yaml
+import six
+
+OPENSHIFT_ANSIBLE_PATH = os.path.dirname(os.path.realpath(__file__))
+OPENSHIFT_ANSIBLE_SOURCES_PATH = os.path.join(OPENSHIFT_ANSIBLE_PATH, 'sources.yml') # noqa: E501
+LIBRARY = os.path.join(OPENSHIFT_ANSIBLE_PATH, '..', 'library/')
+
+
+class GenerateAnsibleException(Exception):
+ '''General Exception for generate function'''
+ pass
+
+
+def parse_args():
+ '''parse arguments to generate'''
+ parser = argparse.ArgumentParser(description="Generate ansible modules.")
+ parser.add_argument('--verify', action='store_true', default=False,
+ help='Verify library code matches the generated code.')
+
+ return parser.parse_args()
+
+
+def generate(parts):
+ '''generate the source code for the ansible modules'''
+
+ data = six.StringIO()
+ for fpart in parts:
+ # the leading lint-disable lines are skipped
+ with open(os.path.join(OPENSHIFT_ANSIBLE_PATH, fpart)) as pfd:
+ for idx, line in enumerate(pfd):
+ if idx in [0, 1] and ('flake8: noqa' in line or 'pylint: skip-file' in line): # noqa: E501
+ continue
+
+ data.write(line)
+
+ return data
+
+
+def get_sources():
+ '''return the path to the generate sources'''
+ return yaml.load(open(OPENSHIFT_ANSIBLE_SOURCES_PATH).read())
+
+
+def verify():
+ '''verify if the generated code matches the library code'''
+ for fname, parts in get_sources().items():
+ data = generate(parts)
+ fname = os.path.join(LIBRARY, fname)
+ if not open(fname).read() == data.getvalue():
+ raise GenerateAnsibleException('Generated content does not match for %s' % fname)
+
+
+def main():
+ ''' combine the necessary files to create the ansible module '''
+ args = parse_args()
+ if args.verify:
+ verify()
+
+ for fname, parts in get_sources().items():
+ data = generate(parts)
+ fname = os.path.join(LIBRARY, fname)
+ with open(fname, 'w') as afd:
+ afd.seek(0)
+ afd.write(data.getvalue())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/roles/lib_openshift/src/lib/base.py b/roles/lib_openshift/src/lib/base.py
new file mode 100644
index 000000000..db5f4e890
--- /dev/null
+++ b/roles/lib_openshift/src/lib/base.py
@@ -0,0 +1,522 @@
+# pylint: skip-file
+# flake8: noqa
+# pylint: disable=too-many-lines
+# noqa: E301,E302,E303,T001
+
+
+class OpenShiftCLIError(Exception):
+ '''Exception class for openshiftcli'''
+ pass
+
+
+# pylint: disable=too-few-public-methods
+class OpenShiftCLI(object):
+ ''' Class to wrap the command line tools '''
+ def __init__(self,
+ namespace,
+ kubeconfig='/etc/origin/master/admin.kubeconfig',
+ verbose=False,
+ all_namespaces=False):
+ ''' Constructor for OpenshiftCLI '''
+ self.namespace = namespace
+ self.verbose = verbose
+ self.kubeconfig = kubeconfig
+ self.all_namespaces = all_namespaces
+
+ # Pylint allows only 5 arguments to be passed.
+ # pylint: disable=too-many-arguments
+ def _replace_content(self, resource, rname, content, force=False, sep='.'):
+ ''' replace the current object with the content '''
+ res = self._get(resource, rname)
+ if not res['results']:
+ return res
+
+ fname = '/tmp/%s' % rname
+ yed = Yedit(fname, res['results'][0], separator=sep)
+ changes = []
+ for key, value in content.items():
+ changes.append(yed.put(key, value))
+
+ if any([change[0] for change in changes]):
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self._replace(fname, force)
+
+ return {'returncode': 0, 'updated': False}
+
+ def _replace(self, fname, force=False):
+ '''replace the current object with oc replace'''
+ cmd = ['replace', '-f', fname]
+ if force:
+ cmd.append('--force')
+ return self.openshift_cmd(cmd)
+
+ def _create_from_content(self, rname, content):
+ '''create a temporary file and then call oc create on it'''
+ fname = '/tmp/%s' % rname
+ yed = Yedit(fname, content=content)
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self._create(fname)
+
+ def _create(self, fname):
+ '''call oc create on a filename'''
+ return self.openshift_cmd(['create', '-f', fname])
+
+ def _delete(self, resource, rname, selector=None):
+ '''call oc delete on a resource'''
+ cmd = ['delete', resource, rname]
+ if selector:
+ cmd.append('--selector=%s' % selector)
+
+ return self.openshift_cmd(cmd)
+
+ def _process(self, template_name, create=False, params=None, template_data=None): # noqa: E501
+ '''process a template
+
+ template_name: the name of the template to process
+ create: whether to send to oc create after processing
+ params: the parameters for the template
+ template_data: the incoming template's data; instead of a file
+ '''
+ cmd = ['process']
+ if template_data:
+ cmd.extend(['-f', '-'])
+ else:
+ cmd.append(template_name)
+ if params:
+ param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ cmd.append('-v')
+ cmd.extend(param_str)
+
+ results = self.openshift_cmd(cmd, output=True, input_data=template_data)
+
+ if results['returncode'] != 0 or not create:
+ return results
+
+ fname = '/tmp/%s' % template_name
+ yed = Yedit(fname, results['results'])
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self.openshift_cmd(['create', '-f', fname])
+
+ def _get(self, resource, rname=None, selector=None):
+ '''return a resource by name '''
+ cmd = ['get', resource]
+ if selector:
+ cmd.append('--selector=%s' % selector)
+
+ cmd.extend(['-o', 'json'])
+
+ if rname:
+ cmd.append(rname)
+
+ rval = self.openshift_cmd(cmd, output=True)
+
+ # Ensure results are returned in an array
+ if 'items' in rval:
+ rval['results'] = rval['items']
+ elif not isinstance(rval['results'], list):
+ rval['results'] = [rval['results']]
+
+ return rval
+
+ def _schedulable(self, node=None, selector=None, schedulable=True):
+ ''' perform oadm manage-node schedulable '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector=%s' % selector)
+
+ cmd.append('--schedulable=%s' % schedulable)
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
+
+ def _list_pods(self, node=None, selector=None, pod_selector=None):
+ ''' perform oadm list pods
+
+ node: the node in which to list pods
+ selector: the label selector filter if provided
+ pod_selector: the pod selector filter if provided
+ '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector=%s' % selector)
+
+ if pod_selector:
+ cmd.append('--pod-selector=%s' % pod_selector)
+
+ cmd.extend(['--list-pods', '-o', 'json'])
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
+
+ # pylint: disable=too-many-arguments
+ def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
+ ''' perform oadm manage-node evacuate '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector=%s' % selector)
+
+ if dry_run:
+ cmd.append('--dry-run')
+
+ if pod_selector:
+ cmd.append('--pod-selector=%s' % pod_selector)
+
+ if grace_period:
+ cmd.append('--grace-period=%s' % int(grace_period))
+
+ if force:
+ cmd.append('--force')
+
+ cmd.append('--evacuate')
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
+
+ def _version(self):
+ ''' return the openshift version'''
+ return self.openshift_cmd(['version'], output=True, output_type='raw')
+
+ def _import_image(self, url=None, name=None, tag=None):
+ ''' perform image import '''
+ cmd = ['import-image']
+
+ image = '{0}'.format(name)
+ if tag:
+ image += ':{0}'.format(tag)
+
+ cmd.append(image)
+
+ if url:
+ cmd.append('--from={0}/{1}'.format(url, image))
+
+ cmd.append('-n{0}'.format(self.namespace))
+
+ cmd.append('--confirm')
+ return self.openshift_cmd(cmd)
+
+ # pylint: disable=too-many-arguments,too-many-branches
+ def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
+ '''Base command for oc '''
+ cmds = []
+ if oadm:
+ cmds = ['/usr/bin/oadm']
+ else:
+ cmds = ['/usr/bin/oc']
+
+ if self.all_namespaces:
+ cmds.extend(['--all-namespaces'])
+ elif self.namespace:
+ cmds.extend(['-n', self.namespace])
+
+ cmds.extend(cmd)
+
+ rval = {}
+ results = ''
+ err = None
+
+ if self.verbose:
+ print(' '.join(cmds))
+
+ proc = subprocess.Popen(cmds,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ env={'KUBECONFIG': self.kubeconfig})
+
+ stdout, stderr = proc.communicate(input_data)
+ rval = {"returncode": proc.returncode,
+ "results": results,
+ "cmd": ' '.join(cmds)}
+
+ if proc.returncode == 0:
+ if output:
+ if output_type == 'json':
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as err:
+ if "No JSON object could be decoded" in err.args:
+ err = err.args
+ elif output_type == 'raw':
+ rval['results'] = stdout
+
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if err:
+ rval.update({"err": err,
+ "stderr": stderr,
+ "stdout": stdout,
+ "cmd": cmds})
+
+ else:
+ rval.update({"stderr": stderr,
+ "stdout": stdout,
+ "results": {}})
+
+ return rval
+
+
+class Utils(object):
+ ''' utilities for openshiftcli modules '''
+ @staticmethod
+ def create_file(rname, data, ftype='yaml'):
+ ''' create a file in tmp with name and contents'''
+ path = os.path.join('/tmp', rname)
+ with open(path, 'w') as fds:
+ if ftype == 'yaml':
+ fds.write(yaml.dump(data, Dumper=yaml.RoundTripDumper))
+
+ elif ftype == 'json':
+ fds.write(json.dumps(data))
+ else:
+ fds.write(data)
+
+ # Register cleanup when module is done
+ atexit.register(Utils.cleanup, [path])
+ return path
+
+ @staticmethod
+ def create_files_from_contents(content, content_type=None):
+ '''Turn an array of dict: filename, content into a files array'''
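+ # expected input shape: [{'path': 'filename.yml', 'data': <dict or str>}, ...]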
+ if not isinstance(content, list):
+ content = [content]
+ files = []
+ for item in content:
+ path = Utils.create_file(item['path'], item['data'], ftype=content_type)
+ files.append({'name': os.path.basename(path), 'path': path})
+ return files
+
+ @staticmethod
+ def cleanup(files):
+ '''Clean up on exit '''
+ for sfile in files:
+ if os.path.exists(sfile):
+ if os.path.isdir(sfile):
+ shutil.rmtree(sfile)
+ elif os.path.isfile(sfile):
+ os.remove(sfile)
+
+ @staticmethod
+ def exists(results, _name):
+ ''' Check to see if the results include the name '''
+ if not results:
+ return False
+
+ if Utils.find_result(results, _name):
+ return True
+
+ return False
+
+ @staticmethod
+ def find_result(results, _name):
+ ''' Find the specified result by name'''
+ rval = None
+ for result in results:
+ if 'metadata' in result and result['metadata']['name'] == _name:
+ rval = result
+ break
+
+ return rval
+
+ @staticmethod
+ def get_resource_file(sfile, sfile_type='yaml'):
+ ''' return the service file '''
+ contents = None
+ with open(sfile) as sfd:
+ contents = sfd.read()
+
+ if sfile_type == 'yaml':
+ contents = yaml.load(contents, yaml.RoundTripLoader)
+ elif sfile_type == 'json':
+ contents = json.loads(contents)
+
+ return contents
+
+ @staticmethod
+ def filter_versions(stdout):
+ ''' filter the oc version output '''
+
+ version_dict = {}
+ version_search = ['oc', 'openshift', 'kubernetes']
+
+ for line in stdout.strip().split('\n'):
+ for term in version_search:
+ if not line:
+ continue
+ if line.startswith(term):
+ version_dict[term] = line.split()[-1]
+
+ # horrible hack to get openshift version in Openshift 3.2
+ # By default, "oc version" in 3.2 does not return an "openshift" version
+ if "openshift" not in version_dict:
+ version_dict["openshift"] = version_dict["oc"]
+
+ return version_dict
+
+ @staticmethod
+ def add_custom_versions(versions):
+ ''' create custom versions strings '''
+
+ versions_dict = {}
+
+ for tech, version in versions.items():
+ # clean up "-" from version
+ if "-" in version:
+ version = version.split("-")[0]
+
+ if version.startswith('v'):
+ versions_dict[tech + '_numeric'] = version[1:].split('+')[0]
+ # "v3.3.0.33" is what we have, we want "3.3"
+ versions_dict[tech + '_short'] = version[1:4]
+
+ return versions_dict
+
+ @staticmethod
+ def openshift_installed():
+ ''' check if openshift is installed '''
+ import yum
+
+ yum_base = yum.YumBase()
+ if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
+ return True
+
+ return False
+
+ # Disabling too-many-branches. This is a yaml dictionary comparison function
+ # pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
+ @staticmethod
+ def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
+ ''' Given a user defined definition, compare it with the results given back by our query. '''
+
+ # Currently these values are autogenerated and we do not need to check them
+ skip = ['metadata', 'status']
+ if skip_keys:
+ skip.extend(skip_keys)
+
+ for key, value in result_def.items():
+ if key in skip:
+ continue
+
+ # Both are lists
+ if isinstance(value, list):
+ if key not in user_def:
+ if debug:
+ print('User data does not have key [%s]' % key)
+ print('User data: %s' % user_def)
+ return False
+
+ if not isinstance(user_def[key], list):
+ if debug:
+ print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key]))
+ return False
+
+ if len(user_def[key]) != len(value):
+ if debug:
+ print("List lengths are not equal.")
+ print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value)))
+ print("user_def: %s" % user_def[key])
+ print("value: %s" % value)
+ return False
+
+ for values in zip(user_def[key], value):
+ if isinstance(values[0], dict) and isinstance(values[1], dict):
+ if debug:
+ print('sending list - list')
+ print(type(values[0]))
+ print(type(values[1]))
+ result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
+ if not result:
+ print('list compare returned false')
+ return False
+
+ elif value != user_def[key]:
+ if debug:
+ print('value should be identical')
+ print(value)
+ print(user_def[key])
+ return False
+
+ # recurse on a dictionary
+ elif isinstance(value, dict):
+ if key not in user_def:
+ if debug:
+ print("user_def does not have key [%s]" % key)
+ return False
+ if not isinstance(user_def[key], dict):
+ if debug:
+ print("dict returned false: not instance of dict")
+ return False
+
+ # before passing ensure keys match
+ api_values = set(value.keys()) - set(skip)
+ user_values = set(user_def[key].keys()) - set(skip)
+ if api_values != user_values:
+ if debug:
+ print("keys are not equal in dict")
+ print(api_values)
+ print(user_values)
+ return False
+
+ result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
+ if not result:
+ if debug:
+ print("dict returned false")
+ print(result)
+ return False
+
+ # Verify each key, value pair is the same
+ else:
+ if key not in user_def or value != user_def[key]:
+ if debug:
+ print("value not equal; user_def does not have key")
+ print(key)
+ print(value)
+ if key in user_def:
+ print(user_def[key])
+ return False
+
+ if debug:
+ print('returning true')
+ return True
+
+
+class OpenShiftCLIConfig(object):
+ '''Generic Config'''
+ def __init__(self, rname, namespace, kubeconfig, options):
+ self.kubeconfig = kubeconfig
+ self.name = rname
+ self.namespace = namespace
+ self._options = options
+
+ @property
+ def config_options(self):
+ ''' return config options '''
+ return self._options
+
+ def to_option_list(self):
+ '''return all options as a list of cli params'''
+ return self.stringify()
+
+ def stringify(self):
+ ''' return the options hash as a list of cli param strings '''
+ rval = []
+ for key, data in self.config_options.items():
+ if data['include'] \
+ and (data['value'] or isinstance(data['value'], int)):
+ rval.append('--%s=%s' % (key.replace('_', '-'), data['value']))
+
+ return rval
+
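To make the intent of the version helpers above concrete, here is a minimal, self-contained sketch (not the module itself) that mirrors the `filter_versions`/`add_custom_versions` logic; the sample output string is borrowed from the unit-test mock added further down in this change.

```python
# Illustrative sketch only; mirrors the logic of the static methods above.
SAMPLE = '''oc v3.4.0.39
kubernetes v1.4.0+776c994
features: Basic-Auth GSSAPI Kerberos SPNEGO
'''


def filter_versions(stdout):
    '''pull "<tool> vX.Y.Z" lines out of "oc version" output'''
    version_dict = {}
    for line in stdout.strip().split('\n'):
        for term in ('oc', 'openshift', 'kubernetes'):
            if line and line.startswith(term):
                version_dict[term] = line.split()[-1]
    # "oc version" on 3.2 prints no "openshift" line, so fall back to "oc"
    version_dict.setdefault('openshift', version_dict['oc'])
    return version_dict


def add_custom_versions(versions):
    '''derive <tool>_numeric and <tool>_short keys'''
    custom = {}
    for tech, version in versions.items():
        version = version.split('-')[0]
        if version.startswith('v'):
            custom[tech + '_numeric'] = version[1:].split('+')[0]  # e.g. 3.4.0.39
            custom[tech + '_short'] = version[1:4]                 # e.g. 3.4
    return custom


print(add_custom_versions(filter_versions(SAMPLE)))
# -> includes 'oc_short': '3.4', 'oc_numeric': '3.4.0.39', 'kubernetes_numeric': '1.4.0'
```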
diff --git a/roles/lib_openshift/src/lib/import.py b/roles/lib_openshift/src/lib/import.py
new file mode 100644
index 000000000..c2b30e019
--- /dev/null
+++ b/roles/lib_openshift/src/lib/import.py
@@ -0,0 +1,17 @@
+# pylint: skip-file
+# flake8: noqa
+'''
+ OpenShiftCLI class that wraps the oc commands in a subprocess
+'''
+# pylint: disable=too-many-lines
+
+from __future__ import print_function
+import atexit
+import json
+import os
+import re
+import shutil
+import subprocess
+# pylint: disable=import-error
+import ruamel.yaml as yaml
+from ansible.module_utils.basic import AnsibleModule
diff --git a/roles/lib_openshift/src/lib/route.py b/roles/lib_openshift/src/lib/route.py
new file mode 100644
index 000000000..3130e7358
--- /dev/null
+++ b/roles/lib_openshift/src/lib/route.py
@@ -0,0 +1,123 @@
+# pylint: skip-file
+# flake8: noqa
+# noqa: E302,E301
+
+
+# pylint: disable=too-many-instance-attributes
+class RouteConfig(object):
+ ''' Handle route options '''
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ sname,
+ namespace,
+ kubeconfig,
+ destcacert=None,
+ cacert=None,
+ cert=None,
+ key=None,
+ host=None,
+ tls_termination=None,
+ service_name=None,
+ wildcard_policy=None,
+ weight=None):
+ ''' constructor for handling route options '''
+ self.kubeconfig = kubeconfig
+ self.name = sname
+ self.namespace = namespace
+ self.host = host
+ self.tls_termination = tls_termination
+ self.destcacert = destcacert
+ self.cacert = cacert
+ self.cert = cert
+ self.key = key
+ self.service_name = service_name
+ self.data = {}
+ self.wildcard_policy = wildcard_policy
+ if wildcard_policy is None:
+ self.wildcard_policy = 'None'
+ self.weight = weight
+ if weight is None:
+ self.weight = 100
+
+ self.create_dict()
+
+ def create_dict(self):
+ ''' return a route as a dict '''
+ self.data['apiVersion'] = 'v1'
+ self.data['kind'] = 'Route'
+ self.data['metadata'] = {}
+ self.data['metadata']['name'] = self.name
+ self.data['metadata']['namespace'] = self.namespace
+ self.data['spec'] = {}
+
+ self.data['spec']['host'] = self.host
+
+ if self.tls_termination:
+ self.data['spec']['tls'] = {}
+
+ if self.tls_termination == 'reencrypt':
+ self.data['spec']['tls']['destinationCACertificate'] = self.destcacert
+ self.data['spec']['tls']['key'] = self.key
+ self.data['spec']['tls']['caCertificate'] = self.cacert
+ self.data['spec']['tls']['certificate'] = self.cert
+ self.data['spec']['tls']['termination'] = self.tls_termination
+
+ self.data['spec']['to'] = {'kind': 'Service',
+ 'name': self.service_name,
+ 'weight': self.weight}
+
+ self.data['spec']['wildcardPolicy'] = self.wildcard_policy
+
+# pylint: disable=too-many-instance-attributes,too-many-public-methods
+class Route(Yedit):
+ ''' Class to model an OpenShift route as a Yedit document '''
+ wildcard_policy = "spec.wildcardPolicy"
+ host_path = "spec.host"
+ service_path = "spec.to.name"
+ weight_path = "spec.to.weight"
+ cert_path = "spec.tls.certificate"
+ cacert_path = "spec.tls.caCertificate"
+ destcacert_path = "spec.tls.destinationCACertificate"
+ termination_path = "spec.tls.termination"
+ key_path = "spec.tls.key"
+ kind = 'route'
+
+ def __init__(self, content):
+ '''Route constructor'''
+ super(Route, self).__init__(content=content)
+
+ def get_destcacert(self):
+ ''' return destination ca cert '''
+ return self.get(Route.destcacert_path)
+
+ def get_cert(self):
+ ''' return cert '''
+ return self.get(Route.cert_path)
+
+ def get_key(self):
+ ''' return key '''
+ return self.get(Route.key_path)
+
+ def get_cacert(self):
+ ''' return cacert '''
+ return self.get(Route.cacert_path)
+
+ def get_service(self):
+ ''' return service name '''
+ return self.get(Route.service_path)
+
+ def get_weight(self):
+ ''' return service weight '''
+ return self.get(Route.weight_path)
+
+ def get_termination(self):
+ ''' return tls termination'''
+ return self.get(Route.termination_path)
+
+ def get_host(self):
+ ''' return host '''
+ return self.get(Route.host_path)
+
+ def get_wildcard_policy(self):
+ ''' return wildcardPolicy '''
+ return self.get(Route.wildcard_policy)
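A hedged usage sketch of the new route wrappers: it assumes the fragments above have already been assembled into an importable `oc_route.py` (so `Yedit`, `RouteConfig` and `Route` live in one generated file) and that its dependencies are installed.

```python
# Usage sketch only; relies on the generated roles/lib_openshift/library/oc_route.py
# being on the Python path.
from oc_route import Route, RouteConfig

config = RouteConfig(sname='test',
                     namespace='default',
                     kubeconfig='/etc/origin/master/admin.kubeconfig',
                     host='test.example',
                     tls_termination='edge',
                     service_name='test')

route = Route(content=config.data)   # wrap the generated dict in Yedit accessors
print(route.get_host())              # 'test.example'
print(route.get_termination())       # 'edge'
print(route.get_weight())            # 100 (RouteConfig default)
print(route.get_wildcard_policy())   # 'None' (RouteConfig default)
```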
diff --git a/roles/lib_openshift/src/sources.yml b/roles/lib_openshift/src/sources.yml
new file mode 100644
index 000000000..f1fd558d3
--- /dev/null
+++ b/roles/lib_openshift/src/sources.yml
@@ -0,0 +1,38 @@
+---
+oc_edit.py:
+- doc/generated
+- doc/license
+- lib/import.py
+- doc/edit
+- ../../lib_utils/src/class/yedit.py
+- lib/base.py
+- class/oc_edit.py
+- ansible/oc_edit.py
+oc_obj.py:
+- doc/generated
+- doc/license
+- lib/import.py
+- doc/obj
+- ../../lib_utils/src/class/yedit.py
+- lib/base.py
+- class/oc_obj.py
+- ansible/oc_obj.py
+oc_route.py:
+- doc/generated
+- doc/license
+- lib/import.py
+- doc/route
+- ../../lib_utils/src/class/yedit.py
+- lib/base.py
+- lib/route.py
+- class/oc_route.py
+- ansible/oc_route.py
+oc_version.py:
+- doc/generated
+- doc/license
+- lib/import.py
+- doc/version
+- ../../lib_utils/src/class/yedit.py
+- lib/base.py
+- class/oc_version.py
+- ansible/oc_version.py
diff --git a/roles/lib_openshift/src/test/integration/oc_route.yml b/roles/lib_openshift/src/test/integration/oc_route.yml
new file mode 100755
index 000000000..620d5d5e7
--- /dev/null
+++ b/roles/lib_openshift/src/test/integration/oc_route.yml
@@ -0,0 +1,77 @@
+#!/usr/bin/ansible-playbook --module-path=../../../library/
+# ./oc_route.yml -M ../../../library -e "cli_master_test=$OPENSHIFT_MASTER"
+---
+- hosts: "{{ cli_master_test }}"
+ gather_facts: no
+ user: root
+ tasks:
+ - name: create route
+ oc_route:
+ name: test
+ namespace: default
+ tls_termination: edge
+ cert_content: testing cert
+ cacert_content: testing cacert
+ key_content: key content
+ service_name: test
+ host: test.example
+ register: routeout
+ - debug: var=routeout
+
+ - assert:
+ that: "routeout.results.results[0]['metadata']['name'] == 'test'"
+ msg: route create failed
+
+ - name: get route
+ oc_route:
+ state: list
+ name: test
+ namespace: default
+ register: routeout
+ - debug: var=routeout
+
+ - assert:
+ that: "routeout.results[0]['metadata']['name'] == 'test'"
+ msg: get route failed
+
+ - name: delete route
+ oc_route:
+ state: absent
+ name: test
+ namespace: default
+ register: routeout
+ - debug: var=routeout
+
+ - assert:
+ that: "routeout.results.returncode == 0"
+ msg: delete route failed
+
+ - name: create route
+ oc_route:
+ name: test
+ namespace: default
+ tls_termination: edge
+ cert_content: testing cert
+ cacert_content: testing cacert
+ key_content: testing key
+ service_name: test
+ host: test.example
+ register: routeout
+ - debug: var=routeout
+
+ - name: create route noop
+ oc_route:
+ name: test
+ namespace: default
+ tls_termination: edge
+ cert_content: testing cert
+ cacert_content: testing cacert
+ key_content: testing key
+ service_name: test
+ host: test.example
+ register: routeout
+ - debug: var=routeout
+
+ - assert:
+ that: "routeout.changed == False"
+ msg: Route create not idempotent
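For reference, the integration test above can be run directly, as its shebang suggests, against a master named by `cli_master_test` (the `OPENSHIFT_MASTER` environment variable is assumed from the header comment):

```
$ cd roles/lib_openshift/src/test/integration
$ ./oc_route.yml -M ../../../library -e "cli_master_test=$OPENSHIFT_MASTER"
```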
diff --git a/roles/lib_openshift/src/test/integration/oc_version.yml b/roles/lib_openshift/src/test/integration/oc_version.yml
new file mode 100755
index 000000000..52336d8da
--- /dev/null
+++ b/roles/lib_openshift/src/test/integration/oc_version.yml
@@ -0,0 +1,17 @@
+#!/usr/bin/ansible-playbook --module-path=../../../library/
+# ./oc_version.yml -e "cli_master_test=$OPENSHIFT_MASTER"
+---
+- hosts: "{{ cli_master_test }}"
+ gather_facts: no
+ user: root
+ tasks:
+ - name: Get openshift version
+ oc_version:
+ register: versionout
+
+ - debug: var=versionout
+
+ - assert:
+ that:
+ - "'oc_numeric' in versionout.results.keys()"
+ msg: "Did not find 'oc_numeric' in version results."
diff --git a/roles/lib_openshift/src/test/unit/oc_version.py b/roles/lib_openshift/src/test/unit/oc_version.py
new file mode 100755
index 000000000..8d9128187
--- /dev/null
+++ b/roles/lib_openshift/src/test/unit/oc_version.py
@@ -0,0 +1,70 @@
+#!/usr/bin/env python2
+'''
+ Unit tests for oc version
+'''
+# To run
+# python -m unittest oc_version
+#
+# .
+# Ran 1 test in 0.597s
+#
+# OK
+
+import os
+import sys
+import unittest
+
+# Allowing invalid (short) variable names in tests so that they
+# can stay brief
+# pylint: disable=invalid-name,no-name-in-module
+# Disable import-error b/c our libraries aren't loaded in jenkins
+# pylint: disable=import-error,wrong-import-position
+# place class in our python path
+module_path = os.path.join('/'.join(os.path.realpath(__file__).split('/')[:-4]), 'library') # noqa: E501
+sys.path.insert(0, module_path)
+from oc_version import OCVersion # noqa: E402
+
+
+# pylint: disable=unused-argument
+def oc_cmd_mock(cmd, oadm=False, output=False, output_type='json', input_data=None):
+ '''mock command for openshift_cmd'''
+ version = '''oc v3.4.0.39
+kubernetes v1.4.0+776c994
+features: Basic-Auth GSSAPI Kerberos SPNEGO
+
+Server https://internal.api.opstest.openshift.com
+openshift v3.4.0.39
+kubernetes v1.4.0+776c994
+'''
+ if 'version' in cmd:
+ return {'stderr': None,
+ 'stdout': version,
+ 'returncode': 0,
+ 'results': version,
+ 'cmd': cmd}
+
+
+class OCVersionTest(unittest.TestCase):
+ '''
+ Test class for OCVersion
+ '''
+
+ def setUp(self):
+ ''' setUp: create an OCVersion instance and mock out openshift_cmd '''
+ self.oc_ver = OCVersion(None, False)
+ self.oc_ver.openshift_cmd = oc_cmd_mock
+
+ def test_get(self):
+ ''' Testing a get '''
+ results = self.oc_ver.get()
+ self.assertEqual(results['oc_short'], '3.4')
+ self.assertEqual(results['oc_numeric'], '3.4.0.39')
+ self.assertEqual(results['kubernetes_numeric'], '1.4.0')
+
+ def tearDown(self):
+ '''TearDown method'''
+ pass
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/roles/lib_utils/library/yedit.py b/roles/lib_utils/library/yedit.py
index fb545c7c8..6a5b40dcc 100644
--- a/roles/lib_utils/library/yedit.py
+++ b/roles/lib_utils/library/yedit.py
@@ -24,7 +24,6 @@
# limitations under the License.
#
-
# pylint: disable=wrong-import-order
import json
import os
@@ -135,6 +134,12 @@ options:
required: false
default: true
aliases: []
+ separator:
+ description:
+ - The separator being used when parsing strings.
+ required: false
+ default: '.'
+ aliases: []
author:
- "Kenny Woodson <kwoodson@redhat.com>"
extends_documentation_fragment: []
@@ -163,6 +168,7 @@ EXAMPLES = '''
# b:
# c: d
'''
+# noqa: E301,E302
class YeditException(Exception):
@@ -170,6 +176,7 @@ class YeditException(Exception):
pass
+# pylint: disable=too-many-public-methods
class Yedit(object):
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
@@ -590,6 +597,48 @@ class Yedit(object):
return (False, self.yaml_dict)
+ @staticmethod
+ def get_curr_value(invalue, val_type):
+ '''return the current value'''
+ if invalue is None:
+ return None
+
+ curr_value = invalue
+ if val_type == 'yaml':
+ curr_value = yaml.load(invalue)
+ elif val_type == 'json':
+ curr_value = json.loads(invalue)
+
+ return curr_value
+
+ @staticmethod
+ def parse_value(inc_value, vtype=''):
+ '''determine value type passed'''
+ true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
+ 'on', 'On', 'ON', ]
+ false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
+ 'off', 'Off', 'OFF']
+
+ # It came in as a string but you didn't specify value_type as string
+ # we will convert to bool if it matches any of the above cases
+ if isinstance(inc_value, str) and 'bool' in vtype:
+ if inc_value not in true_bools and inc_value not in false_bools:
+ raise YeditException('Not a boolean type. str=[%s] vtype=[%s]'
+ % (inc_value, vtype))
+ elif isinstance(inc_value, bool) and 'str' in vtype:
+ inc_value = str(inc_value)
+
+ # If vtype is not str then go ahead and attempt to yaml load it.
+ if isinstance(inc_value, str) and 'str' not in vtype:
+ try:
+ inc_value = yaml.load(inc_value)
+ except Exception:
+ raise YeditException('Could not determine type of incoming ' +
+ 'value. value=[%s] vtype=[%s]'
+ % (type(inc_value), vtype))
+
+ return inc_value
+
# pylint: disable=too-many-return-statements,too-many-branches
@staticmethod
def run_ansible(module):
@@ -610,8 +659,8 @@ class Yedit(object):
if module.params['state'] == 'list':
if module.params['content']:
- content = parse_value(module.params['content'],
- module.params['content_type'])
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
yamlfile.yaml_dict = content
if module.params['key']:
@@ -621,8 +670,8 @@ class Yedit(object):
elif module.params['state'] == 'absent':
if module.params['content']:
- content = parse_value(module.params['content'],
- module.params['content_type'])
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
yamlfile.yaml_dict = content
if module.params['update']:
@@ -639,8 +688,8 @@ class Yedit(object):
elif module.params['state'] == 'present':
# check if content is different than what is in the file
if module.params['content']:
- content = parse_value(module.params['content'],
- module.params['content_type'])
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
# We had no edits to make and the contents are the same
if yamlfile.yaml_dict == content and \
@@ -653,12 +702,13 @@ class Yedit(object):
# we were passed a value; parse it
if module.params['value']:
- value = parse_value(module.params['value'],
- module.params['value_type'])
+ value = Yedit.parse_value(module.params['value'],
+ module.params['value_type'])
key = module.params['key']
if module.params['update']:
# pylint: disable=line-too-long
- curr_value = get_curr_value(parse_value(module.params['curr_value']), module.params['curr_value_format']) # noqa: #501
+ curr_value = Yedit.get_curr_value(Yedit.parse_value(module.params['curr_value']), # noqa: E501
+ module.params['curr_value_format']) # noqa: E501
rval = yamlfile.update(key, value, module.params['index'], curr_value) # noqa: E501
@@ -684,48 +734,6 @@ class Yedit(object):
return {'failed': True, 'msg': 'Unkown state passed'}
-def get_curr_value(invalue, val_type):
- '''return the current value'''
- if invalue is None:
- return None
-
- curr_value = invalue
- if val_type == 'yaml':
- curr_value = yaml.load(invalue)
- elif val_type == 'json':
- curr_value = json.loads(invalue)
-
- return curr_value
-
-
-def parse_value(inc_value, vtype=''):
- '''determine value type passed'''
- true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
- 'on', 'On', 'ON', ]
- false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
- 'off', 'Off', 'OFF']
-
- # It came in as a string but you didn't specify value_type as string
- # we will convert to bool if it matches any of the above cases
- if isinstance(inc_value, str) and 'bool' in vtype:
- if inc_value not in true_bools and inc_value not in false_bools:
- raise YeditException('Not a boolean type. str=[%s] vtype=[%s]'
- % (inc_value, vtype))
- elif isinstance(inc_value, bool) and 'str' in vtype:
- inc_value = str(inc_value)
-
- # If vtype is not str then go ahead and attempt to yaml load it.
- if isinstance(inc_value, str) and 'str' not in vtype:
- try:
- inc_value = yaml.load(inc_value)
- except Exception:
- raise YeditException('Could not determine type of incoming ' +
- 'value. value=[%s] vtype=[%s]'
- % (type(inc_value), vtype))
-
- return inc_value
-
-
# pylint: disable=too-many-branches
def main():
''' ansible oc module for secrets '''
@@ -757,7 +765,7 @@ def main():
rval = Yedit.run_ansible(module)
if 'failed' in rval and rval['failed']:
- module.fail_json(msg=rval['msg'])
+ module.fail_json(**rval)
module.exit_json(**rval)
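The behaviour of the new `Yedit.parse_value`/`Yedit.get_curr_value` static methods can be summarised with a small, hedged sketch (sample values only; it assumes `roles/lib_utils/library` is on the Python path and the module's dependencies are installed):

```python
# Illustrative only: exercises the static helpers added above.
from yedit import Yedit, YeditException

print(Yedit.parse_value(True, vtype='str'))      # 'True' -- bool coerced to str
print(Yedit.parse_value('{a: 1}'))               # yaml.load turns it into a mapping
print(Yedit.get_curr_value('{"a": 1}', 'json'))  # {'a': 1} via json.loads
print(Yedit.get_curr_value(None, 'yaml'))        # None passes straight through

try:
    Yedit.parse_value('maybe', vtype='bool')     # not a recognised boolean string
except YeditException as err:
    print(err)
```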
diff --git a/roles/lib_utils/src/ansible/yedit.py b/roles/lib_utils/src/ansible/yedit.py
index a80cd520c..8a1a7c2dc 100644
--- a/roles/lib_utils/src/ansible/yedit.py
+++ b/roles/lib_utils/src/ansible/yedit.py
@@ -2,48 +2,6 @@
# pylint: skip-file
-def get_curr_value(invalue, val_type):
- '''return the current value'''
- if invalue is None:
- return None
-
- curr_value = invalue
- if val_type == 'yaml':
- curr_value = yaml.load(invalue)
- elif val_type == 'json':
- curr_value = json.loads(invalue)
-
- return curr_value
-
-
-def parse_value(inc_value, vtype=''):
- '''determine value type passed'''
- true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
- 'on', 'On', 'ON', ]
- false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
- 'off', 'Off', 'OFF']
-
- # It came in as a string but you didn't specify value_type as string
- # we will convert to bool if it matches any of the above cases
- if isinstance(inc_value, str) and 'bool' in vtype:
- if inc_value not in true_bools and inc_value not in false_bools:
- raise YeditException('Not a boolean type. str=[%s] vtype=[%s]'
- % (inc_value, vtype))
- elif isinstance(inc_value, bool) and 'str' in vtype:
- inc_value = str(inc_value)
-
- # If vtype is not str then go ahead and attempt to yaml load it.
- if isinstance(inc_value, str) and 'str' not in vtype:
- try:
- inc_value = yaml.load(inc_value)
- except Exception:
- raise YeditException('Could not determine type of incoming ' +
- 'value. value=[%s] vtype=[%s]'
- % (type(inc_value), vtype))
-
- return inc_value
-
-
# pylint: disable=too-many-branches
def main():
''' ansible oc module for secrets '''
@@ -75,7 +33,7 @@ def main():
rval = Yedit.run_ansible(module)
if 'failed' in rval and rval['failed']:
- module.fail_json(msg=rval['msg'])
+ module.fail_json(**rval)
module.exit_json(**rval)
diff --git a/roles/lib_utils/src/class/yedit.py b/roles/lib_utils/src/class/yedit.py
index e110bc11e..b1644f9b2 100644
--- a/roles/lib_utils/src/class/yedit.py
+++ b/roles/lib_utils/src/class/yedit.py
@@ -1,11 +1,14 @@
# flake8: noqa
# pylint: skip-file
+# noqa: E301,E302
+
class YeditException(Exception):
''' Exception class for Yedit '''
pass
+# pylint: disable=too-many-public-methods
class Yedit(object):
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
@@ -426,6 +429,48 @@ class Yedit(object):
return (False, self.yaml_dict)
+ @staticmethod
+ def get_curr_value(invalue, val_type):
+ '''return the current value'''
+ if invalue is None:
+ return None
+
+ curr_value = invalue
+ if val_type == 'yaml':
+ curr_value = yaml.load(invalue)
+ elif val_type == 'json':
+ curr_value = json.loads(invalue)
+
+ return curr_value
+
+ @staticmethod
+ def parse_value(inc_value, vtype=''):
+ '''determine value type passed'''
+ true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
+ 'on', 'On', 'ON', ]
+ false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
+ 'off', 'Off', 'OFF']
+
+ # It came in as a string but you didn't specify value_type as string
+ # we will convert to bool if it matches any of the above cases
+ if isinstance(inc_value, str) and 'bool' in vtype:
+ if inc_value not in true_bools and inc_value not in false_bools:
+ raise YeditException('Not a boolean type. str=[%s] vtype=[%s]'
+ % (inc_value, vtype))
+ elif isinstance(inc_value, bool) and 'str' in vtype:
+ inc_value = str(inc_value)
+
+ # If vtype is not str then go ahead and attempt to yaml load it.
+ if isinstance(inc_value, str) and 'str' not in vtype:
+ try:
+ inc_value = yaml.load(inc_value)
+ except Exception:
+ raise YeditException('Could not determine type of incoming ' +
+ 'value. value=[%s] vtype=[%s]'
+ % (type(inc_value), vtype))
+
+ return inc_value
+
# pylint: disable=too-many-return-statements,too-many-branches
@staticmethod
def run_ansible(module):
@@ -446,8 +491,8 @@ class Yedit(object):
if module.params['state'] == 'list':
if module.params['content']:
- content = parse_value(module.params['content'],
- module.params['content_type'])
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
yamlfile.yaml_dict = content
if module.params['key']:
@@ -457,8 +502,8 @@ class Yedit(object):
elif module.params['state'] == 'absent':
if module.params['content']:
- content = parse_value(module.params['content'],
- module.params['content_type'])
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
yamlfile.yaml_dict = content
if module.params['update']:
@@ -475,8 +520,8 @@ class Yedit(object):
elif module.params['state'] == 'present':
# check if content is different than what is in the file
if module.params['content']:
- content = parse_value(module.params['content'],
- module.params['content_type'])
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
# We had no edits to make and the contents are the same
if yamlfile.yaml_dict == content and \
@@ -489,12 +534,13 @@ class Yedit(object):
# we were passed a value; parse it
if module.params['value']:
- value = parse_value(module.params['value'],
- module.params['value_type'])
+ value = Yedit.parse_value(module.params['value'],
+ module.params['value_type'])
key = module.params['key']
if module.params['update']:
# pylint: disable=line-too-long
- curr_value = get_curr_value(parse_value(module.params['curr_value']), module.params['curr_value_format']) # noqa: #501
+ curr_value = Yedit.get_curr_value(Yedit.parse_value(module.params['curr_value']), # noqa: E501
+ module.params['curr_value_format']) # noqa: E501
rval = yamlfile.update(key, value, module.params['index'], curr_value) # noqa: E501
diff --git a/roles/lib_utils/src/doc/generated b/roles/lib_utils/src/doc/generated
new file mode 100644
index 000000000..054780313
--- /dev/null
+++ b/roles/lib_utils/src/doc/generated
@@ -0,0 +1,9 @@
+#!/usr/bin/env python
+# pylint: disable=missing-docstring
+# ___ ___ _ _ ___ ___ _ _____ ___ ___
+# / __| __| \| | __| _ \ /_\_ _| __| \
+# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
+# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
+# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
+# | |) | (_) | | .` | (_) || | | _|| |) | | | |
+# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
diff --git a/roles/lib_utils/src/doc/yedit b/roles/lib_utils/src/doc/yedit
index e367a389e..16b44943e 100644
--- a/roles/lib_utils/src/doc/yedit
+++ b/roles/lib_utils/src/doc/yedit
@@ -102,6 +102,12 @@ options:
required: false
default: true
aliases: []
+ separator:
+ description:
+ - The separator being used when parsing strings.
+ required: false
+ default: '.'
+ aliases: []
author:
- "Kenny Woodson <kwoodson@redhat.com>"
extends_documentation_fragment: []
diff --git a/roles/lib_utils/src/generate.py b/roles/lib_utils/src/generate.py
index f4b46aa91..6daade108 100755
--- a/roles/lib_utils/src/generate.py
+++ b/roles/lib_utils/src/generate.py
@@ -3,42 +3,72 @@
Generate the openshift-ansible/roles/lib_openshift_cli/library/ modules.
'''
+import argparse
import os
import yaml
-
-# pylint: disable=anomalous-backslash-in-string
-GEN_STR = "#!/usr/bin/env python\n" + \
- "# pylint: disable=missing-docstring\n" + \
- "# ___ ___ _ _ ___ ___ _ _____ ___ ___\n" + \
- "# / __| __| \| | __| _ \ /_\_ _| __| \\\n" + \
- "# | (_ | _|| .` | _|| / / _ \| | | _|| |) |\n" + \
- "# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____\n" + \
- "# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|\n" + \
- "# | |) | (_) | | .` | (_) || | | _|| |) | | | |\n" + \
- "# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|\n"
+import six
OPENSHIFT_ANSIBLE_PATH = os.path.dirname(os.path.realpath(__file__))
-OPENSHIFT_ANSIBLE_SOURCES_PATH = os.path.join(OPENSHIFT_ANSIBLE_PATH, 'generate_sources.yml') # noqa: E501
+OPENSHIFT_ANSIBLE_SOURCES_PATH = os.path.join(OPENSHIFT_ANSIBLE_PATH, 'sources.yml') # noqa: E501
+LIBRARY = os.path.join(OPENSHIFT_ANSIBLE_PATH, '..', 'library/')
+
+
+class GenerateAnsibleException(Exception):
+ '''General Exception for generate function'''
+ pass
+
+
+def parse_args():
+ '''parse arguments to generate'''
+ parser = argparse.ArgumentParser(description="Generate ansible modules.")
+ parser.add_argument('--verify', action='store_true', default=False,
+ help='Verify library code matches the generated code.')
+
+ return parser.parse_args()
+
+
+def generate(parts):
+ '''generate the source code for the ansible modules'''
+
+ data = six.StringIO()
+ for fpart in parts:
+ # the first line or two are pylint/flake8 disables, so skip them
+ with open(os.path.join(OPENSHIFT_ANSIBLE_PATH, fpart)) as pfd:
+ for idx, line in enumerate(pfd):
+ if idx in [0, 1] and ('flake8: noqa' in line or 'pylint: skip-file' in line):  # noqa: E501
+ continue
+
+ data.write(line)
+
+ return data
+
+
+def get_sources():
+ '''return the path to the generate sources'''
+ return yaml.load(open(OPENSHIFT_ANSIBLE_SOURCES_PATH).read())
+
+
+def verify():
+ '''verify if the generated code matches the library code'''
+ for fname, parts in get_sources().items():
+ data = generate(parts)
+ fname = os.path.join(LIBRARY, fname)
+ if not open(fname).read() == data.getvalue():
+ raise GenerateAnsibleException('Generated content does not match for %s' % fname)
def main():
''' combine the necessary files to create the ansible module '''
+ args = parse_args()
+ if args.verify:
+ verify()
- library = os.path.join(OPENSHIFT_ANSIBLE_PATH, '..', 'library/')
- sources = yaml.load(open(OPENSHIFT_ANSIBLE_SOURCES_PATH).read())
- for fname, parts in sources.items():
- with open(os.path.join(library, fname), 'w') as afd:
+ for fname, parts in get_sources().items():
+ data = generate(parts)
+ fname = os.path.join(LIBRARY, fname)
+ with open(fname, 'w') as afd:
afd.seek(0)
- afd.write(GEN_STR)
- for fpart in parts:
- with open(os.path.join(OPENSHIFT_ANSIBLE_PATH, fpart)) as pfd:
- # first line is pylint disable so skip it
- for idx, line in enumerate(pfd):
- if idx in [0, 1] and 'flake8: noqa' in line \
- or 'pylint: skip-file' in line:
- continue
-
- afd.write(line)
+ afd.write(data.getvalue())
if __name__ == '__main__':
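As a usage note for the refactored generator (paths assumed relative to the role), the bundled module can now be rebuilt or verified from the `src/` directory:

```
$ cd roles/lib_utils/src
$ python generate.py            # regenerate ../library/yedit.py from sources.yml
$ python generate.py --verify   # raises GenerateAnsibleException if library/ is stale
```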
diff --git a/roles/lib_utils/src/generate_sources.yml b/roles/lib_utils/src/sources.yml
index 83b21de1b..9cf3a0981 100644
--- a/roles/lib_utils/src/generate_sources.yml
+++ b/roles/lib_utils/src/sources.yml
@@ -1,5 +1,6 @@
---
yedit.py:
+- doc/generated
- doc/license
- class/import.py
- doc/yedit
diff --git a/roles/lib_utils/src/test/integration/kube-manager-test.yaml.orig b/roles/lib_utils/src/test/integration/kube-manager-test.yaml.orig
new file mode 100644
index 000000000..5541c3dae
--- /dev/null
+++ b/roles/lib_utils/src/test/integration/kube-manager-test.yaml.orig
@@ -0,0 +1,52 @@
+apiVersion: v1
+kind: Pod
+metadata:
+ name: kube-controller-manager
+ namespace: kube-system
+spec:
+ hostNetwork: true
+ containers:
+ - name: kube-controller-manager
+ image: openshift/kube:v1.0.0
+ command:
+ - /hyperkube
+ - controller-manager
+ - --master=http://127.0.0.1:8080
+ - --leader-elect=true
+ - --service-account-private-key-file=/etc/kubernetes/ssl/apiserver-key.pem
+ - --root-ca-file=/etc/k8s/ssl/my.pem
+ - --my-new-parameter=openshift
+ livenessProbe:
+ httpGet:
+ host: 127.0.0.1
+ path: /healthz
+ port: 10252
+ initialDelaySeconds: 15
+ timeoutSeconds: 1
+ volumeMounts:
+ - mountPath: /etc/kubernetes/ssl
+ name: ssl-certs-kubernetes
+ readOnly: true
+ - mountPath: /etc/ssl/certs
+ name: ssl-certs-host
+ readOnly: 'true'
+ volumes:
+ - hostPath:
+ path: /etc/kubernetes/ssl
+ name: ssl-certs-kubernetes
+ - hostPath:
+ path: /usr/share/ca-certificates
+ name: ssl-certs-host
+yedittest: yedittest
+metadata-namespace: openshift-is-awesome
+nonexistingkey:
+- --my-new-parameter=openshift
+a:
+ b:
+ c: d
+e:
+ f:
+ g:
+ h:
+ i:
+ j: k
diff --git a/roles/openshift_ca/tasks/main.yml b/roles/openshift_ca/tasks/main.yml
index e2a12e5ff..e21397170 100644
--- a/roles/openshift_ca/tasks/main.yml
+++ b/roles/openshift_ca/tasks/main.yml
@@ -86,7 +86,7 @@
{% for named_ca_certificate in openshift.master.named_certificates | default([]) | oo_collect('cafile') %}
--certificate-authority {{ named_ca_certificate }}
{% endfor %}
- --hostnames={{ openshift_master_hostnames | join(',') }}
+ --hostnames={{ openshift.common.all_hostnames | join(',') }}
--master={{ openshift.master.api_url }}
--public-master={{ openshift.master.public_api_url }}
--cert-dir={{ openshift_ca_config_dir }}
diff --git a/roles/openshift_certificate_expiry/README.md b/roles/openshift_certificate_expiry/README.md
index a88470bdd..327cc004b 100644
--- a/roles/openshift_certificate_expiry/README.md
+++ b/roles/openshift_certificate_expiry/README.md
@@ -1,5 +1,4 @@
-OpenShift Certificate Expiration Checker
-========================================
+# OpenShift Certificate Expiration Checker
OpenShift certificate expiration checking. Be warned of certificates
expiring within a configurable window of days, and notified of
@@ -21,8 +20,7 @@ cluster. For best results run `ansible-playbook` with the `-v` option.
-Role Variables
---------------
+# Role Variables
Core variables in this role:
@@ -42,8 +40,64 @@ Optional report/result saving variables in this role:
| `openshift_certificate_expiry_json_results_path` | `/tmp/cert-expiry-report.json` | The full path to save the json report as |
-Example Playbook
-----------------
+# Using this Role
+
+How to use the Certificate Expiration Checking Role.
+
+> **NOTE:** In the examples shown below, ensure you change **HOSTS**
+> to the path of your inventory file.
+
+## Run with ansible-playbook
+
+Run one of the example playbooks using an inventory file
+representative of your existing cluster. Some example playbooks are
+included in this repo, or you can read on below after this example to
+craft your own.
+
+```
+$ ansible-playbook -v -i HOSTS ./roles/openshift_certificate_expiry/examples/playbooks/easy-mode.yaml
+```
+
+Using the `easy-mode.yaml` playbook will produce:
+
+* Reports including healthy and unhealthy hosts
+* A JSON report in `/tmp/`
+* A stylized HTML report in `/tmp/`
+
+
+## More Example Playbooks
+
+> **Note:** These Playbooks are available to run directly out of the
+> [examples/playbooks/](examples/playbooks/) directory.
+
+
+This example playbook is useful if you just want to **try the role
+out**. It enables HTML and JSON reports. The warning window is set
+very large, so you will almost always get results back. All
+certificates (healthy or not) are included in the results:
+
+```yaml
+---
+- name: Check cert expirys
+ hosts: nodes:masters:etcd
+ become: yes
+ gather_facts: no
+ vars:
+ openshift_certificate_expiry_warning_days: 1500
+ openshift_certificate_expiry_save_json_results: yes
+ openshift_certificate_expiry_generate_html_report: yes
+ openshift_certificate_expiry_show_all: yes
+ roles:
+ - role: openshift_certificate_expiry
+```
+
+```
+$ ansible-playbook -v -i HOSTS ./roles/openshift_certificate_expiry/examples/playbooks/easy-mode.yaml
+```
+
+> [View This Playbook](examples/playbooks/easy-mode.yaml)
+
+***
Default behavior:
@@ -57,6 +111,16 @@ Default behavior:
- role: openshift_certificate_expiry
```
+```
+$ ansible-playbook -v -i HOSTS ./roles/openshift_certificate_expiry/examples/playbooks/default.yaml
+```
+
+
+> [View This Playbook](examples/playbooks/default.yaml)
+
+***
+
+
Generate HTML and JSON artifacts in their default paths:
```yaml
@@ -72,6 +136,15 @@ Generate HTML and JSON artifacts in their default paths:
- role: openshift_certificate_expiry
```
+```
+$ ansible-playbook -v -i HOSTS ./roles/openshift_certificate_expiry/examples/playbooks/html_and_json_default_paths.yaml
+```
+
+
+> [View This Playbook](examples/playbooks/html_and_json_default_paths.yaml)
+
+***
+
Change the expiration warning window to 1500 days (good for testing
the module out):
@@ -87,6 +160,15 @@ the module out):
- role: openshift_certificate_expiry
```
+```
+$ ansible-playbook -v -i HOSTS ./roles/openshift_certificate_expiry/examples/playbooks/longer_warning_period.yaml
+```
+
+
+> [View This Playbook](examples/playbooks/longer_warning_period.yaml)
+
+***
+
Change the expiration warning window to 1500 days (good for testing
the module out) and save the results as a JSON file:
@@ -103,9 +185,31 @@ the module out) and save the results as a JSON file:
- role: openshift_certificate_expiry
```
+```
+$ ansible-playbook -v -i HOSTS ./roles/openshift_certificate_expiry/examples/playbooks/longer-warning-period-json-results.yaml
+```
+
+
+> [View This Playbook](examples/playbooks/longer-warning-period-json-results.yaml)
+
-JSON Output
------------
+
+# Output Formats
+
+As noted above, there are two ways to format your check report: as
+`json` for machine parsing, or as a stylized `html` page for easy
+skimming. Both options are shown below.
+
+## HTML Report
+
+![HTML Expiration Report](examples/cert-expiry-report-html.png)
+
+For an example of the HTML report you can browse, save
+[examples/cert-expiry-report.html](examples/cert-expiry-report.html)
+and then open the file in your browser.
+
+
+## JSON Report
There are two top-level keys in the saved JSON results, `data` and
`summary`.
@@ -122,85 +226,116 @@ certificates:
* expiring within the configured warning window
* already expired
-The example below is abbreviated to save space:
+For an example of the full JSON report, see [examples/cert-expiry-report.json](examples/cert-expiry-report.json).
+
+The example below is abbreviated to save space.
```json
{
- "data": {
- "192.168.124.148": {
- "etcd": [
- {
- "cert_cn": "CN:etcd-signer@1474563722",
- "days_remaining": 350,
- "expiry": "2017-09-22 17:02:25",
- "health": "warning",
- "path": "/etc/etcd/ca.crt"
- },
- ],
- "kubeconfigs": [
- {
- "cert_cn": "O:system:nodes, CN:system:node:m01.example.com",
- "days_remaining": 715,
- "expiry": "2018-09-22 17:08:57",
- "health": "warning",
- "path": "/etc/origin/node/system:node:m01.example.com.kubeconfig"
- },
- {
- "cert_cn": "O:system:cluster-admins, CN:system:admin",
- "days_remaining": 715,
- "expiry": "2018-09-22 17:04:40",
- "health": "warning",
- "path": "/etc/origin/master/admin.kubeconfig"
- }
- ],
- "meta": {
- "checked_at_time": "2016-10-07 15:26:47.608192",
- "show_all": "True",
- "warn_before_date": "2020-11-15 15:26:47.608192",
- "warning_days": 1500
- },
- "ocp_certs": [
- {
- "cert_cn": "CN:172.30.0.1, DNS:kubernetes, DNS:kubernetes.default, DNS:kubernetes.default.svc, DNS:kubernetes.default.svc.cluster.local, DNS:m01.example.com, DNS:openshift, DNS:openshift.default, DNS:openshift.default.svc, DNS:openshift.default.svc.cluster.local, DNS:172.30.0.1, DNS:192.168.124.148, IP Address:172.30.0.1, IP Address:192.168.124.148",
- "days_remaining": 715,
- "expiry": "2018-09-22 17:04:39",
- "health": "warning",
- "path": "/etc/origin/master/master.server.crt"
- },
- {
- "cert_cn": "CN:openshift-signer@1474563878",
- "days_remaining": 1810,
- "expiry": "2021-09-21 17:04:38",
- "health": "ok",
- "path": "/etc/origin/node/ca.crt"
- }
- ],
- "registry": [
- {
- "cert_cn": "CN:172.30.101.81, DNS:docker-registry-default.router.default.svc.cluster.local, DNS:docker-registry.default.svc.cluster.local, DNS:172.30.101.81, IP Address:172.30.101.81",
- "days_remaining": 728,
- "expiry": "2018-10-05 18:54:29",
- "health": "warning",
- "path": "/api/v1/namespaces/default/secrets/registry-certificates"
- }
- ],
- "router": [
- {
- "cert_cn": "CN:router.default.svc, DNS:router.default.svc, DNS:router.default.svc.cluster.local",
- "days_remaining": 715,
- "expiry": "2018-09-22 17:48:23",
- "health": "warning",
- "path": "/api/v1/namespaces/default/secrets/router-certs"
- }
- ]
+ "data": {
+ "m01.example.com": {
+ "etcd": [
+ {
+ "cert_cn": "CN:172.30.0.1, DNS:kubernetes, DNS:kubernetes.default, DNS:kubernetes.default.svc,...",
+ "days_remaining": 722,
+ "expiry": "2019-01-09 17:00:03",
+ "health": "warning",
+ "path": "/etc/origin/master/etcd.server.crt",
+ "serial": 7,
+ "serial_hex": "0x7"
+ }
+ ],
+ "kubeconfigs": [
+ {
+ "cert_cn": "O:system:nodes, CN:system:node:m01.example.com",
+ "days_remaining": 722,
+ "expiry": "2019-01-09 17:03:28",
+ "health": "warning",
+ "path": "/etc/origin/node/system:node:m01.example.com.kubeconfig",
+ "serial": 11,
+ "serial_hex": "0xb"
}
+ ],
+ "meta": {
+ "checked_at_time": "2017-01-17 10:36:25.230920",
+ "show_all": "True",
+ "warn_before_date": "2021-02-25 10:36:25.230920",
+ "warning_days": 1500
+ },
+ "ocp_certs": [
+ {
+ "cert_cn": "CN:172.30.0.1, DNS:kubernetes, DNS:kubernetes.default, DNS:kubernetes.default.svc,...",
+ "days_remaining": 722,
+ "expiry": "2019-01-09 17:00:02",
+ "health": "warning",
+ "path": "/etc/origin/master/master.server.crt",
+ "serial": 4,
+ "serial_hex": "0x4"
+ }
+ ],
+ "registry": [
+ {
+ "cert_cn": "CN:172.30.242.251, DNS:docker-registry-default.router.default.svc.cluster.local,...",
+ "days_remaining": 722,
+ "expiry": "2019-01-09 17:05:54",
+ "health": "warning",
+ "path": "/api/v1/namespaces/default/secrets/registry-certificates",
+ "serial": 13,
+ "serial_hex": "0xd"
+ }
+ ],
+ "router": [
+ {
+ "cert_cn": "CN:router.default.svc, DNS:router.default.svc, DNS:router.default.svc.cluster.local",
+ "days_remaining": 722,
+ "expiry": "2019-01-09 17:05:46",
+ "health": "warning",
+ "path": "/api/v1/namespaces/default/secrets/router-certs",
+ "serial": 5050662940948454653,
+ "serial_hex": "0x46178f2f6b765cfd"
+ }
+ ]
},
- "summary": {
- "warning": 6,
- "expired": 0,
- "total": 7,
- "ok": 1
+ "n01.example.com": {
+ "etcd": [],
+ "kubeconfigs": [
+ {
+ "cert_cn": "O:system:nodes, CN:system:node:n01.example.com",
+ "days_remaining": 722,
+ "expiry": "2019-01-09 17:03:28",
+ "health": "warning",
+ "path": "/etc/origin/node/system:node:n01.example.com.kubeconfig",
+ "serial": 11,
+ "serial_hex": "0xb"
+ }
+ ],
+ "meta": {
+ "checked_at_time": "2017-01-17 10:36:25.217103",
+ "show_all": "True",
+ "warn_before_date": "2021-02-25 10:36:25.217103",
+ "warning_days": 1500
+ },
+ "ocp_certs": [
+ {
+ "cert_cn": "CN:192.168.124.11, DNS:n01.example.com, DNS:192.168.124.11, IP Address:192.168.124.11",
+ "days_remaining": 722,
+ "expiry": "2019-01-09 17:03:29",
+ "health": "warning",
+ "path": "/etc/origin/node/server.crt",
+ "serial": 12,
+ "serial_hex": "0xc"
+ }
+ ],
+ "registry": [],
+ "router": []
}
+ },
+ "summary": {
+ "expired": 0,
+ "ok": 3,
+ "total": 15,
+ "warning": 12
+ }
}
```
@@ -233,24 +368,17 @@ $ jq '.summary.warning,.summary.expired' /tmp/cert-expiry-report.json
```
-Requirements
-------------
-
+# Requirements
* None
-Dependencies
-------------
-
+# Dependencies
* None
-License
--------
-
+# License
Apache License, Version 2.0
-Author Information
-------------------
+# Author Information
Tim Bielawa (tbielawa@redhat.com)
diff --git a/roles/openshift_certificate_expiry/examples/cert-expiry-report-html.png b/roles/openshift_certificate_expiry/examples/cert-expiry-report-html.png
new file mode 100644
index 000000000..799131659
--- /dev/null
+++ b/roles/openshift_certificate_expiry/examples/cert-expiry-report-html.png
Binary files differ
diff --git a/roles/openshift_certificate_expiry/examples/cert-expiry-report.html b/roles/openshift_certificate_expiry/examples/cert-expiry-report.html
new file mode 100644
index 000000000..db03a5060
--- /dev/null
+++ b/roles/openshift_certificate_expiry/examples/cert-expiry-report.html
@@ -0,0 +1,396 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <meta charset="UTF-8" />
+ <title>OCP Certificate Expiry Report</title>
+ <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap.min.css" />
+ <link href="https://fonts.googleapis.com/css?family=Source+Sans+Pro:300,400,700" rel="stylesheet" />
+ <style type="text/css">
+ body {
+ font-family: 'Source Sans Pro', sans-serif;
+ margin-left: 50px;
+ margin-right: 50px;
+ margin-bottom: 20px;
+ padding-top: 70px;
+ }
+ table {
+ border-collapse: collapse;
+ margin-bottom: 20px;
+ }
+ table, th, td {
+ border: 1px solid black;
+ }
+ th, td {
+ padding: 5px;
+ }
+ .cert-kind {
+ margin-top: 5px;
+ margin-bottom: 5px;
+ }
+ footer {
+ font-size: small;
+ text-align: center;
+ }
+ tr.odd {
+ background-color: #f2f2f2;
+ }
+ </style>
+ </head>
+ <body>
+ <nav class="navbar navbar-default navbar-fixed-top">
+ <div class="container-fluid">
+ <div class="navbar-header">
+ <a class="navbar-brand" href="#">OCP Certificate Expiry Report</a>
+ </div>
+ <div class="collapse navbar-collapse">
+ <p class="navbar-text navbar-right">
+ <button>
+ <a href="https://docs.openshift.com/container-platform/latest/install_config/redeploying_certificates.html"
+ target="_blank"
+ class="navbar-link">
+ <i class="glyphicon glyphicon-book"></i> Redeploying Certificates
+ </a>
+ </button>
+ <button>
+ <a href="https://github.com/openshift/openshift-ansible/tree/master/roles/openshift_certificate_expiry"
+ target="_blank"
+ class="navbar-link">
+ <i class="glyphicon glyphicon-book"></i> Expiry Role Documentation
+ </a>
+ </button>
+ </p>
+ </div>
+ </div>
+ </nav>
+
+ <h1>m01.example.com</h1>
+
+ <p>
+ Checked 12 total certificates. Expired/Warning/OK: 0/10/2. Warning window: 1500 days
+ </p>
+ <ul>
+ <li><b>Expirations checked at:</b> 2017-01-17 10:36:25.230920</li>
+ <li><b>Warn after date:</b> 2021-02-25 10:36:25.230920</li>
+ </ul>
+
+ <table border="1" width="100%">
+ <tr>
+ <th colspan="7" style="text-align:center"><h2 class="cert-kind">ocp_certs</h2></th>
+ </tr>
+
+ <tr>
+ <th>&nbsp;</th>
+ <th style="width:33%">Certificate Common/Alt Name(s)</th>
+ <th>Serial</th>
+ <th>Health</th>
+ <th>Days Remaining</th>
+ <th>Expiration Date</th>
+ <th>Path</th>
+ </tr>
+
+
+ <tr class="odd">
+ <td style="text-align:center"><i class="glyphicon glyphicon-alert"></i></td>
+ <td style="width:33%">CN:172.30.0.1, DNS:kubernetes, DNS:kubernetes.default, DNS:kubernetes.default.svc, DNS:kubernetes.default.svc.cluster.local, DNS:m01.example.com, DNS:openshift, DNS:openshift.default, DNS:openshift.default.svc, DNS:openshift.default.svc.cluster.local, DNS:172.30.0.1, DNS:192.168.124.148, IP Address:172.30.0.1, IP Address:192.168.124.148</td>
+ <td><code>int(4)/hex(0x4)</code></td>
+ <td>warning</td>
+ <td>722</td>
+ <td>2019-01-09 17:00:02</td>
+ <td>/etc/origin/master/master.server.crt</td>
+ </tr>
+
+ <tr class="even">
+ <td style="text-align:center"><i class="glyphicon glyphicon-alert"></i></td>
+ <td style="width:33%">CN:192.168.124.148, DNS:m01.example.com, DNS:192.168.124.148, IP Address:192.168.124.148</td>
+ <td><code>int(12)/hex(0xc)</code></td>
+ <td>warning</td>
+ <td>722</td>
+ <td>2019-01-09 17:03:29</td>
+ <td>/etc/origin/node/server.crt</td>
+ </tr>
+
+ <tr class="odd">
+ <td style="text-align:center"><i class="glyphicon glyphicon-ok"></i></td>
+ <td style="width:33%">CN:openshift-signer@1483981200</td>
+ <td><code>int(1)/hex(0x1)</code></td>
+ <td>ok</td>
+ <td>1817</td>
+ <td>2022-01-08 17:00:01</td>
+ <td>/etc/origin/master/ca.crt</td>
+ </tr>
+
+ <tr class="even">
+ <td style="text-align:center"><i class="glyphicon glyphicon-ok"></i></td>
+ <td style="width:33%">CN:openshift-signer@1483981200</td>
+ <td><code>int(1)/hex(0x1)</code></td>
+ <td>ok</td>
+ <td>1817</td>
+ <td>2022-01-08 17:00:01</td>
+ <td>/etc/origin/node/ca.crt</td>
+ </tr>
+ <tr>
+ <th colspan="7" style="text-align:center"><h2 class="cert-kind">etcd</h2></th>
+ </tr>
+
+ <tr>
+ <th>&nbsp;</th>
+ <th style="width:33%">Certificate Common/Alt Name(s)</th>
+ <th>Serial</th>
+ <th>Health</th>
+ <th>Days Remaining</th>
+ <th>Expiration Date</th>
+ <th>Path</th>
+ </tr>
+
+
+ <tr class="odd">
+ <td style="text-align:center"><i class="glyphicon glyphicon-alert"></i></td>
+ <td style="width:33%">CN:172.30.0.1, DNS:kubernetes, DNS:kubernetes.default, DNS:kubernetes.default.svc, DNS:kubernetes.default.svc.cluster.local, DNS:m01.example.com, DNS:openshift, DNS:openshift.default, DNS:openshift.default.svc, DNS:openshift.default.svc.cluster.local, DNS:172.30.0.1, DNS:192.168.124.148, IP Address:172.30.0.1, IP Address:192.168.124.148</td>
+ <td><code>int(7)/hex(0x7)</code></td>
+ <td>warning</td>
+ <td>722</td>
+ <td>2019-01-09 17:00:03</td>
+ <td>/etc/origin/master/etcd.server.crt</td>
+ </tr>
+ <tr>
+ <th colspan="7" style="text-align:center"><h2 class="cert-kind">kubeconfigs</h2></th>
+ </tr>
+
+ <tr>
+ <th>&nbsp;</th>
+ <th style="width:33%">Certificate Common/Alt Name(s)</th>
+ <th>Serial</th>
+ <th>Health</th>
+ <th>Days Remaining</th>
+ <th>Expiration Date</th>
+ <th>Path</th>
+ </tr>
+
+
+ <tr class="odd">
+ <td style="text-align:center"><i class="glyphicon glyphicon-alert"></i></td>
+ <td style="width:33%">O:system:nodes, CN:system:node:m01.example.com</td>
+ <td><code>int(11)/hex(0xb)</code></td>
+ <td>warning</td>
+ <td>722</td>
+ <td>2019-01-09 17:03:28</td>
+ <td>/etc/origin/node/system:node:m01.example.com.kubeconfig</td>
+ </tr>
+
+ <tr class="even">
+ <td style="text-align:center"><i class="glyphicon glyphicon-alert"></i></td>
+ <td style="width:33%">O:system:cluster-admins, CN:system:admin</td>
+ <td><code>int(8)/hex(0x8)</code></td>
+ <td>warning</td>
+ <td>722</td>
+ <td>2019-01-09 17:00:03</td>
+ <td>/etc/origin/master/admin.kubeconfig</td>
+ </tr>
+
+ <tr class="odd">
+ <td style="text-align:center"><i class="glyphicon glyphicon-alert"></i></td>
+ <td style="width:33%">O:system:masters, CN:system:openshift-master</td>
+ <td><code>int(3)/hex(0x3)</code></td>
+ <td>warning</td>
+ <td>722</td>
+ <td>2019-01-09 17:00:02</td>
+ <td>/etc/origin/master/openshift-master.kubeconfig</td>
+ </tr>
+
+ <tr class="even">
+ <td style="text-align:center"><i class="glyphicon glyphicon-alert"></i></td>
+ <td style="width:33%">O:system:routers, CN:system:openshift-router</td>
+ <td><code>int(9)/hex(0x9)</code></td>
+ <td>warning</td>
+ <td>722</td>
+ <td>2019-01-09 17:00:03</td>
+ <td>/etc/origin/master/openshift-router.kubeconfig</td>
+ </tr>
+
+ <tr class="odd">
+ <td style="text-align:center"><i class="glyphicon glyphicon-alert"></i></td>
+ <td style="width:33%">O:system:registries, CN:system:openshift-registry</td>
+ <td><code>int(10)/hex(0xa)</code></td>
+ <td>warning</td>
+ <td>722</td>
+ <td>2019-01-09 17:00:03</td>
+ <td>/etc/origin/master/openshift-registry.kubeconfig</td>
+ </tr>
+ <tr>
+ <th colspan="7" style="text-align:center"><h2 class="cert-kind">router</h2></th>
+ </tr>
+
+ <tr>
+ <th>&nbsp;</th>
+ <th style="width:33%">Certificate Common/Alt Name(s)</th>
+ <th>Serial</th>
+ <th>Health</th>
+ <th>Days Remaining</th>
+ <th>Expiration Date</th>
+ <th>Path</th>
+ </tr>
+
+
+ <tr class="odd">
+ <td style="text-align:center"><i class="glyphicon glyphicon-alert"></i></td>
+ <td style="width:33%">CN:router.default.svc, DNS:router.default.svc, DNS:router.default.svc.cluster.local</td>
+ <td><code>int(5050662940948454653)/hex(0x46178f2f6b765cfd)</code></td>
+ <td>warning</td>
+ <td>722</td>
+ <td>2019-01-09 17:05:46</td>
+ <td>/api/v1/namespaces/default/secrets/router-certs</td>
+ </tr>
+ <tr>
+ <th colspan="7" style="text-align:center"><h2 class="cert-kind">registry</h2></th>
+ </tr>
+
+ <tr>
+ <th>&nbsp;</th>
+ <th style="width:33%">Certificate Common/Alt Name(s)</th>
+ <th>Serial</th>
+ <th>Health</th>
+ <th>Days Remaining</th>
+ <th>Expiration Date</th>
+ <th>Path</th>
+ </tr>
+
+
+ <tr class="odd">
+ <td style="text-align:center"><i class="glyphicon glyphicon-alert"></i></td>
+ <td style="width:33%">CN:172.30.242.251, DNS:docker-registry-default.router.default.svc.cluster.local, DNS:docker-registry.default.svc.cluster.local, DNS:172.30.242.251, IP Address:172.30.242.251</td>
+ <td><code>int(13)/hex(0xd)</code></td>
+ <td>warning</td>
+ <td>722</td>
+ <td>2019-01-09 17:05:54</td>
+ <td>/api/v1/namespaces/default/secrets/registry-certificates</td>
+ </tr>
+ </table>
+ <hr />
+ <h1>n01.example.com</h1>
+
+ <p>
+ Checked 3 total certificates. Expired/Warning/OK: 0/2/1. Warning window: 1500 days
+ </p>
+ <ul>
+ <li><b>Expirations checked at:</b> 2017-01-17 10:36:25.217103</li>
+ <li><b>Warn after date:</b> 2021-02-25 10:36:25.217103</li>
+ </ul>
+
+ <table border="1" width="100%">
+ <tr>
+ <th colspan="7" style="text-align:center"><h2 class="cert-kind">ocp_certs</h2></th>
+ </tr>
+
+ <tr>
+ <th>&nbsp;</th>
+ <th style="width:33%">Certificate Common/Alt Name(s)</th>
+ <th>Serial</th>
+ <th>Health</th>
+ <th>Days Remaining</th>
+ <th>Expiration Date</th>
+ <th>Path</th>
+ </tr>
+
+
+ <tr class="odd">
+ <td style="text-align:center"><i class="glyphicon glyphicon-alert"></i></td>
+ <td style="width:33%">CN:192.168.124.11, DNS:n01.example.com, DNS:192.168.124.11, IP Address:192.168.124.11</td>
+ <td><code>int(12)/hex(0xc)</code></td>
+ <td>warning</td>
+ <td>722</td>
+ <td>2019-01-09 17:03:29</td>
+ <td>/etc/origin/node/server.crt</td>
+ </tr>
+
+ <tr class="even">
+ <td style="text-align:center"><i class="glyphicon glyphicon-ok"></i></td>
+ <td style="width:33%">CN:openshift-signer@1483981200</td>
+ <td><code>int(1)/hex(0x1)</code></td>
+ <td>ok</td>
+ <td>1817</td>
+ <td>2022-01-08 17:00:01</td>
+ <td>/etc/origin/node/ca.crt</td>
+ </tr>
+ <tr>
+ <th colspan="7" style="text-align:center"><h2 class="cert-kind">etcd</h2></th>
+ </tr>
+
+ <tr>
+ <th>&nbsp;</th>
+ <th style="width:33%">Certificate Common/Alt Name(s)</th>
+ <th>Serial</th>
+ <th>Health</th>
+ <th>Days Remaining</th>
+ <th>Expiration Date</th>
+ <th>Path</th>
+ </tr>
+
+ <tr>
+ <th colspan="7" style="text-align:center"><h2 class="cert-kind">kubeconfigs</h2></th>
+ </tr>
+
+ <tr>
+ <th>&nbsp;</th>
+ <th style="width:33%">Certificate Common/Alt Name(s)</th>
+ <th>Serial</th>
+ <th>Health</th>
+ <th>Days Remaining</th>
+ <th>Expiration Date</th>
+ <th>Path</th>
+ </tr>
+
+
+ <tr class="odd">
+ <td style="text-align:center"><i class="glyphicon glyphicon-alert"></i></td>
+ <td style="width:33%">O:system:nodes, CN:system:node:n01.example.com</td>
+ <td><code>int(11)/hex(0xb)</code></td>
+ <td>warning</td>
+ <td>722</td>
+ <td>2019-01-09 17:03:28</td>
+ <td>/etc/origin/node/system:node:n01.example.com.kubeconfig</td>
+ </tr>
+ <tr>
+ <th colspan="7" style="text-align:center"><h2 class="cert-kind">router</h2></th>
+ </tr>
+
+ <tr>
+ <th>&nbsp;</th>
+ <th style="width:33%">Certificate Common/Alt Name(s)</th>
+ <th>Serial</th>
+ <th>Health</th>
+ <th>Days Remaining</th>
+ <th>Expiration Date</th>
+ <th>Path</th>
+ </tr>
+
+ <tr>
+ <th colspan="7" style="text-align:center"><h2 class="cert-kind">registry</h2></th>
+ </tr>
+
+ <tr>
+ <th>&nbsp;</th>
+ <th style="width:33%">Certificate Common/Alt Name(s)</th>
+ <th>Serial</th>
+ <th>Health</th>
+ <th>Days Remaining</th>
+ <th>Expiration Date</th>
+ <th>Path</th>
+ </tr>
+
+ </table>
+ <hr />
+
+ <footer>
+ <p>
+ Expiration report generated by
+ the <a href="https://github.com/openshift/openshift-ansible"
+ target="_blank">openshift-ansible</a>
+ <a href="https://github.com/openshift/openshift-ansible/tree/master/roles/openshift_certificate_expiry"
+ target="_blank">certificate expiry</a> role.
+ </p>
+ <p>
+ Status icons from bootstrap/glyphicon
+ </p>
+ </footer>
+ </body>
+</html>
diff --git a/roles/openshift_certificate_expiry/examples/cert-expiry-report.json b/roles/openshift_certificate_expiry/examples/cert-expiry-report.json
new file mode 100644
index 000000000..8206e2842
--- /dev/null
+++ b/roles/openshift_certificate_expiry/examples/cert-expiry-report.json
@@ -0,0 +1,178 @@
+{
+ "data": {
+ "m01.example.com": {
+ "etcd": [
+ {
+ "cert_cn": "CN:172.30.0.1, DNS:kubernetes, DNS:kubernetes.default, DNS:kubernetes.default.svc, DNS:kubernetes.default.svc.cluster.local, DNS:m01.example.com, DNS:openshift, DNS:openshift.default, DNS:openshift.default.svc, DNS:openshift.default.svc.cluster.local, DNS:172.30.0.1, DNS:192.168.124.148, IP Address:172.30.0.1, IP Address:192.168.124.148",
+ "days_remaining": 722,
+ "expiry": "2019-01-09 17:00:03",
+ "health": "warning",
+ "path": "/etc/origin/master/etcd.server.crt",
+ "serial": 7,
+ "serial_hex": "0x7"
+ }
+ ],
+ "kubeconfigs": [
+ {
+ "cert_cn": "O:system:nodes, CN:system:node:m01.example.com",
+ "days_remaining": 722,
+ "expiry": "2019-01-09 17:03:28",
+ "health": "warning",
+ "path": "/etc/origin/node/system:node:m01.example.com.kubeconfig",
+ "serial": 11,
+ "serial_hex": "0xb"
+ },
+ {
+ "cert_cn": "O:system:cluster-admins, CN:system:admin",
+ "days_remaining": 722,
+ "expiry": "2019-01-09 17:00:03",
+ "health": "warning",
+ "path": "/etc/origin/master/admin.kubeconfig",
+ "serial": 8,
+ "serial_hex": "0x8"
+ },
+ {
+ "cert_cn": "O:system:masters, CN:system:openshift-master",
+ "days_remaining": 722,
+ "expiry": "2019-01-09 17:00:02",
+ "health": "warning",
+ "path": "/etc/origin/master/openshift-master.kubeconfig",
+ "serial": 3,
+ "serial_hex": "0x3"
+ },
+ {
+ "cert_cn": "O:system:routers, CN:system:openshift-router",
+ "days_remaining": 722,
+ "expiry": "2019-01-09 17:00:03",
+ "health": "warning",
+ "path": "/etc/origin/master/openshift-router.kubeconfig",
+ "serial": 9,
+ "serial_hex": "0x9"
+ },
+ {
+ "cert_cn": "O:system:registries, CN:system:openshift-registry",
+ "days_remaining": 722,
+ "expiry": "2019-01-09 17:00:03",
+ "health": "warning",
+ "path": "/etc/origin/master/openshift-registry.kubeconfig",
+ "serial": 10,
+ "serial_hex": "0xa"
+ }
+ ],
+ "meta": {
+ "checked_at_time": "2017-01-17 10:36:25.230920",
+ "show_all": "True",
+ "warn_before_date": "2021-02-25 10:36:25.230920",
+ "warning_days": 1500
+ },
+ "ocp_certs": [
+ {
+ "cert_cn": "CN:172.30.0.1, DNS:kubernetes, DNS:kubernetes.default, DNS:kubernetes.default.svc, DNS:kubernetes.default.svc.cluster.local, DNS:m01.example.com, DNS:openshift, DNS:openshift.default, DNS:openshift.default.svc, DNS:openshift.default.svc.cluster.local, DNS:172.30.0.1, DNS:192.168.124.148, IP Address:172.30.0.1, IP Address:192.168.124.148",
+ "days_remaining": 722,
+ "expiry": "2019-01-09 17:00:02",
+ "health": "warning",
+ "path": "/etc/origin/master/master.server.crt",
+ "serial": 4,
+ "serial_hex": "0x4"
+ },
+ {
+ "cert_cn": "CN:192.168.124.148, DNS:m01.example.com, DNS:192.168.124.148, IP Address:192.168.124.148",
+ "days_remaining": 722,
+ "expiry": "2019-01-09 17:03:29",
+ "health": "warning",
+ "path": "/etc/origin/node/server.crt",
+ "serial": 12,
+ "serial_hex": "0xc"
+ },
+ {
+ "cert_cn": "CN:openshift-signer@1483981200",
+ "days_remaining": 1817,
+ "expiry": "2022-01-08 17:00:01",
+ "health": "ok",
+ "path": "/etc/origin/master/ca.crt",
+ "serial": 1,
+ "serial_hex": "0x1"
+ },
+ {
+ "cert_cn": "CN:openshift-signer@1483981200",
+ "days_remaining": 1817,
+ "expiry": "2022-01-08 17:00:01",
+ "health": "ok",
+ "path": "/etc/origin/node/ca.crt",
+ "serial": 1,
+ "serial_hex": "0x1"
+ }
+ ],
+ "registry": [
+ {
+ "cert_cn": "CN:172.30.242.251, DNS:docker-registry-default.router.default.svc.cluster.local, DNS:docker-registry.default.svc.cluster.local, DNS:172.30.242.251, IP Address:172.30.242.251",
+ "days_remaining": 722,
+ "expiry": "2019-01-09 17:05:54",
+ "health": "warning",
+ "path": "/api/v1/namespaces/default/secrets/registry-certificates",
+ "serial": 13,
+ "serial_hex": "0xd"
+ }
+ ],
+ "router": [
+ {
+ "cert_cn": "CN:router.default.svc, DNS:router.default.svc, DNS:router.default.svc.cluster.local",
+ "days_remaining": 722,
+ "expiry": "2019-01-09 17:05:46",
+ "health": "warning",
+ "path": "/api/v1/namespaces/default/secrets/router-certs",
+ "serial": 5050662940948454653,
+ "serial_hex": "0x46178f2f6b765cfd"
+ }
+ ]
+ },
+ "n01.example.com": {
+ "etcd": [],
+ "kubeconfigs": [
+ {
+ "cert_cn": "O:system:nodes, CN:system:node:n01.example.com",
+ "days_remaining": 722,
+ "expiry": "2019-01-09 17:03:28",
+ "health": "warning",
+ "path": "/etc/origin/node/system:node:n01.example.com.kubeconfig",
+ "serial": 11,
+ "serial_hex": "0xb"
+ }
+ ],
+ "meta": {
+ "checked_at_time": "2017-01-17 10:36:25.217103",
+ "show_all": "True",
+ "warn_before_date": "2021-02-25 10:36:25.217103",
+ "warning_days": 1500
+ },
+ "ocp_certs": [
+ {
+ "cert_cn": "CN:192.168.124.11, DNS:n01.example.com, DNS:192.168.124.11, IP Address:192.168.124.11",
+ "days_remaining": 722,
+ "expiry": "2019-01-09 17:03:29",
+ "health": "warning",
+ "path": "/etc/origin/node/server.crt",
+ "serial": 12,
+ "serial_hex": "0xc"
+ },
+ {
+ "cert_cn": "CN:openshift-signer@1483981200",
+ "days_remaining": 1817,
+ "expiry": "2022-01-08 17:00:01",
+ "health": "ok",
+ "path": "/etc/origin/node/ca.crt",
+ "serial": 1,
+ "serial_hex": "0x1"
+ }
+ ],
+ "registry": [],
+ "router": []
+ }
+ },
+ "summary": {
+ "expired": 0,
+ "ok": 3,
+ "total": 15,
+ "warning": 12
+ }
+}
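
A saved report like the one above is easy to post-process. The following is only a sketch, not part of the change: the report path and the top-level 'data' key are assumptions, so adjust them to match openshift_certificate_expiry_json_results_path and the actual layout of your report.

    import json

    # Sketch: list certificates that are not healthy in a saved report.
    with open('/tmp/cert-expiry-report.json') as fp:
        report = json.load(fp)

    for host, groups in report.get('data', {}).items():
        for kind in ('ocp_certs', 'kubeconfigs', 'etcd', 'router', 'registry'):
            for cert in groups.get(kind, []):
                if cert['health'] != 'ok':
                    print(host, kind, cert['path'], cert['days_remaining'])

    print(report.get('summary'))
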
diff --git a/roles/openshift_certificate_expiry/examples/playbooks/default.yaml b/roles/openshift_certificate_expiry/examples/playbooks/default.yaml
new file mode 100644
index 000000000..630135cae
--- /dev/null
+++ b/roles/openshift_certificate_expiry/examples/playbooks/default.yaml
@@ -0,0 +1,10 @@
+---
+# Default behavior; you will need to run ansible-playbook with the
+# -v option to see the report results:
+
+- name: Check cert expiries
+ hosts: nodes:masters:etcd
+ become: yes
+ gather_facts: no
+ roles:
+ - role: openshift_certificate_expiry
diff --git a/roles/openshift_certificate_expiry/examples/playbooks/easy-mode.yaml b/roles/openshift_certificate_expiry/examples/playbooks/easy-mode.yaml
new file mode 100644
index 000000000..d0209426f
--- /dev/null
+++ b/roles/openshift_certificate_expiry/examples/playbooks/easy-mode.yaml
@@ -0,0 +1,21 @@
+---
+# This example playbook is a good starting point if you just want to
+# try the role out.
+#
+# This example enables HTML and JSON reports
+#
+# The warning window is set very large, so you will almost always get results back.
+#
+# All certificates (healthy or not) are included in the results
+
+- name: Check cert expiries
+ hosts: nodes:masters:etcd
+ become: yes
+ gather_facts: no
+ vars:
+ openshift_certificate_expiry_warning_days: 1500
+ openshift_certificate_expiry_save_json_results: yes
+ openshift_certificate_expiry_generate_html_report: yes
+ openshift_certificate_expiry_show_all: yes
+ roles:
+ - role: openshift_certificate_expiry
diff --git a/roles/openshift_certificate_expiry/examples/playbooks/html_and_json_default_paths.yaml b/roles/openshift_certificate_expiry/examples/playbooks/html_and_json_default_paths.yaml
new file mode 100644
index 000000000..d80cb6ff4
--- /dev/null
+++ b/roles/openshift_certificate_expiry/examples/playbooks/html_and_json_default_paths.yaml
@@ -0,0 +1,12 @@
+---
+# Generate HTML and JSON artifacts in their default paths:
+
+- name: Check cert expiries
+ hosts: nodes:masters:etcd
+ become: yes
+ gather_facts: no
+ vars:
+ openshift_certificate_expiry_generate_html_report: yes
+ openshift_certificate_expiry_save_json_results: yes
+ roles:
+ - role: openshift_certificate_expiry
diff --git a/roles/openshift_certificate_expiry/examples/playbooks/longer-warning-period-json-results.yaml b/roles/openshift_certificate_expiry/examples/playbooks/longer-warning-period-json-results.yaml
new file mode 100644
index 000000000..87a0f3be4
--- /dev/null
+++ b/roles/openshift_certificate_expiry/examples/playbooks/longer-warning-period-json-results.yaml
@@ -0,0 +1,13 @@
+---
+# Change the expiration warning window to 1500 days (good for testing
+# the module out) and save the results as a JSON file:
+
+- name: Check cert expiries
+ hosts: nodes:masters:etcd
+ become: yes
+ gather_facts: no
+ vars:
+ openshift_certificate_expiry_warning_days: 1500
+ openshift_certificate_expiry_save_json_results: yes
+ roles:
+ - role: openshift_certificate_expiry
diff --git a/roles/openshift_certificate_expiry/examples/playbooks/longer_warning_period.yaml b/roles/openshift_certificate_expiry/examples/playbooks/longer_warning_period.yaml
new file mode 100644
index 000000000..960457c4b
--- /dev/null
+++ b/roles/openshift_certificate_expiry/examples/playbooks/longer_warning_period.yaml
@@ -0,0 +1,12 @@
+---
+# Change the expiration warning window to 1500 days (good for testing
+# the module out):
+
+- name: Check cert expiries
+ hosts: nodes:masters:etcd
+ become: yes
+ gather_facts: no
+ vars:
+ openshift_certificate_expiry_warning_days: 1500
+ roles:
+ - role: openshift_certificate_expiry
diff --git a/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py b/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py
index a474b36b0..85671b164 100644
--- a/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py
+++ b/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py
@@ -122,6 +122,8 @@ A 3-tuple of the form: (certificate_common_name, certificate_expiry_date, certif
cert_loaded = OpenSSL.crypto.load_certificate(
OpenSSL.crypto.FILETYPE_PEM, _cert_string)
+ cert_serial = cert_loaded.get_serial_number()
+
######################################################################
# Read all possible names from the cert
cert_subjects = []
@@ -178,7 +180,7 @@ A 3-tuple of the form: (certificate_common_name, certificate_expiry_date, certif
time_remaining = cert_expiry_date - now
- return (cert_subject, cert_expiry_date, time_remaining)
+ return (cert_subject, cert_expiry_date, time_remaining, cert_serial)
def classify_cert(cert_meta, now, time_remaining, expire_window, cert_list):
@@ -210,6 +212,7 @@ Return:
cert_meta['health'] = 'ok'
cert_meta['expiry'] = expiry_str
+ cert_meta['serial_hex'] = hex(int(cert_meta['serial']))
cert_list.append(cert_meta)
return cert_list
@@ -373,7 +376,10 @@ an OpenShift Container Platform cluster
for _, v in cert_meta.items():
with open(v, 'r') as fp:
cert = fp.read()
- cert_subject, cert_expiry_date, time_remaining = load_and_handle_cert(cert, now)
+ (cert_subject,
+ cert_expiry_date,
+ time_remaining,
+ cert_serial) = load_and_handle_cert(cert, now)
expire_check_result = {
'cert_cn': cert_subject,
@@ -381,6 +387,7 @@ an OpenShift Container Platform cluster
'expiry': cert_expiry_date,
'days_remaining': time_remaining.days,
'health': None,
+ 'serial': cert_serial
}
classify_cert(expire_check_result, now, time_remaining, expire_window, ocp_certs)
@@ -420,7 +427,8 @@ an OpenShift Container Platform cluster
c = cfg['users'][0]['user']['client-certificate-data']
(cert_subject,
cert_expiry_date,
- time_remaining) = load_and_handle_cert(c, now, base64decode=True)
+ time_remaining,
+ cert_serial) = load_and_handle_cert(c, now, base64decode=True)
expire_check_result = {
'cert_cn': cert_subject,
@@ -428,6 +436,7 @@ an OpenShift Container Platform cluster
'expiry': cert_expiry_date,
'days_remaining': time_remaining.days,
'health': None,
+ 'serial': cert_serial
}
classify_cert(expire_check_result, now, time_remaining, expire_window, kubeconfigs)
@@ -448,7 +457,8 @@ an OpenShift Container Platform cluster
c = cfg['users'][0]['user']['client-certificate-data']
(cert_subject,
cert_expiry_date,
- time_remaining) = load_and_handle_cert(c, now, base64decode=True)
+ time_remaining,
+ cert_serial) = load_and_handle_cert(c, now, base64decode=True)
expire_check_result = {
'cert_cn': cert_subject,
@@ -456,6 +466,7 @@ an OpenShift Container Platform cluster
'expiry': cert_expiry_date,
'days_remaining': time_remaining.days,
'health': None,
+ 'serial': cert_serial
}
classify_cert(expire_check_result, now, time_remaining, expire_window, kubeconfigs)
@@ -500,7 +511,8 @@ an OpenShift Container Platform cluster
c = fp.read()
(cert_subject,
cert_expiry_date,
- time_remaining) = load_and_handle_cert(c, now)
+ time_remaining,
+ cert_serial) = load_and_handle_cert(c, now)
expire_check_result = {
'cert_cn': cert_subject,
@@ -508,6 +520,7 @@ an OpenShift Container Platform cluster
'expiry': cert_expiry_date,
'days_remaining': time_remaining.days,
'health': None,
+ 'serial': cert_serial
}
classify_cert(expire_check_result, now, time_remaining, expire_window, etcd_certs)
@@ -537,7 +550,8 @@ an OpenShift Container Platform cluster
with open(etcd_cert, 'r') as etcd_fp:
(cert_subject,
cert_expiry_date,
- time_remaining) = load_and_handle_cert(etcd_fp.read(), now)
+ time_remaining,
+ cert_serial) = load_and_handle_cert(etcd_fp.read(), now)
expire_check_result = {
'cert_cn': cert_subject,
@@ -545,6 +559,7 @@ an OpenShift Container Platform cluster
'expiry': cert_expiry_date,
'days_remaining': time_remaining.days,
'health': None,
+ 'serial': cert_serial
}
classify_cert(expire_check_result, now, time_remaining, expire_window, etcd_certs)
@@ -581,7 +596,8 @@ an OpenShift Container Platform cluster
else:
(cert_subject,
cert_expiry_date,
- time_remaining) = load_and_handle_cert(router_c, now, base64decode=True)
+ time_remaining,
+ cert_serial) = load_and_handle_cert(router_c, now, base64decode=True)
expire_check_result = {
'cert_cn': cert_subject,
@@ -589,6 +605,7 @@ an OpenShift Container Platform cluster
'expiry': cert_expiry_date,
'days_remaining': time_remaining.days,
'health': None,
+ 'serial': cert_serial
}
classify_cert(expire_check_result, now, time_remaining, expire_window, router_certs)
@@ -610,7 +627,8 @@ an OpenShift Container Platform cluster
else:
(cert_subject,
cert_expiry_date,
- time_remaining) = load_and_handle_cert(registry_c, now, base64decode=True)
+ time_remaining,
+ cert_serial) = load_and_handle_cert(registry_c, now, base64decode=True)
expire_check_result = {
'cert_cn': cert_subject,
@@ -618,6 +636,7 @@ an OpenShift Container Platform cluster
'expiry': cert_expiry_date,
'days_remaining': time_remaining.days,
'health': None,
+ 'serial': cert_serial
}
classify_cert(expire_check_result, now, time_remaining, expire_window, registry_certs)
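
The serial handling added above boils down to pyOpenSSL's get_serial_number() plus Python's built-in hex(). A minimal standalone sketch, assuming pyOpenSSL is installed and cert_pem holds a PEM-encoded certificate string:

    import OpenSSL.crypto

    def cert_serial_pair(cert_pem):
        # Mirror the 'serial' and 'serial_hex' fields written into the report.
        cert = OpenSSL.crypto.load_certificate(
            OpenSSL.crypto.FILETYPE_PEM, cert_pem)
        serial = cert.get_serial_number()   # plain int; router serials can be large
        return serial, hex(serial)          # e.g. (13, '0xd')
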
diff --git a/roles/openshift_certificate_expiry/templates/cert-expiry-table.html.j2 b/roles/openshift_certificate_expiry/templates/cert-expiry-table.html.j2
index b05110336..1d4bb24e9 100644
--- a/roles/openshift_certificate_expiry/templates/cert-expiry-table.html.j2
+++ b/roles/openshift_certificate_expiry/templates/cert-expiry-table.html.j2
@@ -45,11 +45,20 @@
</div>
<div class="collapse navbar-collapse">
<p class="navbar-text navbar-right">
- <a href="https://docs.openshift.com/container-platform/latest/install_config/redeploying_certificates.html"
- target="_blank"
- class="navbar-link">
- <i class="glyphicon glyphicon-book"></i> Redeploying Certificates
- </a>
+ <button>
+ <a href="https://docs.openshift.com/container-platform/latest/install_config/redeploying_certificates.html"
+ target="_blank"
+ class="navbar-link">
+ <i class="glyphicon glyphicon-book"></i> Redeploying Certificates
+ </a>
+ </button>
+ <button>
+ <a href="https://github.com/openshift/openshift-ansible/tree/master/roles/openshift_certificate_expiry"
+ target="_blank"
+ class="navbar-link">
+ <i class="glyphicon glyphicon-book"></i> Expiry Role Documentation
+ </a>
+ </button>
</p>
</div>
</div>
@@ -71,12 +80,13 @@
{# These are hard-coded right now, but should be grabbed dynamically from the registered results #}
{%- for kind in ['ocp_certs', 'etcd', 'kubeconfigs', 'router', 'registry'] -%}
<tr>
- <th colspan="6" style="text-align:center"><h2 class="cert-kind">{{ kind }}</h2></th>
+ <th colspan="7" style="text-align:center"><h2 class="cert-kind">{{ kind }}</h2></th>
</tr>
<tr>
<th>&nbsp;</th>
<th style="width:33%">Certificate Common/Alt Name(s)</th>
+          <th>Serial</th>
<th>Health</th>
<th>Days Remaining</th>
<th>Expiration Date</th>
@@ -98,6 +108,7 @@
<tr class="{{ loop.cycle('odd', 'even') }}">
<td style="text-align:center"><i class="{{ health_icon }}"></i></td>
<td style="width:33%">{{ v.cert_cn }}</td>
+ <td><code>int({{ v.serial }})/hex({{ v.serial_hex }})</code></td>
<td>{{ v.health }}</td>
<td>{{ v.days_remaining }}</td>
<td>{{ v.expiry }}</td>
@@ -114,7 +125,11 @@
<footer>
<p>
- Expiration report generated by <a href="https://github.com/openshift/openshift-ansible" target="_blank">openshift-ansible</a>
+ Expiration report generated by
+ the <a href="https://github.com/openshift/openshift-ansible"
+ target="_blank">openshift-ansible</a>
+ <a href="https://github.com/openshift/openshift-ansible/tree/master/roles/openshift_certificate_expiry"
+ target="_blank">certificate expiry</a> role.
</p>
<p>
Status icons from bootstrap/glyphicon
diff --git a/roles/openshift_examples/files/examples/v1.3/quickstart-templates/jenkins-ephemeral-template.json b/roles/openshift_examples/files/examples/v1.3/quickstart-templates/jenkins-ephemeral-template.json
index 62ccc5b7f..ab1c85b7e 100644
--- a/roles/openshift_examples/files/examples/v1.3/quickstart-templates/jenkins-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v1.3/quickstart-templates/jenkins-ephemeral-template.json
@@ -98,14 +98,6 @@
},
"env": [
{
- "name": "OPENSHIFT_ENABLE_OAUTH",
- "value": "${ENABLE_OAUTH}"
- },
- {
- "name": "OPENSHIFT_ENABLE_REDIRECT_PROMPT",
- "value": "true"
- },
- {
"name": "KUBERNETES_MASTER",
"value": "https://kubernetes.default:443"
},
@@ -245,12 +237,6 @@
"value": "jenkins-jnlp"
},
{
- "name": "ENABLE_OAUTH",
- "displayName": "Enable OAuth in Jenkins",
- "description": "Whether to enable OAuth OpenShift integration. If false, the static account 'admin' will be initialized with the password 'password'.",
- "value": "true"
- },
- {
"name": "MEMORY_LIMIT",
"displayName": "Memory Limit",
"description": "Maximum amount of memory the container can use.",
diff --git a/roles/openshift_examples/files/examples/v1.3/quickstart-templates/jenkins-persistent-template.json b/roles/openshift_examples/files/examples/v1.3/quickstart-templates/jenkins-persistent-template.json
index 50c4ad566..87c439ad2 100644
--- a/roles/openshift_examples/files/examples/v1.3/quickstart-templates/jenkins-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v1.3/quickstart-templates/jenkins-persistent-template.json
@@ -115,14 +115,6 @@
},
"env": [
{
- "name": "OPENSHIFT_ENABLE_OAUTH",
- "value": "${ENABLE_OAUTH}"
- },
- {
- "name": "OPENSHIFT_ENABLE_REDIRECT_PROMPT",
- "value": "true"
- },
- {
"name": "KUBERNETES_MASTER",
"value": "https://kubernetes.default:443"
},
@@ -262,12 +254,6 @@
"value": "jenkins-jnlp"
},
{
- "name": "ENABLE_OAUTH",
- "displayName": "Enable OAuth in Jenkins",
- "description": "Whether to enable OAuth OpenShift integration. If false, the static account 'admin' will be initialized with the password 'password'.",
- "value": "true"
- },
- {
"name": "MEMORY_LIMIT",
"displayName": "Memory Limit",
"description": "Maximum amount of memory the container can use.",
diff --git a/roles/openshift_logging/library/openshift_logging_facts.py b/roles/openshift_logging/library/openshift_logging_facts.py
index 8bbfdf7bf..64bc33435 100644
--- a/roles/openshift_logging/library/openshift_logging_facts.py
+++ b/roles/openshift_logging/library/openshift_logging_facts.py
@@ -105,9 +105,9 @@ class OpenshiftLoggingFacts(OCBaseCommand):
def add_facts_for(self, comp, kind, name=None, facts=None):
''' Add facts for the provided kind '''
- if comp in self.facts is False:
+ if comp not in self.facts:
self.facts[comp] = dict()
- if kind in self.facts[comp] is False:
+ if kind not in self.facts[comp]:
self.facts[comp][kind] = dict()
if name:
self.facts[comp][kind][name] = facts
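
The fix above is worth a second look: `comp in self.facts is False` is a chained comparison, so it evaluates as `(comp in self.facts) and (self.facts is False)` and is effectively always False for a real dict, which means the initialization branch never ran. A small sketch of the pitfall:

    facts = {}
    comp = 'curator'

    print(comp in facts is False)    # False: chains to (comp in facts) and (facts is False)
    print((comp in facts) is False)  # True: what the original condition intended
    print(comp not in facts)         # True: the idiomatic form used in the fix
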
diff --git a/roles/openshift_logging/tasks/generate_configmaps.yaml b/roles/openshift_logging/tasks/generate_configmaps.yaml
index b24a7c342..8fcf517ad 100644
--- a/roles/openshift_logging/tasks/generate_configmaps.yaml
+++ b/roles/openshift_logging/tasks/generate_configmaps.yaml
@@ -49,7 +49,7 @@
- copy:
content: "{{curator_config_contents}}"
dest: "{{mktemp.stdout}}/curator.yml"
- when: curator_config_contenets is defined
+ when: curator_config_contents is defined
changed_when: no
- command: >
diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml
index af03e9371..a9699adb8 100644
--- a/roles/openshift_logging/tasks/install_logging.yaml
+++ b/roles/openshift_logging/tasks/install_logging.yaml
@@ -23,23 +23,30 @@
loop_control:
loop_var: install_component
+- find: paths={{ mktemp.stdout }}/templates patterns=*.yaml
+ register: object_def_files
+ changed_when: no
+
+- slurp: src={{item}}
+ register: object_defs
+ with_items: "{{object_def_files.files | map(attribute='path') | list | sort}}"
+ changed_when: no
+
- name: Create objects
include: oc_apply.yaml
vars:
- kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig"
- namespace: "{{ openshift_logging_namespace }}"
- - file_name: "{{ file }}"
- - file_content: "{{ lookup('file', file) | from_yaml }}"
- with_fileglob:
- - "{{ mktemp.stdout }}/templates/*.yaml"
+ - file_name: "{{ file.source }}"
+ - file_content: "{{ file.content | b64decode | from_yaml }}"
+ with_items: "{{ object_defs.results }}"
loop_control:
loop_var: file
when: not ansible_check_mode
- name: Printing out objects to create
- debug: msg="{{lookup('file', file)|quote}}"
- with_fileglob:
- - "{{mktemp.stdout}}/templates/*.yaml"
+ debug: msg={{file.content | b64decode }}
+ with_items: "{{ object_defs.results }}"
loop_control:
loop_var: file
when: ansible_check_mode
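
The find/slurp pattern reads the generated templates on the remote host (with_fileglob and lookup('file') only ever look at files on the control machine) and hands their contents back base64-encoded. A rough Python equivalent of the `content | b64decode | from_yaml` filter chain, assuming PyYAML is available; the helper name is hypothetical:

    import base64
    import yaml

    def decode_slurped(slurp_item):
        # slurp_item is one entry of object_defs.results; 'content' is base64.
        raw = base64.b64decode(slurp_item['content']).decode('utf-8')
        return yaml.safe_load(raw)
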
diff --git a/roles/openshift_master_certificates/tasks/main.yml b/roles/openshift_master_certificates/tasks/main.yml
index e9b7de330..4620dd877 100644
--- a/roles/openshift_master_certificates/tasks/main.yml
+++ b/roles/openshift_master_certificates/tasks/main.yml
@@ -30,7 +30,6 @@
| oo_collect(attribute='stat.exists')
| list)) }}"
-
- name: Ensure the generated_configs directory present
file:
path: "{{ openshift_master_generated_config_dir }}"
@@ -39,30 +38,50 @@
when: master_certs_missing | bool and inventory_hostname != openshift_ca_host
delegate_to: "{{ openshift_ca_host }}"
-- file:
- src: "{{ openshift_master_config_dir }}/{{ item }}"
- dest: "{{ openshift_master_generated_config_dir }}/{{ item }}"
- state: hard
- with_items:
- - ca.crt
- - ca.key
- - ca.serial.txt
- when: master_certs_missing | bool and inventory_hostname != openshift_ca_host
- delegate_to: "{{ openshift_ca_host }}"
-
-- name: Create the master certificates if they do not already exist
+- name: Create the master server certificate
command: >
- {{ openshift.common.client_binary }} adm create-master-certs
+ {{ hostvars[openshift_ca_host].openshift.common.client_binary }} adm ca create-server-cert
{% for named_ca_certificate in openshift.master.named_certificates | default([]) | oo_collect('cafile') %}
--certificate-authority {{ named_ca_certificate }}
{% endfor %}
- --hostnames={{ openshift.common.all_hostnames | join(',') }}
- --master={{ openshift.master.api_url }}
- --public-master={{ openshift.master.public_api_url }}
- --cert-dir={{ openshift_master_generated_config_dir }}
+ --hostnames={{ hostvars[item].openshift.common.all_hostnames | join(',') }}
+ --cert={{ openshift_generated_configs_dir }}/master-{{ hostvars[item].openshift.common.hostname }}/master.server.crt
+ --key={{ openshift_generated_configs_dir }}/master-{{ hostvars[item].openshift.common.hostname }}/master.server.key
+ --signer-cert={{ openshift_ca_cert }}
+ --signer-key={{ openshift_ca_key }}
+ --signer-serial={{ openshift_ca_serial }}
--overwrite=false
- when: master_certs_missing | bool and inventory_hostname != openshift_ca_host
+ with_items: "{{ hostvars
+ | oo_select_keys(groups['oo_masters_to_config'])
+ | oo_collect(attribute='inventory_hostname', filters={'master_certs_missing':True})
+ | difference([openshift_ca_host])}}"
+ delegate_to: "{{ openshift_ca_host }}"
+ run_once: true
+
+- name: Generate the master client config
+ command: >
+ {{ hostvars[openshift_ca_host].openshift.common.client_binary }} adm create-api-client-config
+ {% for named_ca_certificate in openshift.master.named_certificates | default([]) | oo_collect('cafile') %}
+ --certificate-authority {{ named_ca_certificate }}
+ {% endfor %}
+ --certificate-authority={{ openshift_ca_cert }}
+ --client-dir={{ openshift_generated_configs_dir }}/master-{{ hostvars[item].openshift.common.hostname }}
+ --groups=system:masters,system:openshift-master
+ --master={{ openshift.master.api_url }}
+ --public-master={{ openshift.master.public_api_url }}
+ --signer-cert={{ openshift_ca_cert }}
+ --signer-key={{ openshift_ca_key }}
+ --signer-serial={{ openshift_ca_serial }}
+ --user=system:openshift-master
+ --basename=openshift-master
+ args:
+ creates: "{{ openshift_generated_configs_dir }}/master-{{ hostvars[item].openshift.common.hostname }}/openshift-master.kubeconfig"
+ with_items: "{{ hostvars
+ | oo_select_keys(groups['oo_masters_to_config'])
+ | oo_collect(attribute='inventory_hostname', filters={'master_certs_missing':True})
+ | difference([openshift_ca_host])}}"
delegate_to: "{{ openshift_ca_host }}"
+ run_once: true
- file:
src: "{{ openshift_master_config_dir }}/{{ item }}"
@@ -86,7 +105,7 @@
- name: Create local temp directory for syncing certs
local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX
- register: g_master_mktemp
+ register: g_master_certs_mktemp
changed_when: False
when: master_certs_missing | bool
delegate_to: localhost
@@ -104,7 +123,7 @@
- name: Retrieve the master cert tarball from the master
fetch:
src: "{{ openshift_master_generated_config_dir }}.tgz"
- dest: "{{ g_master_mktemp.stdout }}/"
+ dest: "{{ g_master_certs_mktemp.stdout }}/"
flat: yes
fail_on_missing: yes
validate_checksum: yes
@@ -119,11 +138,11 @@
- name: Unarchive the tarball on the master
unarchive:
- src: "{{ g_master_mktemp.stdout }}/{{ openshift_master_cert_subdir }}.tgz"
+ src: "{{ g_master_certs_mktemp.stdout }}/{{ openshift_master_cert_subdir }}.tgz"
dest: "{{ openshift_master_config_dir }}"
when: master_certs_missing | bool and inventory_hostname != openshift_ca_host
-- file: name={{ g_master_mktemp.stdout }} state=absent
+- file: name={{ g_master_certs_mktemp.stdout }} state=absent
changed_when: False
when: master_certs_missing | bool
delegate_to: localhost
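
The with_items expressions above are easier to read once you see what the filter chain selects: take the hostvars of the masters being configured, keep the hosts flagged with master_certs_missing, and drop the CA host itself. Roughly equivalent Python, with hypothetical names, purely to illustrate the selection:

    def hosts_needing_master_certs(hostvars, masters, ca_host):
        # oo_select_keys: restrict hostvars to the masters being configured.
        selected = [hostvars[h] for h in masters if h in hostvars]
        # oo_collect(..., filters={'master_certs_missing': True}): keep flagged hosts.
        missing = [hv['inventory_hostname'] for hv in selected
                   if hv.get('master_certs_missing')]
        # difference([openshift_ca_host]): the CA host handles its own certs in place.
        return [h for h in missing if h != ca_host]
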
diff --git a/roles/openshift_metrics/README.md b/roles/openshift_metrics/README.md
index f4c47c7bb..0f287e944 100644
--- a/roles/openshift_metrics/README.md
+++ b/roles/openshift_metrics/README.md
@@ -8,7 +8,7 @@ Requirements
The following variables need to be set and will be validated:
-- `openshift_metrics_hostname`: hostname used on the hawkular metrics route.
+- `openshift_metrics_hawkular_hostname`: hostname used on the hawkular metrics route.
- `openshift_metrics_project`: project (i.e. namespace) where the components will be
deployed.
diff --git a/roles/openshift_metrics/tasks/install_metrics.yaml b/roles/openshift_metrics/tasks/install_metrics.yaml
index bab37dbfb..ddaa54438 100644
--- a/roles/openshift_metrics/tasks/install_metrics.yaml
+++ b/roles/openshift_metrics/tasks/install_metrics.yaml
@@ -20,15 +20,23 @@
loop_control:
loop_var: include_file
+- find: paths={{ mktemp.stdout }}/templates patterns=*.yaml
+ register: object_def_files
+ changed_when: no
+
+- slurp: src={{item.path}}
+ register: object_defs
+ with_items: "{{object_def_files.files}}"
+ changed_when: no
+
- name: Create objects
include: oc_apply.yaml
vars:
kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig"
namespace: "{{ openshift_metrics_project }}"
- file_name: "{{ item }}"
- file_content: "{{ lookup('file',item) | from_yaml }}"
- with_fileglob:
- - "{{ mktemp.stdout }}/templates/*.yaml"
+ file_name: "{{ item.source }}"
+ file_content: "{{ item.content | b64decode | from_yaml }}"
+ with_items: "{{ object_defs.results }}"
- name: Scaling up cluster
include: start_metrics.yaml
diff --git a/roles/openshift_node/meta/main.yml b/roles/openshift_node/meta/main.yml
index 91f118191..10036abed 100644
--- a/roles/openshift_node/meta/main.yml
+++ b/roles/openshift_node/meta/main.yml
@@ -17,8 +17,6 @@ dependencies:
- role: openshift_docker
- role: openshift_node_certificates
- role: openshift_cloud_provider
-- role: openshift_node_dnsmasq
- when: openshift.common.use_dnsmasq | bool
- role: os_firewall
os_firewall_allow:
- service: Kubernetes kubelet
@@ -43,3 +41,5 @@ dependencies:
- service: Kubernetes service NodePort UDP
port: "{{ openshift_node_port_range | default('') }}/udp"
when: openshift_node_port_range is defined
+- role: openshift_node_dnsmasq
+ when: openshift.common.use_dnsmasq | bool
diff --git a/roles/openshift_node/templates/openshift.docker.node.service b/roles/openshift_node/templates/openshift.docker.node.service
index 3b5865a50..e33d5d497 100644
--- a/roles/openshift_node/templates/openshift.docker.node.service
+++ b/roles/openshift_node/templates/openshift.docker.node.service
@@ -15,7 +15,7 @@ After={{ openshift.common.service_type }}-node-dep.service
EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node
EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node-dep
ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type }}-node
-ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node -v /:/rootfs:ro -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} -e HOST=/rootfs -e HOST_ETC=/host-etc -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro -v /run:/run -v /sys:/sys:rw -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch -v /etc/origin/sdn:/etc/openshift-sdn -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS {{ openshift.node.node_image }}:${IMAGE_VERSION}
+ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node -v /:/rootfs:ro -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} -e HOST=/rootfs -e HOST_ETC=/host-etc -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro -v /run:/run -v /sys:/sys:rw -v /sys/fs/cgroup:/sys/fs/cgroup:rw -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch -v /etc/origin/sdn:/etc/openshift-sdn -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS {{ openshift.node.node_image }}:${IMAGE_VERSION}
ExecStartPost=/usr/bin/sleep 10
ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-node
SyslogIdentifier={{ openshift.common.service_type }}-node
diff --git a/roles/openshift_node_certificates/tasks/main.yml b/roles/openshift_node_certificates/tasks/main.yml
index 717bf3cea..a263f4f3a 100644
--- a/roles/openshift_node_certificates/tasks/main.yml
+++ b/roles/openshift_node_certificates/tasks/main.yml
@@ -49,32 +49,38 @@
--certificate-authority {{ named_ca_certificate }}
{% endfor %}
--certificate-authority={{ openshift_ca_cert }}
- --client-dir={{ openshift_node_generated_config_dir }}
+ --client-dir={{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname }}
--groups=system:nodes
--master={{ hostvars[openshift_ca_host].openshift.master.api_url }}
--signer-cert={{ openshift_ca_cert }}
--signer-key={{ openshift_ca_key }}
--signer-serial={{ openshift_ca_serial }}
- --user=system:node:{{ openshift.common.hostname }}
+ --user=system:node:{{ hostvars[item].openshift.common.hostname }}
args:
- creates: "{{ openshift_node_generated_config_dir }}"
- when: node_certs_missing | bool
+ creates: "{{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname }}"
+ with_items: "{{ hostvars
+ | oo_select_keys(groups['oo_nodes_to_config'])
+ | oo_collect(attribute='inventory_hostname', filters={'node_certs_missing':True}) }}"
delegate_to: "{{ openshift_ca_host }}"
+ run_once: true
- name: Generate the node server certificate
command: >
{{ hostvars[openshift_ca_host].openshift.common.client_binary }} adm ca create-server-cert
- --cert={{ openshift_node_generated_config_dir }}/server.crt
- --key={{ openshift_generated_configs_dir }}/node-{{ openshift.common.hostname }}/server.key
+ --cert={{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname }}/server.crt
+ --key={{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname }}/server.key
--overwrite=true
- --hostnames={{ openshift.common.hostname }},{{ openshift.common.public_hostname }},{{ openshift.common.ip }},{{ openshift.common.public_ip }}
+ --hostnames={{ hostvars[item].openshift.common.hostname }},{{ hostvars[item].openshift.common.public_hostname }},{{ hostvars[item].openshift.common.ip }},{{ hostvars[item].openshift.common.public_ip }}
--signer-cert={{ openshift_ca_cert }}
--signer-key={{ openshift_ca_key }}
--signer-serial={{ openshift_ca_serial }}
args:
- creates: "{{ openshift_node_generated_config_dir }}/server.crt"
- when: node_certs_missing | bool
- delegate_to: "{{ openshift_ca_host}}"
+ creates: "{{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname }}/server.crt"
+ with_items: "{{ hostvars
+ | oo_select_keys(groups['oo_nodes_to_config'])
+ | oo_collect(attribute='inventory_hostname', filters={'node_certs_missing':True}) }}"
+ delegate_to: "{{ openshift_ca_host }}"
+ run_once: true
- name: Create local temp directory for syncing certs
local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX
diff --git a/roles/rhel_subscribe/tasks/main.yml b/roles/rhel_subscribe/tasks/main.yml
index ba3b9a923..28c3c7080 100644
--- a/roles/rhel_subscribe/tasks/main.yml
+++ b/roles/rhel_subscribe/tasks/main.yml
@@ -27,6 +27,11 @@
creates: /etc/rhsm/ca/katello-server-ca.pem
when: rhel_subscription_server is defined and rhel_subscription_server
+- name: Install Red Hat Subscription manager
+ yum:
+ name: subscription-manager
+ state: present
+
- name: RedHat subscriptions
redhat_subscription:
username: "{{ rhel_subscription_user }}"
diff --git a/setup.py b/setup.py
index c826c167f..372aca9ff 100644
--- a/setup.py
+++ b/setup.py
@@ -6,7 +6,7 @@ from __future__ import print_function
import os
import fnmatch
import re
-
+import sys
import yaml
# Always prefer setuptools over distutils
@@ -146,6 +146,57 @@ class OpenShiftAnsiblePylint(PylintCommand):
return func(*func_args, **func_kwargs)
+class OpenShiftAnsibleGenerateValidation(Command):
+ ''' Command to run generated module validation'''
+ description = "Run generated module validation"
+ user_options = []
+
+ def initialize_options(self):
+ ''' initialize_options '''
+ pass
+
+ def finalize_options(self):
+ ''' finalize_options '''
+ pass
+
+    # self isn't used, but I believe it is required when the command is called.
+ # pylint: disable=no-self-use
+ def run(self):
+ ''' run command '''
+ # find the files that call generate
+ generate_files = find_files('roles',
+ ['inventory',
+ 'test',
+ 'playbooks',
+ 'utils'],
+ None,
+ 'generate.py$')
+
+ if len(generate_files) < 1:
+ print('Did not find any code generation. Please verify module code generation.') # noqa: E501
+ raise SystemExit(1)
+
+ errors = False
+ for gen in generate_files:
+ print('Checking generated module code: {0}'.format(gen))
+ try:
+ sys.path.insert(0, os.path.dirname(gen))
+ # we are importing dynamically. This isn't in
+ # the python path.
+ # pylint: disable=import-error
+ import generate
+ generate.verify()
+ except generate.GenerateAnsibleException as gae:
+ print(gae.args)
+ errors = True
+
+ if errors:
+ print('Found errors while generating module code.')
+ raise SystemExit(1)
+
+ print('\nAll generate scripts passed.\n')
+
+
class UnsupportedCommand(Command):
''' Basic Command to override unsupported commands '''
user_options = []
@@ -188,6 +239,7 @@ setup(
'sdist': UnsupportedCommand,
'lint': OpenShiftAnsiblePylint,
'yamllint': OpenShiftAnsibleYamlLint,
+ 'generate_validation': OpenShiftAnsibleGenerateValidation,
},
packages=[],
)
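
One caveat with the loop in OpenShiftAnsibleGenerateValidation: a bare `import generate` is cached in sys.modules, so if more than one role ships a generate.py, later iterations may silently re-validate the first module that was imported. A sketch of loading each file by path with importlib instead (Python 3.5+ only; this helper is hypothetical and not part of the change above):

    import importlib.util
    import os

    def load_generate(path):
        # Build a unique module name per file so sys.modules caching
        # does not hand back a previously imported generate.py.
        name = 'generate_' + os.path.dirname(path).replace(os.sep, '_')
        spec = importlib.util.spec_from_file_location(name, path)
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)
        return module

    # usage: load_generate('roles/lib_openshift/src/generate.py').verify()
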
diff --git a/tox.ini b/tox.ini
index 158974fbe..4d3594023 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,7 +1,7 @@
[tox]
minversion=2.3.1
envlist =
- py{27,35}-ansible22-{pylint,unit,flake8,yamllint}
+ py{27,35}-ansible22-{pylint,unit,flake8,yamllint,generate_validation}
skipsdist=True
skip_missing_interpreters=True
@@ -16,3 +16,4 @@ commands =
pylint: python setup.py lint
yamllint: python setup.py yamllint
unit: nosetests
+ generate_validation: python setup.py generate_validation