-rw-r--r--  .github/ISSUE_TEMPLATE.md  13
-rw-r--r--  .tito/packages/openshift-ansible  2
-rw-r--r--  callback_plugins/aa_version_requirement.py  13
-rw-r--r--  openshift-ansible.spec  66
-rw-r--r--  playbooks/adhoc/openshift_hosted_logging_efk.yaml  11
-rw-r--r--  playbooks/byo/openshift-cluster/enable_dnsmasq.yml  10
-rw-r--r--  playbooks/byo/openshift-cluster/openshift-logging.yml  2
-rw-r--r--  playbooks/byo/openshift-cluster/redeploy-certificates.yml  10
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/README.md  4
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml  10
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml  4
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml  4
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml  4
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml  4
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml  4
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_5/README.md  18
l---------  playbooks/byo/openshift-cluster/upgrades/v3_5/roles  1
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml  99
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml  102
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml  100
-rw-r--r--  playbooks/byo/openshift-master/restart.yml  10
-rw-r--r--  playbooks/byo/openshift-master/scaleup.yml  10
-rw-r--r--  playbooks/byo/openshift-node/network_manager.yml  8
-rw-r--r--  playbooks/byo/openshift-node/scaleup.yml  10
-rw-r--r--  playbooks/byo/openshift_facts.yml  14
-rw-r--r--  playbooks/byo/rhel_subscribe.yml  15
-rw-r--r--  playbooks/common/openshift-cluster/initialize_openshift_version.yml  3
-rw-r--r--  playbooks/common/openshift-cluster/openshift_hosted.yml  11
-rw-r--r--  playbooks/common/openshift-cluster/openshift_logging.yml  8
-rw-r--r--  playbooks/common/openshift-cluster/update_repos_and_packages.yml  3
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/init.yml  22
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml  2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml  15
-rw-r--r--  playbooks/common/openshift-master/restart_hosts.yml  3
-rw-r--r--  roles/lib_openshift/library/oadm_manage_node.py  1484
-rw-r--r--  roles/lib_openshift/library/oc_edit.py  25
-rw-r--r--  roles/lib_openshift/library/oc_obj.py  25
-rw-r--r--  roles/lib_openshift/library/oc_route.py  25
-rw-r--r--  roles/lib_openshift/library/oc_scale.py  27
-rw-r--r--  roles/lib_openshift/library/oc_secret.py  25
-rw-r--r--  roles/lib_openshift/library/oc_serviceaccount.py  1543
-rw-r--r--  roles/lib_openshift/library/oc_version.py  25
-rw-r--r--  roles/lib_openshift/src/ansible/oadm_manage_node.py  38
-rw-r--r--  roles/lib_openshift/src/ansible/oc_serviceaccount.py  30
-rw-r--r--  roles/lib_openshift/src/class/oadm_manage_node.py  209
-rw-r--r--  roles/lib_openshift/src/class/oc_scale.py  2
-rw-r--r--  roles/lib_openshift/src/class/oc_serviceaccount.py  165
-rw-r--r--  roles/lib_openshift/src/doc/manage_node  88
-rw-r--r--  roles/lib_openshift/src/doc/serviceaccount  68
-rw-r--r--  roles/lib_openshift/src/lib/base.py  25
-rw-r--r--  roles/lib_openshift/src/lib/serviceaccount.py  129
-rw-r--r--  roles/lib_openshift/src/sources.yml  26
-rwxr-xr-x  roles/lib_openshift/src/test/integration/oadm_manage_node.yml  58
-rwxr-xr-x  roles/lib_openshift/src/test/integration/oc_scale.yml  19
-rwxr-xr-x  roles/lib_openshift/src/test/integration/oc_serviceaccount.yml  101
-rwxr-xr-x  roles/lib_openshift/src/test/unit/oadm_manage_node.py  177
-rwxr-xr-x  roles/lib_openshift/src/test/unit/oc_scale.py  24
-rwxr-xr-x  roles/lib_openshift/src/test/unit/oc_secret.py  2
-rwxr-xr-x  roles/lib_openshift/src/test/unit/oc_serviceaccount.py  114
-rw-r--r--  roles/lib_utils/library/repoquery.py  607
-rw-r--r--  roles/lib_utils/library/yedit.py  19
-rw-r--r--  roles/lib_utils/src/ansible/repoquery.py  35
-rw-r--r--  roles/lib_utils/src/class/import.py  11
-rw-r--r--  roles/lib_utils/src/class/repoquery.py  156
-rw-r--r--  roles/lib_utils/src/doc/repoquery  275
-rw-r--r--  roles/lib_utils/src/lib/import.py  14
-rw-r--r--  roles/lib_utils/src/lib/repoquery.py  92
-rw-r--r--  roles/lib_utils/src/sources.yml  11
-rwxr-xr-x  roles/lib_utils/src/test/integration/repoquery.yml  136
-rwxr-xr-x  roles/lib_utils/src/test/unit/repoquery.py  87
-rw-r--r--  roles/openshift_examples/files/examples/v1.5/db-templates/postgresql-ephemeral-template.json  2
-rw-r--r--  roles/openshift_examples/files/examples/v1.5/db-templates/postgresql-persistent-template.json  2
-rw-r--r--  roles/openshift_examples/files/examples/v1.5/quickstart-templates/README.md  5
-rw-r--r--  roles/openshift_examples/files/examples/v1.5/quickstart-templates/cakephp-mysql-persistent.json  575
-rw-r--r--  roles/openshift_examples/files/examples/v1.5/quickstart-templates/dancer-mysql-persistent.json  519
-rw-r--r--  roles/openshift_examples/files/examples/v1.5/quickstart-templates/django-postgresql-persistent.json  532
-rw-r--r--  roles/openshift_examples/files/examples/v1.5/quickstart-templates/nodejs-mongodb-persistent.json  541
-rw-r--r--  roles/openshift_examples/files/examples/v1.5/quickstart-templates/rails-postgresql-persistent.json  598
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py  17
-rw-r--r--  roles/openshift_hosted_logging/handlers/main.yml  5
-rw-r--r--  roles/openshift_hosted_logging/meta/main.yaml  1
-rw-r--r--  roles/openshift_hosted_logging/tasks/deploy_logging.yaml  2
-rw-r--r--  roles/openshift_hosted_logging/tasks/update_master_config.yaml  7
-rw-r--r--  roles/openshift_logging/handlers/main.yml  5
-rw-r--r--  roles/openshift_logging/meta/main.yaml  4
-rw-r--r--  roles/openshift_logging/tasks/install_elasticsearch.yaml  46
-rw-r--r--  roles/openshift_logging/tasks/install_fluentd.yaml  4
-rw-r--r--  roles/openshift_logging/tasks/install_logging.yaml  10
-rw-r--r--  roles/openshift_logging/tasks/label_node.yaml  2
-rw-r--r--  roles/openshift_logging/tasks/main.yaml  4
-rw-r--r--  roles/openshift_logging/tasks/scale.yaml  28
-rw-r--r--  roles/openshift_logging/tasks/start_cluster.yaml  61
-rw-r--r--  roles/openshift_logging/tasks/stop_cluster.yaml  61
-rw-r--r--  roles/openshift_logging/tasks/update_master_config.yaml  7
-rw-r--r--  roles/openshift_logging/tasks/upgrade_logging.yaml  8
-rw-r--r--  roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py  84
-rw-r--r--  roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_priorities.py  61
-rw-r--r--  roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py  23
-rw-r--r--  roles/openshift_master_facts/test/openshift_master_facts_default_priorities_tests.py  16
-rw-r--r--  roles/openshift_metrics/meta/main.yaml  1
-rw-r--r--  roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml  48
-rw-r--r--  roles/openshift_metrics/tasks/import_jks_certs.yaml  9
-rw-r--r--  roles/openshift_metrics/tasks/main.yaml  2
-rw-r--r--  roles/openshift_metrics/tasks/scale.yaml  30
-rw-r--r--  roles/openshift_metrics/tasks/start_metrics.yaml  27
-rw-r--r--  roles/openshift_metrics/tasks/stop_metrics.yaml  27
-rw-r--r--  utils/src/ooinstall/cli_installer.py  30
107 files changed, 9545 insertions, 371 deletions
diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md
index 2a4f80a36..cdfd93725 100644
--- a/.github/ISSUE_TEMPLATE.md
+++ b/.github/ISSUE_TEMPLATE.md
@@ -1,3 +1,16 @@
+### <HTPASSWD_AUTH>
+
+We are aware of the current issues related to htpasswd_auth failures.
+Please downgrade to Ansible 2.2.0.0 until a fix is released.
+You can track the status of the bug fix in this issue:
+https://github.com/openshift/openshift-ansible/issues/3111
+Please erase this <HTPASSWD_AUTH> section if it does not apply to you.
+
+Thanks - 2017-01-31
+
+### </HTPASSWD_AUTH>
+
+
#### Description
Provide a brief description of your issue here. For example:
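For reference, the downgrade the template asks for can be applied on an RPM-based control host roughly as follows; this is a hedged sketch, not part of this commit, and the package spec is an assumption about the distro's naming:

```yaml
# Sketch: pin the control host back to Ansible 2.2.0.0 (yum-based install assumed).
- name: Downgrade Ansible to 2.2.0.0
  hosts: localhost
  connection: local
  become: yes
  tasks:
    - command: yum downgrade -y ansible-2.2.0.0
      args:
        warn: no
```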
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index d29838038..3b7826d31 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.5.2-1 ./
+3.5.3-1 ./
diff --git a/callback_plugins/aa_version_requirement.py b/callback_plugins/aa_version_requirement.py
index 1cca19a45..40affb18b 100644
--- a/callback_plugins/aa_version_requirement.py
+++ b/callback_plugins/aa_version_requirement.py
@@ -30,7 +30,12 @@ else:
# Set to minimum required Ansible version
REQUIRED_VERSION = '2.2.0.0'
-DESCRIPTION = "Supported versions: %s or newer" % REQUIRED_VERSION
+DESCRIPTION = "Supported versions: %s or newer (except 2.2.1.0)" % REQUIRED_VERSION
+FAIL_ON_2_2_1_0 = "There are known issues with Ansible version 2.2.1.0 which " \
+ "are impacting OpenShift-Ansible. Please use Ansible " \
+ "version 2.2.0.0 or a version greater than 2.2.1.0. " \
+ "See this issue for more details: " \
+ "https://github.com/openshift/openshift-ansible/issues/3111"
def version_requirement(version):
@@ -58,3 +63,9 @@ class CallbackModule(CallbackBase):
'FATAL: Current Ansible version (%s) is not supported. %s'
% (__version__, DESCRIPTION), color='red')
sys.exit(1)
+
+ if __version__ == '2.2.1.0':
+ display(
+ 'FATAL: Current Ansible version (%s) is not supported. %s'
+ % (__version__, FAIL_ON_2_2_1_0), color='red')
+ sys.exit(1)
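The same guard can also be expressed at the playbook level with the built-in `ansible_version` fact; a minimal sketch (play name and placement are illustrative, not part of this commit):

```yaml
# Sketch: fail fast on the known-bad release before any cluster work starts.
- name: Abort on Ansible 2.2.1.0
  hosts: localhost
  connection: local
  gather_facts: no
  tasks:
    - assert:
        that: ansible_version.full != '2.2.1.0'
        msg: "Ansible 2.2.1.0 breaks OpenShift-Ansible; use 2.2.0.0 or a release newer than 2.2.1.0."
```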
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index 85675f5f9..9faf3e78e 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -5,7 +5,7 @@
}
Name: openshift-ansible
-Version: 3.5.2
+Version: 3.5.3
Release: 1%{?dist}
Summary: Openshift and Atomic Enterprise Ansible
License: ASL 2.0
@@ -253,6 +253,70 @@ Atomic OpenShift Utilities includes
%changelog
+* Tue Jan 31 2017 Scott Dodson <sdodson@redhat.com> 3.5.3-1
+- Adding bool filter to ensure that we correctly set ops host for fluentd
+ (ewolinet@redhat.com)
+- Set default GCE hostname to short instance name. (abutcher@redhat.com)
+- Fail on Ansible version 2.2.1.0 (rteague@redhat.com)
+- During node upgrade upgrade openvswitch rpms (sdodson@redhat.com)
+- HTPASSWD_AUTH (tbielawa@redhat.com)
+- Added repoquery to lib_utils. (twiest@redhat.com)
+- Create v3_5 upgrade playbooks (rteague@redhat.com)
+- GCE deployment fails due to invalid lookup (ccoleman@redhat.com)
+- Resolving yamllint issues from logging playbooks (ewolinet@redhat.com)
+- Updating openshift_hosted_logging to update master-configs with
+ publicLoggingURL (ewolinet@redhat.com)
+- Added oc_serviceaccount to lib_openshift. (twiest@redhat.com)
+- Breaking out master-config changing and updated playbook to apply change to
+ other masters (ewolinet@redhat.com)
+- fix negative stride encountered from openshift_logging (jcantril@redhat.com)
+- add persistent versions of quickstarts (bparees@redhat.com)
+- Fixing docs. Added bugzilla to doc. (kwoodson@redhat.com)
+- ensuring ruamel.yaml is on target for oc_scale (ewolinet@redhat.com)
+- Updating to correctly pull handler for openshift_logging. Adding logic to
+ openshift_hosted_logging too (ewolinet@redhat.com)
+- Adding names to plays and standardizing (rteague@redhat.com)
+- Updating openshift_logging role to add kibana public url to loggingPublicURL
+ in master-config (ewolinet@redhat.com)
+- Only manual scale down being allowed now (ewolinet@redhat.com)
+- adopt oc_scale for openshift_metrics role (jcantril@redhat.com)
+- fix 1414625. Additional fix to run password commands on control node
+ (jcantril@redhat.com)
+- adopt oc_scale module for openshift_logging role (jcantril@redhat.com)
+- Adding fix for when the resource does not exist. Added test cases.
+ (kwoodson@redhat.com)
+- Updating to reuse previous ES DC names and always generate DCs
+ (ewolinet@redhat.com)
+- Correct usage of draining nodes (rteague@redhat.com)
+- Fixing fluentd node labelling (ewolinet@redhat.com)
+- Fixing linters. (kwoodson@redhat.com)
+- Fixing base.py for node and scale. Autogenerated code. (kwoodson@redhat.com)
+- Added unit integration tests. Enhanced unit tests. Fixed an issue in
+ openshift_cmd for namespace. (kwoodson@redhat.com)
+- Adding oadm_manage_node to lib_openshift. (kwoodson@redhat.com)
+- Fixing namespace param in doc to reflect default value. (kwoodson@redhat.com)
+- .gitignore cleanup (rteague@redhat.com)
+- Standardize add_host: with name and changed_when (rteague@redhat.com)
+- Adding banners. Small bug fix to namespace appending in base.
+ (kwoodson@redhat.com)
+- Comma separate no_proxy host list in openshift_facts so that it appears as a
+ string everywhere it is used. (abutcher@redhat.com)
+- Fixing tests and linting. (kwoodson@redhat.com)
+- Adding unit test for oc_scale (kwoodson@redhat.com)
+- Adding integration test for oc_scale. (kwoodson@redhat.com)
+- Adding oc_scale to lib_openshift. (kwoodson@redhat.com)
+- Add 10 second wait after disabling firewalld (sdodson@redhat.com)
+- Added oc_secret to lib_openshift. (twiest@redhat.com)
+- Remove master_count restriction. (abutcher@redhat.com)
+- flake8 mccabe dependency fix (rteague@redhat.com)
+- Generate the artifacts from fragments. (tbielawa@redhat.com)
+- Update the generators to include fragment banners (tbielawa@redhat.com)
+- Make use of AnsibleDumper in openshift_master filters s.t. we can represent
+ AnsibleUnsafeText when dumping yaml. (abutcher@redhat.com)
+- Set metrics url even if metrics_deploy is false
+ (alberto.rodriguez.peon@cern.ch)
+- Template update for Hawkular Metrics 0.23 (mwringe@redhat.com)
+
* Wed Jan 25 2017 Scott Dodson <sdodson@redhat.com> 3.5.2-1
- Sync latest image streams (sdodson@redhat.com)
- Fix containerized haproxy config (andrew@andrewklau.com)
diff --git a/playbooks/adhoc/openshift_hosted_logging_efk.yaml b/playbooks/adhoc/openshift_hosted_logging_efk.yaml
index def1d24e0..0b30a221d 100644
--- a/playbooks/adhoc/openshift_hosted_logging_efk.yaml
+++ b/playbooks/adhoc/openshift_hosted_logging_efk.yaml
@@ -3,3 +3,14 @@
roles:
- role: openshift_hosted_logging
openshift_hosted_logging_cleanup: no
+
+- name: Update master-config for publicLoggingURL
+ hosts: masters:!masters[0]
+ pre_tasks:
+ - set_fact:
+ logging_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}"
+ tasks:
+ - include_role:
+ name: openshift_hosted_logging
+ tasks_from: update_master_config
+ when: openshift_hosted_logging_deploy | default(false) | bool
diff --git a/playbooks/byo/openshift-cluster/enable_dnsmasq.yml b/playbooks/byo/openshift-cluster/enable_dnsmasq.yml
index 410f70d74..32f9ebfd3 100644
--- a/playbooks/byo/openshift-cluster/enable_dnsmasq.yml
+++ b/playbooks/byo/openshift-cluster/enable_dnsmasq.yml
@@ -1,8 +1,11 @@
---
-- hosts: localhost
+- name: Create initial host groups for localhost
+ hosts: localhost
connection: local
become: no
gather_facts: no
+ tags:
+ - always
tasks:
- include_vars: ../../byo/openshift-cluster/cluster_hosts.yml
- name: Evaluate group l_oo_all_hosts
@@ -12,8 +15,11 @@
with_items: "{{ g_all_hosts | default([]) }}"
changed_when: False
-- hosts: l_oo_all_hosts
+- name: Create initial host groups for all hosts
+ hosts: l_oo_all_hosts
gather_facts: no
+ tags:
+ - always
tasks:
- include_vars: ../../byo/openshift-cluster/cluster_hosts.yml
diff --git a/playbooks/byo/openshift-cluster/openshift-logging.yml b/playbooks/byo/openshift-cluster/openshift-logging.yml
index 3a18e800e..eebfcd20d 100644
--- a/playbooks/byo/openshift-cluster/openshift-logging.yml
+++ b/playbooks/byo/openshift-cluster/openshift-logging.yml
@@ -4,8 +4,6 @@
# Hosted logging on. See inventory/byo/hosts.*.example for the
# currently supported method.
#
-- include: ../../common/openshift-cluster/verify_ansible_version.yml
-
- name: Create initial host groups for localhost
hosts: localhost
connection: local
diff --git a/playbooks/byo/openshift-cluster/redeploy-certificates.yml b/playbooks/byo/openshift-cluster/redeploy-certificates.yml
index 8422789b1..753248855 100644
--- a/playbooks/byo/openshift-cluster/redeploy-certificates.yml
+++ b/playbooks/byo/openshift-cluster/redeploy-certificates.yml
@@ -1,8 +1,11 @@
---
-- hosts: localhost
+- name: Create initial host groups for localhost
+ hosts: localhost
connection: local
become: no
gather_facts: no
+ tags:
+ - always
tasks:
- include_vars: ../../byo/openshift-cluster/cluster_hosts.yml
- name: Evaluate group l_oo_all_hosts
@@ -12,8 +15,11 @@
with_items: "{{ g_all_hosts | default([]) }}"
changed_when: False
-- hosts: l_oo_all_hosts
+- name: Create initial host groups for all hosts
+ hosts: l_oo_all_hosts
gather_facts: no
+ tags:
+ - always
tasks:
- include_vars: ../../byo/openshift-cluster/cluster_hosts.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/README.md b/playbooks/byo/openshift-cluster/upgrades/README.md
index de4e34e2d..e5b80a9b4 100644
--- a/playbooks/byo/openshift-cluster/upgrades/README.md
+++ b/playbooks/byo/openshift-cluster/upgrades/README.md
@@ -4,5 +4,5 @@ cluster. Additional notes for the associated upgrade playbooks are
provided in their respective directories.
# Upgrades available
-- [OpenShift Enterprise 3.2 to 3.3](v3_3/README.md)
-- [OpenShift Enterprise 3.1 to 3.2](v3_2/README.md)
+- [OpenShift Enterprise 3.4 to 3.5](v3_5/README.md)
+- [OpenShift Enterprise 3.3 to 3.4](v3_4/README.md)
diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml
index 6de054937..d5fd7c424 100644
--- a/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml
@@ -1,9 +1,12 @@
---
# Playbook to upgrade Docker to the max allowable version for an OpenShift cluster.
-- hosts: localhost
+- name: Create initial host groups for localhost
+ hosts: localhost
connection: local
become: no
gather_facts: no
+ tags:
+ - always
tasks:
- include_vars: ../../cluster_hosts.yml
- name: Evaluate group l_oo_all_hosts
@@ -13,8 +16,11 @@
with_items: "{{ g_all_hosts | default([]) }}"
changed_when: False
-- hosts: l_oo_all_hosts
+- name: Create initial host groups for all hosts
+ hosts: l_oo_all_hosts
gather_facts: no
+ tags:
+ - always
tasks:
- include_vars: ../../cluster_hosts.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml
index 84a5a026f..bb08ca837 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml
@@ -6,8 +6,8 @@
tags:
- pre_upgrade
-# Configure the upgrade target for the common upgrade tasks:
-- hosts: l_oo_all_hosts
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: l_oo_all_hosts
tags:
- pre_upgrade
tasks:
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
index 7717c95e4..907196d8f 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
@@ -15,8 +15,8 @@
tags:
- pre_upgrade
-# Configure the upgrade target for the common upgrade tasks:
-- hosts: l_oo_all_hosts
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: l_oo_all_hosts
tags:
- pre_upgrade
tasks:
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
index e2a33cc00..5e28072da 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
@@ -8,8 +8,8 @@
tags:
- pre_upgrade
-# Configure the upgrade target for the common upgrade tasks:
-- hosts: l_oo_all_hosts
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: l_oo_all_hosts
tags:
- pre_upgrade
tasks:
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
index 92d7c943a..25b669f86 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
@@ -15,8 +15,8 @@
tags:
- pre_upgrade
-# Configure the upgrade target for the common upgrade tasks:
-- hosts: l_oo_all_hosts
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: l_oo_all_hosts
tags:
- pre_upgrade
tasks:
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml
index f385d4f22..9868cb5b4 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml
@@ -8,8 +8,8 @@
tags:
- pre_upgrade
-# Configure the upgrade target for the common upgrade tasks:
-- hosts: l_oo_all_hosts
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: l_oo_all_hosts
tags:
- pre_upgrade
tasks:
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_5/README.md b/playbooks/byo/openshift-cluster/upgrades/v3_5/README.md
new file mode 100644
index 000000000..53eebe65e
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_5/README.md
@@ -0,0 +1,18 @@
+# v3.5 Major and Minor Upgrade Playbook
+
+## Overview
+This playbook currently performs the
+following steps.
+
+ * Upgrade and restart master services
+ * Unschedule node.
+ * Upgrade and restart docker
+ * Upgrade and restart node services
+ * Modifies the subset of the configuration necessary
+ * Applies the latest cluster policies
+ * Updates the default router if one exists
+ * Updates the default registry if one exists
+ * Updates image streams and quickstarts
+
+## Usage
+ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_5/roles b/playbooks/byo/openshift-cluster/upgrades/v3_5/roles
new file mode 120000
index 000000000..6bc1a7aef
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_5/roles
@@ -0,0 +1 @@
+../../../../../roles
\ No newline at end of file
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml
new file mode 100644
index 000000000..bef15eaab
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml
@@ -0,0 +1,99 @@
+---
+#
+# Full Control Plane + Nodes Upgrade
+#
+- include: ../../../../common/openshift-cluster/upgrades/init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: l_oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
+ openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
+
+# Pre-upgrade
+
+- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos and initialize facts on all hosts
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
+ tags:
+ - pre_upgrade
+ roles:
+ - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+ openshift_generate_no_proxy_hosts | default(True) | bool }}"
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../../common/openshift-master/validate_restart.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+ tasks:
+ - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml
+
+- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml
+
+- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml
+
+- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml
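Because the pre-flight plays above all carry the `pre_upgrade` tag, that portion can be run on its own before committing to the upgrade; a hedged sketch (inventory path and variable values are illustrative, not from this commit):

```yaml
# Run only the tagged pre-flight plays:
#   ansible-playbook -i ~/ansible-inventory upgrade.yml --tags pre_upgrade
#
# Inventory variables the plays above consume; the values shown are
# assumptions mirroring the set_fact defaults for an enterprise deployment.
openshift_release: "3.5"
openshift_generate_no_proxy_hosts: true
```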
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
new file mode 100644
index 000000000..dd88dde5f
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
@@ -0,0 +1,102 @@
+---
+#
+# Control Plane Upgrade Playbook
+#
+# Upgrades masters and Docker (only on standalone etcd hosts)
+#
+# This upgrade does not include:
+# - node service running on masters
+# - docker running on masters
+# - node service running on dedicated nodes
+#
+# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
+#
+- include: ../../../../common/openshift-cluster/upgrades/init.yml
+ tags:
+ - pre_upgrade
+
+# Configure the upgrade target for the common upgrade tasks:
+- hosts: l_oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
+ openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
+
+# Pre-upgrade
+
+- name: Update repos on control plane hosts
+ hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
+ tags:
+ - pre_upgrade
+ roles:
+ - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+ openshift_generate_no_proxy_hosts | default(True) | bool }}"
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../../common/openshift-master/validate_restart.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_masters_to_config:oo_etcd_to_config
+ tasks:
+ - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml
+
+- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml
+
+- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml
new file mode 100644
index 000000000..931a1bcd7
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml
@@ -0,0 +1,100 @@
+---
+#
+# Node Upgrade Playbook
+#
+# Upgrades nodes only, but requires the control plane to have already been upgraded.
+#
+- include: ../../../../common/openshift-cluster/upgrades/init.yml
+ tags:
+ - pre_upgrade
+
+# Configure the upgrade target for the common upgrade tasks:
+- hosts: l_oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
+ openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
+
+# Pre-upgrade
+- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos on nodes
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
+ roles:
+ - openshift_repos
+ tags:
+ - pre_upgrade
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+ openshift_generate_no_proxy_hosts | default(True) | bool }}"
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- name: Verify masters are already upgraded
+ hosts: oo_masters_to_config
+ tags:
+ - pre_upgrade
+ tasks:
+ - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
+ when: openshift.common.version != openshift_version
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml
+
+- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml
diff --git a/playbooks/byo/openshift-master/restart.yml b/playbooks/byo/openshift-master/restart.yml
index c468a898d..3e58ccbcc 100644
--- a/playbooks/byo/openshift-master/restart.yml
+++ b/playbooks/byo/openshift-master/restart.yml
@@ -1,8 +1,11 @@
---
-- hosts: localhost
+- name: Create initial host groups for localhost
+ hosts: localhost
connection: local
become: no
gather_facts: no
+ tags:
+ - always
tasks:
- include_vars: ../../byo/openshift-cluster/cluster_hosts.yml
- name: Evaluate group l_oo_all_hosts
@@ -12,8 +15,11 @@
with_items: "{{ g_all_hosts | default([]) }}"
changed_when: False
-- hosts: l_oo_all_hosts
+- name: Create initial host groups for all hosts
+ hosts: l_oo_all_hosts
gather_facts: no
+ tags:
+ - always
tasks:
- include_vars: ../../byo/openshift-cluster/cluster_hosts.yml
diff --git a/playbooks/byo/openshift-master/scaleup.yml b/playbooks/byo/openshift-master/scaleup.yml
index cb9140ee2..7075bb59e 100644
--- a/playbooks/byo/openshift-master/scaleup.yml
+++ b/playbooks/byo/openshift-master/scaleup.yml
@@ -1,8 +1,11 @@
---
-- hosts: localhost
+- name: Create initial host groups for localhost
+ hosts: localhost
connection: local
become: no
gather_facts: no
+ tags:
+ - always
tasks:
- include_vars: ../../byo/openshift-cluster/cluster_hosts.yml
- name: Evaluate group l_oo_all_hosts
@@ -12,8 +15,11 @@
with_items: "{{ g_all_hosts | default([]) }}"
changed_when: False
-- hosts: l_oo_all_hosts
+- name: Create initial host groups for all hosts
+ hosts: l_oo_all_hosts
gather_facts: no
+ tags:
+ - always
tasks:
- include_vars: ../../byo/openshift-cluster/cluster_hosts.yml
diff --git a/playbooks/byo/openshift-node/network_manager.yml b/playbooks/byo/openshift-node/network_manager.yml
index 7c453986e..9bb3ea17f 100644
--- a/playbooks/byo/openshift-node/network_manager.yml
+++ b/playbooks/byo/openshift-node/network_manager.yml
@@ -1,8 +1,11 @@
---
-- hosts: localhost
+- name: Create initial host groups for localhost
+ hosts: localhost
connection: local
become: no
gather_facts: no
+ tags:
+ - always
tasks:
- include_vars: ../../byo/openshift-cluster/cluster_hosts.yml
- name: Evaluate group l_oo_all_hosts
@@ -12,7 +15,8 @@
with_items: "{{ g_all_hosts | default([]) }}"
changed_when: False
-- hosts: l_oo_all_hosts
+- name: Install and configure NetworkManager
+ hosts: l_oo_all_hosts
become: yes
tasks:
- name: install NetworkManager
diff --git a/playbooks/byo/openshift-node/scaleup.yml b/playbooks/byo/openshift-node/scaleup.yml
index 1f2509a67..2b10b6c76 100644
--- a/playbooks/byo/openshift-node/scaleup.yml
+++ b/playbooks/byo/openshift-node/scaleup.yml
@@ -1,8 +1,11 @@
---
-- hosts: localhost
+- name: Create initial host groups for localhost
+ hosts: localhost
connection: local
become: no
gather_facts: no
+ tags:
+ - always
tasks:
- include_vars: ../../byo/openshift-cluster/cluster_hosts.yml
- name: Evaluate group l_oo_all_hosts
@@ -12,8 +15,11 @@
with_items: "{{ g_all_hosts | default([]) }}"
changed_when: False
-- hosts: l_oo_all_hosts
+- name: Create initial host groups for all hosts
+ hosts: l_oo_all_hosts
gather_facts: no
+ tags:
+ - always
tasks:
- include_vars: ../../byo/openshift-cluster/cluster_hosts.yml
diff --git a/playbooks/byo/openshift_facts.yml b/playbooks/byo/openshift_facts.yml
index 50936941a..025983662 100644
--- a/playbooks/byo/openshift_facts.yml
+++ b/playbooks/byo/openshift_facts.yml
@@ -1,10 +1,13 @@
---
-- hosts: localhost
+- name: Create initial host groups for localhost
+ hosts: localhost
connection: local
become: no
gather_facts: no
+ tags:
+ - always
tasks:
- - include_vars: openshift-cluster/cluster_hosts.yml
+ - include_vars: ../byo/openshift-cluster/cluster_hosts.yml
- name: Evaluate group l_oo_all_hosts
add_host:
name: "{{ item }}"
@@ -12,10 +15,13 @@
with_items: "{{ g_all_hosts | default([]) }}"
changed_when: False
-- hosts: l_oo_all_hosts
+- name: Create initial host groups for all hosts
+ hosts: l_oo_all_hosts
gather_facts: no
+ tags:
+ - always
tasks:
- - include_vars: openshift-cluster/cluster_hosts.yml
+ - include_vars: ../byo/openshift-cluster/cluster_hosts.yml
- include: ../common/openshift-cluster/evaluate_groups.yml
diff --git a/playbooks/byo/rhel_subscribe.yml b/playbooks/byo/rhel_subscribe.yml
index e96c43214..8e7568e33 100644
--- a/playbooks/byo/rhel_subscribe.yml
+++ b/playbooks/byo/rhel_subscribe.yml
@@ -1,8 +1,11 @@
---
-- hosts: localhost
+- name: Create initial host groups for localhost
+ hosts: localhost
connection: local
become: no
gather_facts: no
+ tags:
+ - always
tasks:
- include_vars: openshift-cluster/cluster_hosts.yml
- name: Evaluate group l_oo_all_hosts
@@ -12,14 +15,18 @@
with_items: "{{ g_all_hosts | default([]) }}"
changed_when: False
-- hosts: l_oo_all_hosts
+- name: Create initial host groups for all hosts
+ hosts: l_oo_all_hosts
gather_facts: no
+ tags:
+ - always
tasks:
- - include_vars: openshift-cluster/cluster_hosts.yml
+ - include_vars: ../byo/openshift-cluster/cluster_hosts.yml
- include: ../common/openshift-cluster/evaluate_groups.yml
-- hosts: l_oo_all_hosts
+- name: Subscribe hosts, update repos and update OS packages
+ hosts: l_oo_all_hosts
vars:
openshift_deployment_type: "{{ deployment_type }}"
roles:
diff --git a/playbooks/common/openshift-cluster/initialize_openshift_version.yml b/playbooks/common/openshift-cluster/initialize_openshift_version.yml
index a1bd1bd92..07b38920f 100644
--- a/playbooks/common/openshift-cluster/initialize_openshift_version.yml
+++ b/playbooks/common/openshift-cluster/initialize_openshift_version.yml
@@ -1,6 +1,7 @@
---
# NOTE: requires openshift_facts be run
-- hosts: l_oo_all_hosts
+- name: Verify compatible yum/subscription-manager combination
+ hosts: l_oo_all_hosts
gather_facts: no
tasks:
# See:
diff --git a/playbooks/common/openshift-cluster/openshift_hosted.yml b/playbooks/common/openshift-cluster/openshift_hosted.yml
index ec5b18389..021d19dad 100644
--- a/playbooks/common/openshift-cluster/openshift_hosted.yml
+++ b/playbooks/common/openshift-cluster/openshift_hosted.yml
@@ -45,3 +45,14 @@
- role: cockpit-ui
when: ( openshift.common.version_gte_3_3_or_1_3 | bool ) and ( openshift_hosted_manage_registry | default(true) | bool ) and not (openshift.docker.hosted_registry_insecure | default(false) | bool)
+
+- name: Update master-config for publicLoggingURL
+ hosts: masters:!oo_first_master
+ pre_tasks:
+ - set_fact:
+ logging_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}"
+ tasks:
+ - include_role:
+ name: openshift_hosted_logging
+ tasks_from: update_master_config
+ when: openshift_hosted_logging_deploy | default(false) | bool
diff --git a/playbooks/common/openshift-cluster/openshift_logging.yml b/playbooks/common/openshift-cluster/openshift_logging.yml
index 6347cbc26..82f18f5e1 100644
--- a/playbooks/common/openshift-cluster/openshift_logging.yml
+++ b/playbooks/common/openshift-cluster/openshift_logging.yml
@@ -3,3 +3,11 @@
hosts: oo_first_master
roles:
- openshift_logging
+
+- name: Update Master configs
+ hosts: masters:!oo_first_master
+ tasks:
+ - include_role:
+ name: openshift_logging
+ tasks_from: update_master_config
+ when: openshift_logging_install_logging | default(false) | bool
diff --git a/playbooks/common/openshift-cluster/update_repos_and_packages.yml b/playbooks/common/openshift-cluster/update_repos_and_packages.yml
index e3d16d359..b83e4d821 100644
--- a/playbooks/common/openshift-cluster/update_repos_and_packages.yml
+++ b/playbooks/common/openshift-cluster/update_repos_and_packages.yml
@@ -1,7 +1,8 @@
---
- include: evaluate_groups.yml
-- hosts: oo_hosts_to_update
+- name: Subscribe hosts, update repos and update OS packages
+ hosts: oo_hosts_to_update
vars:
openshift_deployment_type: "{{ deployment_type }}"
roles:
diff --git a/playbooks/common/openshift-cluster/upgrades/init.yml b/playbooks/common/openshift-cluster/upgrades/init.yml
index 235853b0f..a3b8c489e 100644
--- a/playbooks/common/openshift-cluster/upgrades/init.yml
+++ b/playbooks/common/openshift-cluster/upgrades/init.yml
@@ -1,9 +1,11 @@
---
-- name: Create l_oo_all_hosts group
+- name: Create initial host groups for localhost
hosts: localhost
connection: local
become: no
gather_facts: no
+ tags:
+ - always
tasks:
- include_vars: ../../../byo/openshift-cluster/cluster_hosts.yml
- name: Evaluate group l_oo_all_hosts
@@ -13,9 +15,11 @@
with_items: "{{ g_all_hosts | default([]) }}"
changed_when: False
-- name: Include g_*_hosts vars for hosts in group l_oo_all_hosts
+- name: Create initial host groups for all hosts
hosts: l_oo_all_hosts
gather_facts: no
+ tags:
+ - always
tasks:
- include_vars: ../../../byo/openshift-cluster/cluster_hosts.yml
@@ -61,3 +65,17 @@
when: not openshift.common.is_atomic | bool
args:
warn: no
+
+- name: Ensure firewall is not switched during upgrade
+ hosts: oo_all_hosts
+ tasks:
+ - name: Check if iptables is running
+ command: systemctl status iptables
+ ignore_errors: true
+ changed_when: false
+ register: service_iptables_status
+
+ - name: Set fact os_firewall_use_firewalld FALSE for iptables
+ set_fact:
+ os_firewall_use_firewalld: false
+ when: "'Active: active' in service_iptables_status.stdout"
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index 23b976192..9cad931af 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -32,7 +32,7 @@
include: ./etcd/main.yml
# Create service signer cert when missing. Service signer certificate
-# is added to master config in the master config hook for v3_3.
+# is added to master config in the master_config_upgrade hook.
- name: Determine if service signer cert must be created
hosts: oo_first_master
tasks:
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
index 5fa74898f..a6a49e5ff 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
@@ -81,6 +81,21 @@
failed_when: false
when: openshift.common.is_containerized | bool
+ - name: Upgrade openvswitch
+ package:
+ name: openvswitch
+ state: latest
+ register: ovs_pkg
+ when: inventory_hostname in groups.oo_nodes_to_upgrade and not openshift.common.is_containerized | bool
+
+ - name: Restart openvswitch
+ systemd:
+ name: openvswitch
+ state: restarted
+ when:
+ - inventory_hostname in groups.oo_nodes_to_upgrade and not openshift.common.is_containerized | bool
+ - ovs_pkg | changed
+
# Mandatory Docker restart, ensure all containerized services are running:
- include: docker/restart.yml
diff --git a/playbooks/common/openshift-master/restart_hosts.yml b/playbooks/common/openshift-master/restart_hosts.yml
index 832301e3d..475144dbf 100644
--- a/playbooks/common/openshift-master/restart_hosts.yml
+++ b/playbooks/common/openshift-master/restart_hosts.yml
@@ -10,9 +10,10 @@
- name: Wait for master to restart
local_action:
module: wait_for
- host="{{ inventory_hostname }}"
+ host="{{ ansible_host }}"
state=started
delay=10
+ timeout=600
become: no
# Now that ssh is back up we can wait for API on the remote system,
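For reference, the patched task is equivalent to the following YAML dict syntax (same behavior; a sketch, not the commit's wording):

```yaml
- name: Wait for master to restart
  wait_for:
    host: "{{ ansible_host }}"
    state: started
    delay: 10
    timeout: 600
  delegate_to: localhost
  become: no
```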
diff --git a/roles/lib_openshift/library/oadm_manage_node.py b/roles/lib_openshift/library/oadm_manage_node.py
new file mode 100644
index 000000000..88e573de6
--- /dev/null
+++ b/roles/lib_openshift/library/oadm_manage_node.py
@@ -0,0 +1,1484 @@
+#!/usr/bin/env python
+# pylint: disable=missing-docstring
+# flake8: noqa: T001
+# ___ ___ _ _ ___ ___ _ _____ ___ ___
+# / __| __| \| | __| _ \ /_\_ _| __| \
+# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
+# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
+# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
+# | |) | (_) | | .` | (_) || | | _|| |) | | | |
+# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
+#
+# Copyright 2016 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# -*- -*- -*- Begin included fragment: lib/import.py -*- -*- -*-
+'''
+ OpenShiftCLI class that wraps the oc commands in a subprocess
+'''
+# pylint: disable=too-many-lines
+
+from __future__ import print_function
+import atexit
+import json
+import os
+import re
+import shutil
+import subprocess
+# pylint: disable=import-error
+import ruamel.yaml as yaml
+from ansible.module_utils.basic import AnsibleModule
+
+# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: doc/manage_node -*- -*- -*-
+
+DOCUMENTATION = '''
+---
+module: oadm_manage_node
+short_description: Module to manage openshift nodes
+description:
+ - Manage openshift nodes programmatically.
+options:
+ kubeconfig:
+ description:
+ - The path for the kubeconfig file to use for authentication
+ required: false
+ default: /etc/origin/master/admin.kubeconfig
+ aliases: []
+ debug:
+ description:
+ - Turn on debug output.
+ required: false
+ default: False
+ aliases: []
+ node:
+ description:
+ - A list of the nodes being managed
+ required: false
+ default: None
+ aliases: []
+ selector:
+ description:
+ - The selector when filtering on node labels
+ required: false
+ default: None
+ aliases: []
+ pod_selector:
+ description:
+ - A selector when filtering on pod labels.
+ required: false
+ default: None
+ aliases: []
+ evacuate:
+ description:
+ - Remove all pods from a node.
+ required: false
+ default: False
+ aliases: []
+ schedulable:
+ description:
+ - whether or not openshift can schedule pods on this node
+ required: False
+ default: None
+ aliases: []
+ dry_run:
+ description:
+ - This shows the pods that would be migrated if evacuate were called
+ required: False
+ default: False
+ aliases: []
+ grace_period:
+ description:
+ - Grace period (seconds) for pods being deleted.
+ required: false
+ default: None
+ aliases: []
+ force:
+ description:
+ - Whether or not to attempt to force this action in openshift
+ required: false
+ default: None
+ aliases: []
+author:
+- "Kenny Woodson <kwoodson@redhat.com>"
+extends_documentation_fragment: []
+'''
+
+EXAMPLES = '''
+- name: oadm manage-node --schedulable=true --selector=ops_node=new
+ oadm_manage_node:
+ selector: ops_node=new
+ schedulable: True
+ register: schedout
+
+- name: oadm manage-node my-k8s-node-5 --evacuate
+ oadm_manage_node:
+ node: my-k8s-node-5
+ evacuate: True
+ force: True
+'''
+
+# -*- -*- -*- End included fragment: doc/manage_node -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
+# noqa: E301,E302
+
+
+class YeditException(Exception):
+ ''' Exception class for Yedit '''
+ pass
+
+
+# pylint: disable=too-many-public-methods
+class Yedit(object):
+ ''' Class to modify yaml files '''
+ re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ com_sep = set(['.', '#', '|', ':'])
+
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ filename=None,
+ content=None,
+ content_type='yaml',
+ separator='.',
+ backup=False):
+ self.content = content
+ self._separator = separator
+ self.filename = filename
+ self.__yaml_dict = content
+ self.content_type = content_type
+ self.backup = backup
+ self.load(content_type=self.content_type)
+ if self.__yaml_dict is None:
+ self.__yaml_dict = {}
+
+ @property
+ def separator(self):
+ ''' getter method for separator '''
+ return self._separator
+
+ @separator.setter
+ def separator(self, value):
+ ''' setter method for separator '''
+ self._separator = value
+
+ @property
+ def yaml_dict(self):
+ ''' getter method for yaml_dict '''
+ return self.__yaml_dict
+
+ @yaml_dict.setter
+ def yaml_dict(self, value):
+ ''' setter method for yaml_dict '''
+ self.__yaml_dict = value
+
+ @staticmethod
+ def parse_key(key, sep='.'):
+ '''parse the key allowing the appropriate separator'''
+ common_separators = list(Yedit.com_sep - set([sep]))
+ return re.findall(Yedit.re_key % ''.join(common_separators), key)
+
+ @staticmethod
+ def valid_key(key, sep='.'):
+ '''validate the incoming key'''
+ common_separators = list(Yedit.com_sep - set([sep]))
+ if not re.match(Yedit.re_valid_key % ''.join(common_separators), key):
+ return False
+
+ return True
+
+ @staticmethod
+ def remove_entry(data, key, sep='.'):
+ ''' remove data at location key '''
+ if key == '' and isinstance(data, dict):
+ data.clear()
+ return True
+ elif key == '' and isinstance(data, list):
+ del data[:]
+ return True
+
+ if not (key and Yedit.valid_key(key, sep)) and \
+ isinstance(data, (list, dict)):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes[:-1]:
+ if dict_key and isinstance(data, dict):
+ data = data.get(dict_key, None)
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ # process last index for remove
+ # expected list entry
+ if key_indexes[-1][0]:
+ if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
+ del data[int(key_indexes[-1][0])]
+ return True
+
+ # expected dict entry
+ elif key_indexes[-1][1]:
+ if isinstance(data, dict):
+ del data[key_indexes[-1][1]]
+ return True
+
+ @staticmethod
+ def add_entry(data, key, item=None, sep='.'):
+ ''' Add an item to a dictionary with key notation a.b.c
+ d = {'a': {'b': 'c'}}
+ key = a.b
+ item = 'd' -> d becomes {'a': {'b': 'd'}}
+ '''
+ if key == '':
+ pass
+ elif (not (key and Yedit.valid_key(key, sep)) and
+ isinstance(data, (list, dict))):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes[:-1]:
+ if dict_key:
+ if isinstance(data, dict) and dict_key in data and data[dict_key]: # noqa: E501
+ data = data[dict_key]
+ continue
+
+ elif data and not isinstance(data, dict):
+ return None
+
+ data[dict_key] = {}
+ data = data[dict_key]
+
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ if key == '':
+ data = item
+
+ # process last index for add
+ # expected list entry
+ elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
+ data[int(key_indexes[-1][0])] = item
+
+ # expected dict entry
+ elif key_indexes[-1][1] and isinstance(data, dict):
+ data[key_indexes[-1][1]] = item
+
+ return data
+
+ @staticmethod
+ def get_entry(data, key, sep='.'):
+ ''' Get an item from a dictionary with key notation a.b.c
+ d = {'a': {'b': 'c'}}
+ key = a.b
+ return c
+ '''
+ if key == '':
+ pass
+ elif (not (key and Yedit.valid_key(key, sep)) and
+ isinstance(data, (list, dict))):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes:
+ if dict_key and isinstance(data, dict):
+ data = data.get(dict_key, None)
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ return data
+
+ def write(self):
+ ''' write to file '''
+ if not self.filename:
+ raise YeditException('Please specify a filename.')
+
+ if self.backup and self.file_exists():
+ shutil.copy(self.filename, self.filename + '.orig')
+
+ tmp_filename = self.filename + '.yedit'
+ with open(tmp_filename, 'w') as yfd:
+ # pylint: disable=no-member
+ if hasattr(self.yaml_dict, 'fa'):
+ self.yaml_dict.fa.set_block_style()
+
+ yfd.write(yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
+
+ os.rename(tmp_filename, self.filename)
+
+ return (True, self.yaml_dict)
+
+ def read(self):
+ ''' read from file '''
+ # check if it exists
+ if self.filename is None or not self.file_exists():
+ return None
+
+ contents = None
+ with open(self.filename) as yfd:
+ contents = yfd.read()
+
+ return contents
+
+ def file_exists(self):
+ ''' return whether file exists '''
+ if os.path.exists(self.filename):
+ return True
+
+ return False
+
+ def load(self, content_type='yaml'):
+ ''' return yaml file '''
+ contents = self.read()
+
+ if not contents and not self.content:
+ return None
+
+ if self.content:
+ if isinstance(self.content, dict):
+ self.yaml_dict = self.content
+ return self.yaml_dict
+ elif isinstance(self.content, str):
+ contents = self.content
+
+ # check if it is yaml
+ try:
+ if content_type == 'yaml' and contents:
+ self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
+ # pylint: disable=no-member
+ if hasattr(self.yaml_dict, 'fa'):
+ self.yaml_dict.fa.set_block_style()
+ elif content_type == 'json' and contents:
+ self.yaml_dict = json.loads(contents)
+ except yaml.YAMLError as err:
+ # Error loading yaml or json
+ raise YeditException('Problem with loading yaml file. %s' % err)
+
+ return self.yaml_dict
+
+ def get(self, key):
+ ''' get a specified key'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
+ except KeyError:
+ entry = None
+
+ return entry
+
+ def pop(self, path, key_or_item):
+ ''' remove a key, value pair from a dict or an item for a list'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ return (False, self.yaml_dict)
+
+ if isinstance(entry, dict):
+ # pylint: disable=no-member,maybe-no-member
+ if key_or_item in entry:
+ entry.pop(key_or_item)
+ return (True, self.yaml_dict)
+ return (False, self.yaml_dict)
+
+ elif isinstance(entry, list):
+ # pylint: disable=no-member,maybe-no-member
+ ind = None
+ try:
+ ind = entry.index(key_or_item)
+ except ValueError:
+ return (False, self.yaml_dict)
+
+ entry.pop(ind)
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ def delete(self, path):
+ ''' remove path from a dict'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ return (False, self.yaml_dict)
+
+ result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
+ if not result:
+ return (False, self.yaml_dict)
+
+ return (True, self.yaml_dict)
+
+ def exists(self, path, value):
+ ''' check if value exists at path'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if isinstance(entry, list):
+ if value in entry:
+ return True
+ return False
+
+ elif isinstance(entry, dict):
+ if isinstance(value, dict):
+ rval = False
+ for key, val in value.items():
+ if entry[key] != val:
+ rval = False
+ break
+ else:
+ rval = True
+ return rval
+
+ return value in entry
+
+ return entry == value
+
+ def append(self, path, value):
+ '''append value to a list'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ self.put(path, [])
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ if not isinstance(entry, list):
+ return (False, self.yaml_dict)
+
+ # pylint: disable=no-member,maybe-no-member
+ entry.append(value)
+ return (True, self.yaml_dict)
+
+ # pylint: disable=too-many-arguments
+ def update(self, path, value, index=None, curr_value=None):
+ ''' put path, value into a dict '''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if isinstance(entry, dict):
+ # pylint: disable=no-member,maybe-no-member
+ if not isinstance(value, dict):
+ raise YeditException('Cannot replace key, value entry in ' +
+ 'dict with non-dict type. value=[%s] [%s]' % (value, type(value))) # noqa: E501
+
+ entry.update(value)
+ return (True, self.yaml_dict)
+
+ elif isinstance(entry, list):
+ # pylint: disable=no-member,maybe-no-member
+ ind = None
+ if curr_value:
+ try:
+ ind = entry.index(curr_value)
+ except ValueError:
+ return (False, self.yaml_dict)
+
+ elif index is not None:
+ ind = index
+
+ if ind is not None and entry[ind] != value:
+ entry[ind] = value
+ return (True, self.yaml_dict)
+
+ # see if it exists in the list
+ try:
+ ind = entry.index(value)
+ except ValueError:
+ # doesn't exist, append it
+ entry.append(value)
+ return (True, self.yaml_dict)
+
+ # already exists, return
+ if ind is not None:
+ return (False, self.yaml_dict)
+ return (False, self.yaml_dict)
+
+ def put(self, path, value):
+ ''' put path, value into a dict '''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry == value:
+ return (False, self.yaml_dict)
+
+ # deepcopy didn't work
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ # pylint: disable=no-member
+ if hasattr(self.yaml_dict, 'fa'):
+ tmp_copy.fa.set_block_style()
+ result = Yedit.add_entry(tmp_copy, path, value, self.separator)
+ if not result:
+ return (False, self.yaml_dict)
+
+ self.yaml_dict = tmp_copy
+
+ return (True, self.yaml_dict)
+
+ def create(self, path, value):
+ ''' create a yaml file '''
+ if not self.file_exists():
+ # deepcopy didn't work
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False), # noqa: E501
+ yaml.RoundTripLoader)
+ # pylint: disable=no-member
+ if hasattr(self.yaml_dict, 'fa'):
+ tmp_copy.fa.set_block_style()
+ result = Yedit.add_entry(tmp_copy, path, value, self.separator)
+ if result:
+ self.yaml_dict = tmp_copy
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ @staticmethod
+ def get_curr_value(invalue, val_type):
+ '''return the current value'''
+ if invalue is None:
+ return None
+
+ curr_value = invalue
+ if val_type == 'yaml':
+ curr_value = yaml.load(invalue)
+ elif val_type == 'json':
+ curr_value = json.loads(invalue)
+
+ return curr_value
+
+ @staticmethod
+ def parse_value(inc_value, vtype=''):
+ '''determine value type passed'''
+ true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
+ 'on', 'On', 'ON', ]
+ false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
+ 'off', 'Off', 'OFF']
+
+ # The value came in as a string and value_type asks for a bool;
+ # accept it only if it matches one of the recognized boolean words.
+ if isinstance(inc_value, str) and 'bool' in vtype:
+ if inc_value not in true_bools and inc_value not in false_bools:
+ raise YeditException('Not a boolean type. str=[%s] vtype=[%s]'
+ % (inc_value, vtype))
+ elif isinstance(inc_value, bool) and 'str' in vtype:
+ inc_value = str(inc_value)
+
+ # If vtype is not str then go ahead and attempt to yaml load it.
+ if isinstance(inc_value, str) and 'str' not in vtype:
+ try:
+ inc_value = yaml.load(inc_value)
+ except Exception:
+ raise YeditException('Could not determine type of incoming ' +
+ 'value. type=[%s] vtype=[%s]'
+ % (type(inc_value), vtype))
+
+ return inc_value
+
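+ # Illustrative coercions (a sketch; values are hypothetical):
+ # parse_value('yes', 'bool') -> True ('yes' passes the bool check,
+ # then yaml.load coerces it)
+ # parse_value('3') -> 3 (yaml.load turns the string into an int)
+ # parse_value('3', 'str') -> '3' (left untouched when vtype is str)
+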
+ # pylint: disable=too-many-return-statements,too-many-branches
+ @staticmethod
+ def run_ansible(module):
+ '''perform the idempotent crud operations'''
+ yamlfile = Yedit(filename=module.params['src'],
+ backup=module.params['backup'],
+ separator=module.params['separator'])
+
+ if module.params['src']:
+ rval = yamlfile.load()
+
+ if yamlfile.yaml_dict is None and \
+ module.params['state'] != 'present':
+ return {'failed': True,
+ 'msg': 'Error opening file [%s]. Verify that the ' +
+ 'file exists, that it has the correct' +
+ ' permissions, and is valid yaml.'}
+
+ if module.params['state'] == 'list':
+ if module.params['content']:
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
+ yamlfile.yaml_dict = content
+
+ if module.params['key']:
+ rval = yamlfile.get(module.params['key']) or {}
+
+ return {'changed': False, 'result': rval, 'state': "list"}
+
+ elif module.params['state'] == 'absent':
+ if module.params['content']:
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
+ yamlfile.yaml_dict = content
+
+ if module.params['update']:
+ rval = yamlfile.pop(module.params['key'],
+ module.params['value'])
+ else:
+ rval = yamlfile.delete(module.params['key'])
+
+ if rval[0] and module.params['src']:
+ yamlfile.write()
+
+ return {'changed': rval[0], 'result': rval[1], 'state': "absent"}
+
+ elif module.params['state'] == 'present':
+ # check if the content differs from what is in the file
+ if module.params['content']:
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
+
+ # We had no edits to make and the contents are the same
+ if yamlfile.yaml_dict == content and \
+ module.params['value'] is None:
+ return {'changed': False,
+ 'result': yamlfile.yaml_dict,
+ 'state': "present"}
+
+ yamlfile.yaml_dict = content
+
+ # we were passed a value; parse it
+ if module.params['value']:
+ value = Yedit.parse_value(module.params['value'],
+ module.params['value_type'])
+ key = module.params['key']
+ if module.params['update']:
+ # pylint: disable=line-too-long
+ curr_value = Yedit.get_curr_value(Yedit.parse_value(module.params['curr_value']), # noqa: E501
+ module.params['curr_value_format']) # noqa: E501
+
+ rval = yamlfile.update(key, value, module.params['index'], curr_value) # noqa: E501
+
+ elif module.params['append']:
+ rval = yamlfile.append(key, value)
+ else:
+ rval = yamlfile.put(key, value)
+
+ if rval[0] and module.params['src']:
+ yamlfile.write()
+
+ return {'changed': rval[0],
+ 'result': rval[1], 'state': "present"}
+
+ # no edits to make
+ if module.params['src']:
+ # pylint: disable=redefined-variable-type
+ rval = yamlfile.write()
+ return {'changed': rval[0],
+ 'result': rval[1],
+ 'state': "present"}
+
+ return {'failed': True, 'msg': 'Unknown state passed'}
+
+# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: lib/base.py -*- -*- -*-
+# pylint: disable=too-many-lines
+# noqa: E301,E302,E303,T001
+
+
+class OpenShiftCLIError(Exception):
+ '''Exception class for openshiftcli'''
+ pass
+
+
+# pylint: disable=too-few-public-methods
+class OpenShiftCLI(object):
+ ''' Class to wrap the command line tools '''
+ def __init__(self,
+ namespace,
+ kubeconfig='/etc/origin/master/admin.kubeconfig',
+ verbose=False,
+ all_namespaces=False):
+ ''' Constructor for OpenshiftCLI '''
+ self.namespace = namespace
+ self.verbose = verbose
+ self.kubeconfig = kubeconfig
+ self.all_namespaces = all_namespaces
+
+ # Pylint allows only 5 arguments to be passed.
+ # pylint: disable=too-many-arguments
+ def _replace_content(self, resource, rname, content, force=False, sep='.'):
+ ''' replace the current object with the content '''
+ res = self._get(resource, rname)
+ if not res['results']:
+ return res
+
+ fname = '/tmp/%s' % rname
+ yed = Yedit(fname, res['results'][0], separator=sep)
+ changes = []
+ for key, value in content.items():
+ changes.append(yed.put(key, value))
+
+ if any([change[0] for change in changes]):
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self._replace(fname, force)
+
+ return {'returncode': 0, 'updated': False}
+
+ def _replace(self, fname, force=False):
+ '''replace the current object with oc replace'''
+ cmd = ['replace', '-f', fname]
+ if force:
+ cmd.append('--force')
+ return self.openshift_cmd(cmd)
+
+ def _create_from_content(self, rname, content):
+ '''create a temporary file and then call oc create on it'''
+ fname = '/tmp/%s' % rname
+ yed = Yedit(fname, content=content)
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self._create(fname)
+
+ def _create(self, fname):
+ '''call oc create on a filename'''
+ return self.openshift_cmd(['create', '-f', fname])
+
+ def _delete(self, resource, rname, selector=None):
+ '''call oc delete on a resource'''
+ cmd = ['delete', resource, rname]
+ if selector:
+ cmd.append('--selector=%s' % selector)
+
+ return self.openshift_cmd(cmd)
+
+ def _process(self, template_name, create=False, params=None, template_data=None): # noqa: E501
+ '''process a template
+
+ template_name: the name of the template to process
+ create: whether to send to oc create after processing
+ params: the parameters for the template
+ template_data: the incoming template's data; instead of a file
+ '''
+ cmd = ['process']
+ if template_data:
+ cmd.extend(['-f', '-'])
+ else:
+ cmd.append(template_name)
+ if params:
+ param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ cmd.append('-v')
+ cmd.extend(param_str)
+
+ results = self.openshift_cmd(cmd, output=True, input_data=template_data)
+
+ if results['returncode'] != 0 or not create:
+ return results
+
+ fname = '/tmp/%s' % template_name
+ yed = Yedit(fname, results['results'])
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self.openshift_cmd(['create', '-f', fname])
+
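+ # e.g. (hypothetical template and parameter):
+ # _process('router', params={'ROUTER_NAME': 'router'}) runs
+ # oc process router -v ROUTER_NAME=router
+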
+ def _get(self, resource, rname=None, selector=None):
+ '''return a resource by name '''
+ cmd = ['get', resource]
+ if selector:
+ cmd.append('--selector=%s' % selector)
+ elif rname:
+ cmd.append(rname)
+
+ cmd.extend(['-o', 'json'])
+
+ rval = self.openshift_cmd(cmd, output=True)
+
+ # Ensure results are returned in an array
+ if 'items' in rval:
+ rval['results'] = rval['items']
+ elif not isinstance(rval['results'], list):
+ rval['results'] = [rval['results']]
+
+ return rval
+
+ def _schedulable(self, node=None, selector=None, schedulable=True):
+ ''' perform oadm manage-node --schedulable '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector=%s' % selector)
+
+ cmd.append('--schedulable=%s' % schedulable)
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
+
+ def _list_pods(self, node=None, selector=None, pod_selector=None):
+ ''' perform oadm list pods
+
+ node: the node in which to list pods
+ selector: the label selector filter if provided
+ pod_selector: the pod selector filter if provided
+ '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector=%s' % selector)
+
+ if pod_selector:
+ cmd.append('--pod-selector=%s' % pod_selector)
+
+ cmd.extend(['--list-pods', '-o', 'json'])
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
+
+ # pylint: disable=too-many-arguments
+ def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
+ ''' perform oadm manage-node evacuate '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector=%s' % selector)
+
+ if dry_run:
+ cmd.append('--dry-run')
+
+ if pod_selector:
+ cmd.append('--pod-selector=%s' % pod_selector)
+
+ if grace_period:
+ cmd.append('--grace-period=%s' % int(grace_period))
+
+ if force:
+ cmd.append('--force')
+
+ cmd.append('--evacuate')
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
+
+ def _version(self):
+ ''' return the openshift version'''
+ return self.openshift_cmd(['version'], output=True, output_type='raw')
+
+ def _import_image(self, url=None, name=None, tag=None):
+ ''' perform image import '''
+ cmd = ['import-image']
+
+ image = '{0}'.format(name)
+ if tag:
+ image += ':{0}'.format(tag)
+
+ cmd.append(image)
+
+ if url:
+ cmd.append('--from={0}/{1}'.format(url, image))
+
+ cmd.append('-n{0}'.format(self.namespace))
+
+ cmd.append('--confirm')
+ return self.openshift_cmd(cmd)
+
+ def _run(self, cmds, input_data):
+ ''' Actually executes the command. This makes mocking easier. '''
+ proc = subprocess.Popen(cmds,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ env={'KUBECONFIG': self.kubeconfig})
+
+ stdout, stderr = proc.communicate(input_data)
+
+ return proc.returncode, stdout, stderr
+
+ # pylint: disable=too-many-arguments,too-many-branches
+ def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
+ '''Base command for oc '''
+ cmds = []
+ if oadm:
+ cmds = ['/usr/bin/oadm']
+ else:
+ cmds = ['/usr/bin/oc']
+
+ if self.all_namespaces:
+ cmds.extend(['--all-namespaces'])
+ elif self.namespace is not None and self.namespace.lower() not in ['none', 'empty']: # E501
+ cmds.extend(['-n', self.namespace])
+
+ cmds.extend(cmd)
+
+ rval = {}
+ results = ''
+ err = None
+
+ if self.verbose:
+ print(' '.join(cmds))
+
+ returncode, stdout, stderr = self._run(cmds, input_data)
+
+ rval = {"returncode": returncode,
+ "results": results,
+ "cmd": ' '.join(cmds)}
+
+ if returncode == 0:
+ if output:
+ if output_type == 'json':
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as err:
+ if "No JSON object could be decoded" in err.args:
+ err = err.args
+ elif output_type == 'raw':
+ rval['results'] = stdout
+
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if err:
+ rval.update({"err": err,
+ "stderr": stderr,
+ "stdout": stdout,
+ "cmd": cmds})
+
+ else:
+ rval.update({"stderr": stderr,
+ "stdout": stdout,
+ "results": {}})
+
+ return rval
+
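+ # Illustrative success value (command and results are hypothetical):
+ # {'returncode': 0, 'results': {...},
+ # 'cmd': '/usr/bin/oc -n default get pods -o json'}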
+
+class Utils(object):
+ ''' utilities for openshiftcli modules '''
+ @staticmethod
+ def create_file(rname, data, ftype='yaml'):
+ ''' create a file in tmp with name and contents'''
+ path = os.path.join('/tmp', rname)
+ with open(path, 'w') as fds:
+ if ftype == 'yaml':
+ fds.write(yaml.dump(data, Dumper=yaml.RoundTripDumper))
+
+ elif ftype == 'json':
+ fds.write(json.dumps(data))
+ else:
+ fds.write(data)
+
+ # Register cleanup when module is done
+ atexit.register(Utils.cleanup, [path])
+ return path
+
+ @staticmethod
+ def create_files_from_contents(content, content_type=None):
+ '''Turn a list of dicts with 'path' and 'data' keys into a files list'''
+ if not isinstance(content, list):
+ content = [content]
+ files = []
+ for item in content:
+ path = Utils.create_file(item['path'], item['data'], ftype=content_type)
+ files.append({'name': os.path.basename(path), 'path': path})
+ return files
+
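+ # e.g. (hypothetical content): {'path': 'ca.crt', 'data': '...'} yields
+ # [{'name': 'ca.crt', 'path': '/tmp/ca.crt'}]
+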
+ @staticmethod
+ def cleanup(files):
+ '''Clean up on exit '''
+ for sfile in files:
+ if os.path.exists(sfile):
+ if os.path.isdir(sfile):
+ shutil.rmtree(sfile)
+ elif os.path.isfile(sfile):
+ os.remove(sfile)
+
+ @staticmethod
+ def exists(results, _name):
+ ''' Check to see if the results include the name '''
+ if not results:
+ return False
+
+ if Utils.find_result(results, _name):
+ return True
+
+ return False
+
+ @staticmethod
+ def find_result(results, _name):
+ ''' Find the specified result by name'''
+ rval = None
+ for result in results:
+ if 'metadata' in result and result['metadata']['name'] == _name:
+ rval = result
+ break
+
+ return rval
+
+ @staticmethod
+ def get_resource_file(sfile, sfile_type='yaml'):
+ ''' return the parsed contents of a resource file '''
+ contents = None
+ with open(sfile) as sfd:
+ contents = sfd.read()
+
+ if sfile_type == 'yaml':
+ contents = yaml.load(contents, yaml.RoundTripLoader)
+ elif sfile_type == 'json':
+ contents = json.loads(contents)
+
+ return contents
+
+ @staticmethod
+ def filter_versions(stdout):
+ ''' filter the oc version output '''
+
+ version_dict = {}
+ version_search = ['oc', 'openshift', 'kubernetes']
+
+ for line in stdout.strip().split('\n'):
+ for term in version_search:
+ if not line:
+ continue
+ if line.startswith(term):
+ version_dict[term] = line.split()[-1]
+
+ # Horrible hack to get the openshift version in OpenShift 3.2:
+ # by default, "oc version" in 3.2 does not return an "openshift" version
+ if "openshift" not in version_dict:
+ version_dict["openshift"] = version_dict["oc"]
+
+ return version_dict
+
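+ # Illustrative: given "oc v3.3.0.33\nkubernetes v1.3.0+52492b4", the
+ # result is {'oc': 'v3.3.0.33', 'kubernetes': 'v1.3.0+52492b4',
+ # 'openshift': 'v3.3.0.33'} (the 'openshift' key falls back to 'oc').
+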
+ @staticmethod
+ def add_custom_versions(versions):
+ ''' create custom versions strings '''
+
+ versions_dict = {}
+
+ for tech, version in versions.items():
+ # clean up "-" from version
+ if "-" in version:
+ version = version.split("-")[0]
+
+ if version.startswith('v'):
+ versions_dict[tech + '_numeric'] = version[1:].split('+')[0]
+ # "v3.3.0.33" is what we have, we want "3.3"
+ versions_dict[tech + '_short'] = version[1:4]
+
+ return versions_dict
+
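+ # Illustrative: {'oc': 'v3.3.0.33'} yields
+ # {'oc_numeric': '3.3.0.33', 'oc_short': '3.3'}
+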
+ @staticmethod
+ def openshift_installed():
+ ''' check if openshift is installed '''
+ import yum
+
+ yum_base = yum.YumBase()
+ if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
+ return True
+
+ return False
+
+ # Disabling too-many-branches. This is a yaml dictionary comparison function
+ # pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
+ @staticmethod
+ def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
+ ''' Given a user defined definition, compare it with the results given back by our query. '''
+
+ # Currently these values are autogenerated and we do not need to check them
+ skip = ['metadata', 'status']
+ if skip_keys:
+ skip.extend(skip_keys)
+
+ for key, value in result_def.items():
+ if key in skip:
+ continue
+
+ # Both are lists
+ if isinstance(value, list):
+ if key not in user_def:
+ if debug:
+ print('User data does not have key [%s]' % key)
+ print('User data: %s' % user_def)
+ return False
+
+ if not isinstance(user_def[key], list):
+ if debug:
+ print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key]))
+ return False
+
+ if len(user_def[key]) != len(value):
+ if debug:
+ print("List lengths are not equal.")
+ print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value)))
+ print("user_def: %s" % user_def[key])
+ print("value: %s" % value)
+ return False
+
+ for values in zip(user_def[key], value):
+ if isinstance(values[0], dict) and isinstance(values[1], dict):
+ if debug:
+ print('sending list - list')
+ print(type(values[0]))
+ print(type(values[1]))
+ result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
+ if not result:
+ print('list compare returned false')
+ return False
+
+ elif value != user_def[key]:
+ if debug:
+ print('value should be identical')
+ print(value)
+ print(user_def[key])
+ return False
+
+ # recurse on a dictionary
+ elif isinstance(value, dict):
+ if key not in user_def:
+ if debug:
+ print("user_def does not have key [%s]" % key)
+ return False
+ if not isinstance(user_def[key], dict):
+ if debug:
+ print("dict returned false: not instance of dict")
+ return False
+
+ # before passing ensure keys match
+ api_values = set(value.keys()) - set(skip)
+ user_values = set(user_def[key].keys()) - set(skip)
+ if api_values != user_values:
+ if debug:
+ print("keys are not equal in dict")
+ print(api_values)
+ print(user_values)
+ return False
+
+ result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
+ if not result:
+ if debug:
+ print("dict returned false")
+ print(result)
+ return False
+
+ # Verify each key, value pair is the same
+ else:
+ if key not in user_def or value != user_def[key]:
+ if debug:
+ print("value not equal; user_def does not have key")
+ print(key)
+ print(value)
+ if key in user_def:
+ print(user_def[key])
+ return False
+
+ if debug:
+ print('returning true')
+ return True
+
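+ # Illustrative (hypothetical definitions): a user_def of
+ # {'spec': {'replicas': 2}} compares equal to a result_def of
+ # {'spec': {'replicas': 2}, 'metadata': {...}, 'status': {...}}
+ # because 'metadata' and 'status' are always skipped.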
+
+class OpenShiftCLIConfig(object):
+ '''Generic Config'''
+ def __init__(self, rname, namespace, kubeconfig, options):
+ self.kubeconfig = kubeconfig
+ self.name = rname
+ self.namespace = namespace
+ self._options = options
+
+ @property
+ def config_options(self):
+ ''' return config options '''
+ return self._options
+
+ def to_option_list(self):
+ '''return all options as a string'''
+ return self.stringify()
+
+ def stringify(self):
+ ''' return the options hash as cli params in a string '''
+ rval = []
+ for key, data in self.config_options.items():
+ if data['include'] \
+ and (data['value'] or isinstance(data['value'], int)):
+ rval.append('--%s=%s' % (key.replace('_', '-'), data['value']))
+
+ return rval
+
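+ # Illustrative (hypothetical option): {'pod_selector': {'value': 'app=db',
+ # 'include': True}} stringifies to ['--pod-selector=app=db']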
+
+# -*- -*- -*- End included fragment: lib/base.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: class/oadm_manage_node.py -*- -*- -*-
+
+
+class ManageNodeException(Exception):
+ ''' manage-node exception class '''
+ pass
+
+
+class ManageNodeConfig(OpenShiftCLIConfig):
+ ''' ManageNodeConfig is a DTO for the manage-node command.'''
+ def __init__(self, kubeconfig, node_options):
+ super(ManageNodeConfig, self).__init__(None, None, kubeconfig, node_options)
+
+
+# pylint: disable=too-many-instance-attributes
+class ManageNode(OpenShiftCLI):
+ ''' Class to wrap the oc command line tools '''
+
+ # pylint allows 5
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ config,
+ verbose=False):
+ ''' Constructor for ManageNode '''
+ super(ManageNode, self).__init__(None, config.kubeconfig)
+ self.config = config
+
+ def evacuate(self):
+ ''' formulate the params and run oadm manage-node '''
+ return self._evacuate(node=self.config.config_options['node']['value'],
+ selector=self.config.config_options['selector']['value'],
+ pod_selector=self.config.config_options['pod_selector']['value'],
+ dry_run=self.config.config_options['dry_run']['value'],
+ grace_period=self.config.config_options['grace_period']['value'],
+ force=self.config.config_options['force']['value'],
+ )
+
+ def get_nodes(self, node=None, selector=''):
+ '''perform oc get node'''
+ _node = None
+ _sel = None
+ if node:
+ _node = node
+ if selector:
+ _sel = selector
+
+ results = self._get('node', rname=_node, selector=_sel)
+ if results['returncode'] != 0:
+ return results
+
+ nodes = []
+ items = None
+ if results['results'][0]['kind'] == 'List':
+ items = results['results'][0]['items']
+ else:
+ items = results['results']
+
+ for node in items:
+ _node = {}
+ _node['name'] = node['metadata']['name']
+ _node['schedulable'] = True
+ if 'unschedulable' in node['spec']:
+ _node['schedulable'] = False
+ nodes.append(_node)
+
+ return nodes
+
+ def get_pods_from_node(self, node, pod_selector=None):
+ '''return pods for a node'''
+ results = self._list_pods(node=[node], pod_selector=pod_selector)
+
+ if results['returncode'] != 0:
+ return results
+
+ # When a selector or node is matched, its name is echoed along with the json.
+ # Split the results on that marker line and load the json for each
+ # matching node; the caller (list_pods) then assembles the per-node mapping:
+ # {'node': [pod, pod], 'node': [pod, pod]}
+ # 3.2 includes the following lines in stdout: "Listing matched pods on node:"
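+ # e.g. (illustrative, abbreviated 3.2-style stdout):
+ # Listing matched pods on node: node-1.example.com
+ # {"items": [...]}
+ # Listing matched pods on node: node-2.example.com
+ # {"items": [...]}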
+ all_pods = []
+ if "Listing matched" in results['results']:
+ listing_match = re.compile('\n^Listing matched.*$\n', flags=re.MULTILINE)
+ pods = listing_match.split(results['results'])
+ for pod in pods:
+ if pod:
+ all_pods.extend(json.loads(pod)['items'])
+
+ # 3.3 specific
+ else:
+ # this is gross but I filed a bug...
+ # https://bugzilla.redhat.com/show_bug.cgi?id=1381621
+ # build our own json from the output.
+ all_pods = json.loads(results['results'])['items']
+
+ return all_pods
+
+ def list_pods(self):
+ ''' run oadm manage-node --list-pods'''
+ _nodes = self.config.config_options['node']['value']
+ _selector = self.config.config_options['selector']['value']
+ _pod_selector = self.config.config_options['pod_selector']['value']
+
+ if not _nodes:
+ _nodes = self.get_nodes(selector=_selector)
+ else:
+ _nodes = [{'name': name} for name in _nodes]
+
+ all_pods = {}
+ for node in _nodes:
+ results = self.get_pods_from_node(node['name'], pod_selector=_pod_selector)
+ if isinstance(results, dict):
+ return results
+ all_pods[node['name']] = results
+
+ results = {}
+ results['nodes'] = all_pods
+ results['returncode'] = 0
+ return results
+
+ def schedulable(self):
+ '''oadm manage-node call for making nodes unschedulable'''
+ nodes = self.config.config_options['node']['value']
+ selector = self.config.config_options['selector']['value']
+
+ if not nodes:
+ nodes = self.get_nodes(selector=selector)
+ else:
+ tmp_nodes = []
+ for name in nodes:
+ tmp_result = self.get_nodes(name)
+ if isinstance(tmp_result, dict):
+ tmp_nodes.append(tmp_result)
+ continue
+ tmp_nodes.extend(tmp_result)
+ nodes = tmp_nodes
+
+ # This is a short circuit based on the way we fetch nodes.
+ # If node is a dict/list then we've already fetched them.
+ for node in nodes:
+ if isinstance(node, dict) and 'returncode' in node:
+ return {'results': nodes, 'returncode': node['returncode']}
+ if isinstance(node, list) and 'returncode' in node[0]:
+ return {'results': nodes, 'returncode': node[0]['returncode']}
+ # check all the nodes that were returned and verify they are:
+ # node['schedulable'] == self.config.config_options['schedulable']['value']
+ if any([node['schedulable'] != self.config.config_options['schedulable']['value'] for node in nodes]):
+
+ results = self._schedulable(node=self.config.config_options['node']['value'],
+ selector=self.config.config_options['selector']['value'],
+ schedulable=self.config.config_options['schedulable']['value'])
+
+ # 'NAME STATUS AGE\\nip-172-31-49-140.ec2.internal Ready 4h\\n' # E501
+ # normalize formatting with previous return objects
+ if results['results'].startswith('NAME'):
+ nodes = []
+ # remove the header line and the trailing newline before parsing the node lines
+ for node_results in results['results'].split('\n')[1:-1]:
+ parts = node_results.split()
+ nodes.append({'name': parts[0], 'schedulable': parts[1] == 'Ready'})
+ results['nodes'] = nodes
+
+ return results
+
+ results = {}
+ results['returncode'] = 0
+ results['changed'] = False
+ results['nodes'] = nodes
+
+ return results
+
+ @staticmethod
+ def run_ansible(params, check_mode):
+ '''run the idempotent ansible code'''
+ nconfig = ManageNodeConfig(params['kubeconfig'],
+ {'node': {'value': params['node'], 'include': True},
+ 'selector': {'value': params['selector'], 'include': True},
+ 'pod_selector': {'value': params['pod_selector'], 'include': True},
+ 'schedulable': {'value': params['schedulable'], 'include': True},
+ 'list_pods': {'value': params['list_pods'], 'include': True},
+ 'evacuate': {'value': params['evacuate'], 'include': True},
+ 'dry_run': {'value': params['dry_run'], 'include': True},
+ 'force': {'value': params['force'], 'include': True},
+ 'grace_period': {'value': params['grace_period'], 'include': True},
+ })
+
+ oadm_mn = ManageNode(nconfig)
+ # Run the oadm manage-node commands
+ results = None
+ changed = False
+ if params['schedulable'] is not None:
+ if check_mode:
+ # schedulable returns results after the fact.
+ # We need to redo how this works to support check_mode completely.
+ return {'changed': True, 'msg': 'CHECK_MODE: would have called schedulable.'}
+ results = oadm_mn.schedulable()
+ if 'changed' not in results:
+ changed = True
+
+ if params['evacuate']:
+ results = oadm_mn.evacuate()
+ changed = True
+ elif params['list_pods']:
+ results = oadm_mn.list_pods()
+
+ if not results or results['returncode'] != 0:
+ return {'failed': True, 'msg': results}
+
+ return {'changed': changed, 'results': results, 'state': "present"}
+
+# -*- -*- -*- End included fragment: class/oadm_manage_node.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: ansible/oadm_manage_node.py -*- -*- -*-
+
+
+def main():
+ '''
+ ansible oadm module for manage-node
+ '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ debug=dict(default=False, type='bool'),
+ kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
+ node=dict(default=None, type='list'),
+ selector=dict(default=None, type='str'),
+ pod_selector=dict(default=None, type='str'),
+ schedulable=dict(default=None, type='bool'),
+ list_pods=dict(default=False, type='bool'),
+ evacuate=dict(default=False, type='bool'),
+ dry_run=dict(default=False, type='bool'),
+ force=dict(default=False, type='bool'),
+ grace_period=dict(default=None, type='int'),
+ ),
+ mutually_exclusive=[["selector", "node"], ['evacuate', 'list_pods'], ['list_pods', 'schedulable']],
+ required_one_of=[["node", "selector"]],
+
+ supports_check_mode=True,
+ )
+ results = ManageNode.run_ansible(module.params, module.check_mode)
+
+ if 'failed' in results:
+ module.fail_json(**results)
+
+ module.exit_json(**results)
+
+
+if __name__ == "__main__":
+ main()
+
+# -*- -*- -*- End included fragment: ansible/oadm_manage_node.py -*- -*- -*-
diff --git a/roles/lib_openshift/library/oc_edit.py b/roles/lib_openshift/library/oc_edit.py
index ad158e56a..54036231b 100644
--- a/roles/lib_openshift/library/oc_edit.py
+++ b/roles/lib_openshift/library/oc_edit.py
@@ -936,6 +936,18 @@ class OpenShiftCLI(object):
cmd.append('--confirm')
return self.openshift_cmd(cmd)
+ def _run(self, cmds, input_data):
+ ''' Actually executes the command. This makes mocking easier. '''
+ proc = subprocess.Popen(cmds,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ env={'KUBECONFIG': self.kubeconfig})
+
+ stdout, stderr = proc.communicate(input_data)
+
+ return proc.returncode, stdout, stderr
+
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
'''Base command for oc '''
@@ -947,7 +959,7 @@ class OpenShiftCLI(object):
if self.all_namespaces:
cmds.extend(['--all-namespaces'])
- elif self.namespace:
+ elif self.namespace is not None and self.namespace.lower() not in ['none', 'empty']: # E501
cmds.extend(['-n', self.namespace])
cmds.extend(cmd)
@@ -959,18 +971,13 @@ class OpenShiftCLI(object):
if self.verbose:
print(' '.join(cmds))
- proc = subprocess.Popen(cmds,
- stdin=subprocess.PIPE,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- env={'KUBECONFIG': self.kubeconfig})
+ returncode, stdout, stderr = self._run(cmds, input_data)
- stdout, stderr = proc.communicate(input_data)
- rval = {"returncode": proc.returncode,
+ rval = {"returncode": returncode,
"results": results,
"cmd": ' '.join(cmds)}
- if proc.returncode == 0:
+ if returncode == 0:
if output:
if output_type == 'json':
try:
diff --git a/roles/lib_openshift/library/oc_obj.py b/roles/lib_openshift/library/oc_obj.py
index b5ccfe700..88880223d 100644
--- a/roles/lib_openshift/library/oc_obj.py
+++ b/roles/lib_openshift/library/oc_obj.py
@@ -915,6 +915,18 @@ class OpenShiftCLI(object):
cmd.append('--confirm')
return self.openshift_cmd(cmd)
+ def _run(self, cmds, input_data):
+ ''' Actually executes the command. This makes mocking easier. '''
+ proc = subprocess.Popen(cmds,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ env={'KUBECONFIG': self.kubeconfig})
+
+ stdout, stderr = proc.communicate(input_data)
+
+ return proc.returncode, stdout, stderr
+
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
'''Base command for oc '''
@@ -926,7 +938,7 @@ class OpenShiftCLI(object):
if self.all_namespaces:
cmds.extend(['--all-namespaces'])
- elif self.namespace:
+ elif self.namespace is not None and self.namespace.lower() not in ['none', 'empty']: # E501
cmds.extend(['-n', self.namespace])
cmds.extend(cmd)
@@ -938,18 +950,13 @@ class OpenShiftCLI(object):
if self.verbose:
print(' '.join(cmds))
- proc = subprocess.Popen(cmds,
- stdin=subprocess.PIPE,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- env={'KUBECONFIG': self.kubeconfig})
+ returncode, stdout, stderr = self._run(cmds, input_data)
- stdout, stderr = proc.communicate(input_data)
- rval = {"returncode": proc.returncode,
+ rval = {"returncode": returncode,
"results": results,
"cmd": ' '.join(cmds)}
- if proc.returncode == 0:
+ if returncode == 0:
if output:
if output_type == 'json':
try:
diff --git a/roles/lib_openshift/library/oc_route.py b/roles/lib_openshift/library/oc_route.py
index fb51367fc..0419c7bc7 100644
--- a/roles/lib_openshift/library/oc_route.py
+++ b/roles/lib_openshift/library/oc_route.py
@@ -940,6 +940,18 @@ class OpenShiftCLI(object):
cmd.append('--confirm')
return self.openshift_cmd(cmd)
+ def _run(self, cmds, input_data):
+ ''' Actually executes the command. This makes mocking easier. '''
+ proc = subprocess.Popen(cmds,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ env={'KUBECONFIG': self.kubeconfig})
+
+ stdout, stderr = proc.communicate(input_data)
+
+ return proc.returncode, stdout, stderr
+
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
'''Base command for oc '''
@@ -951,7 +963,7 @@ class OpenShiftCLI(object):
if self.all_namespaces:
cmds.extend(['--all-namespaces'])
- elif self.namespace:
+ elif self.namespace is not None and self.namespace.lower() not in ['none', 'empty']: # E501
cmds.extend(['-n', self.namespace])
cmds.extend(cmd)
@@ -963,18 +975,13 @@ class OpenShiftCLI(object):
if self.verbose:
print(' '.join(cmds))
- proc = subprocess.Popen(cmds,
- stdin=subprocess.PIPE,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- env={'KUBECONFIG': self.kubeconfig})
+ returncode, stdout, stderr = self._run(cmds, input_data)
- stdout, stderr = proc.communicate(input_data)
- rval = {"returncode": proc.returncode,
+ rval = {"returncode": returncode,
"results": results,
"cmd": ' '.join(cmds)}
- if proc.returncode == 0:
+ if returncode == 0:
if output:
if output_type == 'json':
try:
diff --git a/roles/lib_openshift/library/oc_scale.py b/roles/lib_openshift/library/oc_scale.py
index 6ae85e220..7eec9f81a 100644
--- a/roles/lib_openshift/library/oc_scale.py
+++ b/roles/lib_openshift/library/oc_scale.py
@@ -890,6 +890,18 @@ class OpenShiftCLI(object):
cmd.append('--confirm')
return self.openshift_cmd(cmd)
+ def _run(self, cmds, input_data):
+ ''' Actually executes the command. This makes mocking easier. '''
+ proc = subprocess.Popen(cmds,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ env={'KUBECONFIG': self.kubeconfig})
+
+ stdout, stderr = proc.communicate(input_data)
+
+ return proc.returncode, stdout, stderr
+
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
'''Base command for oc '''
@@ -901,7 +913,7 @@ class OpenShiftCLI(object):
if self.all_namespaces:
cmds.extend(['--all-namespaces'])
- elif self.namespace:
+ elif self.namespace is not None and self.namespace.lower() not in ['none', 'empty']: # E501
cmds.extend(['-n', self.namespace])
cmds.extend(cmd)
@@ -913,18 +925,13 @@ class OpenShiftCLI(object):
if self.verbose:
print(' '.join(cmds))
- proc = subprocess.Popen(cmds,
- stdin=subprocess.PIPE,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- env={'KUBECONFIG': self.kubeconfig})
+ returncode, stdout, stderr = self._run(cmds, input_data)
- stdout, stderr = proc.communicate(input_data)
- rval = {"returncode": proc.returncode,
+ rval = {"returncode": returncode,
"results": results,
"cmd": ' '.join(cmds)}
- if proc.returncode == 0:
+ if returncode == 0:
if output:
if output_type == 'json':
try:
@@ -1644,6 +1651,8 @@ class OCScale(OpenShiftCLI):
state = params['state']
api_rval = oc_scale.get()
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
#####
# Get
diff --git a/roles/lib_openshift/library/oc_secret.py b/roles/lib_openshift/library/oc_secret.py
index 69dcb314b..f71291d53 100644
--- a/roles/lib_openshift/library/oc_secret.py
+++ b/roles/lib_openshift/library/oc_secret.py
@@ -936,6 +936,18 @@ class OpenShiftCLI(object):
cmd.append('--confirm')
return self.openshift_cmd(cmd)
+ def _run(self, cmds, input_data):
+ ''' Actually executes the command. This makes mocking easier. '''
+ proc = subprocess.Popen(cmds,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ env={'KUBECONFIG': self.kubeconfig})
+
+ stdout, stderr = proc.communicate(input_data)
+
+ return proc.returncode, stdout, stderr
+
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
'''Base command for oc '''
@@ -947,7 +959,7 @@ class OpenShiftCLI(object):
if self.all_namespaces:
cmds.extend(['--all-namespaces'])
- elif self.namespace:
+ elif self.namespace is not None and self.namespace.lower() not in ['none', 'empty']: # E501
cmds.extend(['-n', self.namespace])
cmds.extend(cmd)
@@ -959,18 +971,13 @@ class OpenShiftCLI(object):
if self.verbose:
print(' '.join(cmds))
- proc = subprocess.Popen(cmds,
- stdin=subprocess.PIPE,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- env={'KUBECONFIG': self.kubeconfig})
+ returncode, stdout, stderr = self._run(cmds, input_data)
- stdout, stderr = proc.communicate(input_data)
- rval = {"returncode": proc.returncode,
+ rval = {"returncode": returncode,
"results": results,
"cmd": ' '.join(cmds)}
- if proc.returncode == 0:
+ if returncode == 0:
if output:
if output_type == 'json':
try:
diff --git a/roles/lib_openshift/library/oc_serviceaccount.py b/roles/lib_openshift/library/oc_serviceaccount.py
new file mode 100644
index 000000000..ff93b9b23
--- /dev/null
+++ b/roles/lib_openshift/library/oc_serviceaccount.py
@@ -0,0 +1,1543 @@
+#!/usr/bin/env python
+# pylint: disable=missing-docstring
+# flake8: noqa: T001
+# ___ ___ _ _ ___ ___ _ _____ ___ ___
+# / __| __| \| | __| _ \ /_\_ _| __| \
+# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
+# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
+# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
+# | |) | (_) | | .` | (_) || | | _|| |) | | | |
+# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
+#
+# Copyright 2016 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# -*- -*- -*- Begin included fragment: lib/import.py -*- -*- -*-
+'''
+ OpenShiftCLI class that wraps the oc commands in a subprocess
+'''
+# pylint: disable=too-many-lines
+
+from __future__ import print_function
+import atexit
+import json
+import os
+import re
+import shutil
+import subprocess
+# pylint: disable=import-error
+import ruamel.yaml as yaml
+from ansible.module_utils.basic import AnsibleModule
+
+# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: doc/serviceaccount -*- -*- -*-
+
+DOCUMENTATION = '''
+---
+module: oc_serviceaccount
+short_description: Module to manage OpenShift service accounts
+description:
+ - Manage OpenShift service accounts programmatically.
+options:
+ state:
+ description:
+ - If present, the service account will be created if it doesn't exist or updated if different. If absent, the service account will be removed if present. If list, information about the service account will be gathered and returned as part of the Ansible call results.
+ required: false
+ default: present
+ choices: ["present", "absent", "list"]
+ aliases: []
+ kubeconfig:
+ description:
+ - The path for the kubeconfig file to use for authentication
+ required: false
+ default: /etc/origin/master/admin.kubeconfig
+ aliases: []
+ debug:
+ description:
+ - Turn on debug output.
+ required: false
+ default: false
+ aliases: []
+ name:
+ description:
+ - Name of the service account.
+ required: true
+ default: None
+ aliases: []
+ namespace:
+ description:
+ - Namespace of the service account.
+ required: true
+ default: default
+ aliases: []
+ secrets:
+ description:
+ - A list of secrets that are associated with the service account.
+ required: false
+ default: None
+ aliases: []
+ image_pull_secrets:
+ description:
+ - A list of the image pull secrets that are associated with the service account.
+ required: false
+ default: None
+ aliases: []
+author:
+- "Kenny Woodson <kwoodson@redhat.com>"
+extends_documentation_fragment: []
+'''
+
+EXAMPLES = '''
+- name: create registry serviceaccount
+ oc_serviceaccount:
+ name: registry
+ namespace: default
+ secrets:
+ - docker-registry-config
+ - registry-secret
+ register: sa_out
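+
+# A second, illustrative invocation (hypothetical register name): gather
+# the current definition of the service account without changing it.
+- name: list the registry serviceaccount
+ oc_serviceaccount:
+ state: list
+ name: registry
+ namespace: default
+ register: sa_info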
+'''
+
+# -*- -*- -*- End included fragment: doc/serviceaccount -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
+# noqa: E301,E302
+
+
+class YeditException(Exception):
+ ''' Exception class for Yedit '''
+ pass
+
+
+# pylint: disable=too-many-public-methods
+class Yedit(object):
+ ''' Class to modify yaml files '''
+ re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ com_sep = set(['.', '#', '|', ':'])
+
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ filename=None,
+ content=None,
+ content_type='yaml',
+ separator='.',
+ backup=False):
+ self.content = content
+ self._separator = separator
+ self.filename = filename
+ self.__yaml_dict = content
+ self.content_type = content_type
+ self.backup = backup
+ self.load(content_type=self.content_type)
+ if self.__yaml_dict is None:
+ self.__yaml_dict = {}
+
+ @property
+ def separator(self):
+ ''' getter method for separator '''
+ return self._separator
+
+ @separator.setter
+ def separator(self, value):
+ ''' setter method for separator '''
+ self._separator = value
+
+ @property
+ def yaml_dict(self):
+ ''' getter method for yaml_dict '''
+ return self.__yaml_dict
+
+ @yaml_dict.setter
+ def yaml_dict(self, value):
+ ''' setter method for yaml_dict '''
+ self.__yaml_dict = value
+
+ @staticmethod
+ def parse_key(key, sep='.'):
+ '''parse the key allowing the appropriate separator'''
+ common_separators = list(Yedit.com_sep - set([sep]))
+ return re.findall(Yedit.re_key % ''.join(common_separators), key)
+
+ @staticmethod
+ def valid_key(key, sep='.'):
+ '''validate the incoming key'''
+ common_separators = list(Yedit.com_sep - set([sep]))
+ if not re.match(Yedit.re_valid_key % ''.join(common_separators), key):
+ return False
+
+ return True
+
+ @staticmethod
+ def remove_entry(data, key, sep='.'):
+ ''' remove data at location key '''
+ if key == '' and isinstance(data, dict):
+ data.clear()
+ return True
+ elif key == '' and isinstance(data, list):
+ del data[:]
+ return True
+
+ if not (key and Yedit.valid_key(key, sep)) and \
+ isinstance(data, (list, dict)):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes[:-1]:
+ if dict_key and isinstance(data, dict):
+ data = data.get(dict_key, None)
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ # process last index for remove
+ # expected list entry
+ if key_indexes[-1][0]:
+ if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
+ del data[int(key_indexes[-1][0])]
+ return True
+
+ # expected dict entry
+ elif key_indexes[-1][1]:
+ if isinstance(data, dict):
+ del data[key_indexes[-1][1]]
+ return True
+
+ @staticmethod
+ def add_entry(data, key, item=None, sep='.'):
+ ''' Add (set) an item in a dictionary using key notation a.b.c
+ d = {'a': {'b': 'c'}}
+ key = a#b, sep = '#'
+ sets d['a']['b'] = item
+ '''
+ if key == '':
+ pass
+ elif (not (key and Yedit.valid_key(key, sep)) and
+ isinstance(data, (list, dict))):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes[:-1]:
+ if dict_key:
+ if isinstance(data, dict) and dict_key in data and data[dict_key]: # noqa: E501
+ data = data[dict_key]
+ continue
+
+ elif data and not isinstance(data, dict):
+ return None
+
+ data[dict_key] = {}
+ data = data[dict_key]
+
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ if key == '':
+ data = item
+
+ # process last index for add
+ # expected list entry
+ elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
+ data[int(key_indexes[-1][0])] = item
+
+ # expected dict entry
+ elif key_indexes[-1][1] and isinstance(data, dict):
+ data[key_indexes[-1][1]] = item
+
+ return data
+
+ @staticmethod
+ def get_entry(data, key, sep='.'):
+ ''' Get an item from a dictionary with key notation a.b.c
+ d = {'a': {'b': 'c'}}
+ key = a.b
+ return c
+ '''
+ if key == '':
+ pass
+ elif (not (key and Yedit.valid_key(key, sep)) and
+ isinstance(data, (list, dict))):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes:
+ if dict_key and isinstance(data, dict):
+ data = data.get(dict_key, None)
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ return data
+
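+ # Illustrative lookups (hypothetical data):
+ # get_entry({'a': {'b': {'c': 'd'}}}, 'a.b.c') -> 'd'
+ # get_entry({'a': [{'b': 1}]}, 'a[0].b') -> 1
+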
+ def write(self):
+ ''' write to file '''
+ if not self.filename:
+ raise YeditException('Please specify a filename.')
+
+ if self.backup and self.file_exists():
+ shutil.copy(self.filename, self.filename + '.orig')
+
+ tmp_filename = self.filename + '.yedit'
+ with open(tmp_filename, 'w') as yfd:
+ # pylint: disable=no-member
+ if hasattr(self.yaml_dict, 'fa'):
+ self.yaml_dict.fa.set_block_style()
+
+ yfd.write(yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
+
+ os.rename(tmp_filename, self.filename)
+
+ return (True, self.yaml_dict)
+
+ def read(self):
+ ''' read from file '''
+ # check if it exists
+ if self.filename is None or not self.file_exists():
+ return None
+
+ contents = None
+ with open(self.filename) as yfd:
+ contents = yfd.read()
+
+ return contents
+
+ def file_exists(self):
+ ''' return whether file exists '''
+ if os.path.exists(self.filename):
+ return True
+
+ return False
+
+ def load(self, content_type='yaml'):
+ ''' return yaml file '''
+ contents = self.read()
+
+ if not contents and not self.content:
+ return None
+
+ if self.content:
+ if isinstance(self.content, dict):
+ self.yaml_dict = self.content
+ return self.yaml_dict
+ elif isinstance(self.content, str):
+ contents = self.content
+
+ # check if it is yaml
+ try:
+ if content_type == 'yaml' and contents:
+ self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
+ # pylint: disable=no-member
+ if hasattr(self.yaml_dict, 'fa'):
+ self.yaml_dict.fa.set_block_style()
+ elif content_type == 'json' and contents:
+ self.yaml_dict = json.loads(contents)
+ except yaml.YAMLError as err:
+ # Error loading yaml or json
+ raise YeditException('Problem with loading yaml file. %s' % err)
+
+ return self.yaml_dict
+
+ def get(self, key):
+ ''' get a specified key'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
+ except KeyError:
+ entry = None
+
+ return entry
+
+ def pop(self, path, key_or_item):
+ ''' remove a key, value pair from a dict or an item for a list'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ return (False, self.yaml_dict)
+
+ if isinstance(entry, dict):
+ # pylint: disable=no-member,maybe-no-member
+ if key_or_item in entry:
+ entry.pop(key_or_item)
+ return (True, self.yaml_dict)
+ return (False, self.yaml_dict)
+
+ elif isinstance(entry, list):
+ # pylint: disable=no-member,maybe-no-member
+ ind = None
+ try:
+ ind = entry.index(key_or_item)
+ except ValueError:
+ return (False, self.yaml_dict)
+
+ entry.pop(ind)
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ def delete(self, path):
+ ''' remove path from a dict'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ return (False, self.yaml_dict)
+
+ result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
+ if not result:
+ return (False, self.yaml_dict)
+
+ return (True, self.yaml_dict)
+
+ def exists(self, path, value):
+ ''' check if value exists at path'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if isinstance(entry, list):
+ if value in entry:
+ return True
+ return False
+
+ elif isinstance(entry, dict):
+ if isinstance(value, dict):
+ rval = False
+ for key, val in value.items():
+ if entry[key] != val:
+ rval = False
+ break
+ else:
+ rval = True
+ return rval
+
+ return value in entry
+
+ return entry == value
+
+ def append(self, path, value):
+ '''append value to a list'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ self.put(path, [])
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ if not isinstance(entry, list):
+ return (False, self.yaml_dict)
+
+ # pylint: disable=no-member,maybe-no-member
+ entry.append(value)
+ return (True, self.yaml_dict)
+
+ # pylint: disable=too-many-arguments
+ def update(self, path, value, index=None, curr_value=None):
+ ''' put path, value into a dict '''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if isinstance(entry, dict):
+ # pylint: disable=no-member,maybe-no-member
+ if not isinstance(value, dict):
+ raise YeditException('Cannot replace key, value entry in ' +
+ 'dict with non-dict type. value=[%s] [%s]' % (value, type(value))) # noqa: E501
+
+ entry.update(value)
+ return (True, self.yaml_dict)
+
+ elif isinstance(entry, list):
+ # pylint: disable=no-member,maybe-no-member
+ ind = None
+ if curr_value:
+ try:
+ ind = entry.index(curr_value)
+ except ValueError:
+ return (False, self.yaml_dict)
+
+ elif index is not None:
+ ind = index
+
+ if ind is not None and entry[ind] != value:
+ entry[ind] = value
+ return (True, self.yaml_dict)
+
+ # see if it exists in the list
+ try:
+ ind = entry.index(value)
+ except ValueError:
+ # doesn't exist, append it
+ entry.append(value)
+ return (True, self.yaml_dict)
+
+ # already exists, return
+ if ind is not None:
+ return (False, self.yaml_dict)
+ return (False, self.yaml_dict)
+
+ def put(self, path, value):
+ ''' put path, value into a dict '''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry == value:
+ return (False, self.yaml_dict)
+
+ # deepcopy didn't work
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ # pylint: disable=no-member
+ if hasattr(self.yaml_dict, 'fa'):
+ tmp_copy.fa.set_block_style()
+ result = Yedit.add_entry(tmp_copy, path, value, self.separator)
+ if not result:
+ return (False, self.yaml_dict)
+
+ self.yaml_dict = tmp_copy
+
+ return (True, self.yaml_dict)
+
+ def create(self, path, value):
+ ''' create a yaml file '''
+ if not self.file_exists():
+ # deepcopy didn't work
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False), # noqa: E501
+ yaml.RoundTripLoader)
+ # pylint: disable=no-member
+ if hasattr(self.yaml_dict, 'fa'):
+ tmp_copy.fa.set_block_style()
+ result = Yedit.add_entry(tmp_copy, path, value, self.separator)
+ if result:
+ self.yaml_dict = tmp_copy
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ @staticmethod
+ def get_curr_value(invalue, val_type):
+ '''return the current value'''
+ if invalue is None:
+ return None
+
+ curr_value = invalue
+ if val_type == 'yaml':
+ curr_value = yaml.load(invalue)
+ elif val_type == 'json':
+ curr_value = json.loads(invalue)
+
+ return curr_value
+
+ @staticmethod
+ def parse_value(inc_value, vtype=''):
+ '''determine value type passed'''
+ true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
+ 'on', 'On', 'ON', ]
+ false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
+ 'off', 'Off', 'OFF']
+
+ # The value came in as a string and value_type asks for a bool;
+ # accept it only if it matches one of the recognized boolean words.
+ if isinstance(inc_value, str) and 'bool' in vtype:
+ if inc_value not in true_bools and inc_value not in false_bools:
+ raise YeditException('Not a boolean type. str=[%s] vtype=[%s]'
+ % (inc_value, vtype))
+ elif isinstance(inc_value, bool) and 'str' in vtype:
+ inc_value = str(inc_value)
+
+ # If vtype is not str then go ahead and attempt to yaml load it.
+ if isinstance(inc_value, str) and 'str' not in vtype:
+ try:
+ inc_value = yaml.load(inc_value)
+ except Exception:
+ raise YeditException('Could not determine type of incoming ' +
+ 'value. type=[%s] vtype=[%s]'
+ % (type(inc_value), vtype))
+
+ return inc_value
+
+ # pylint: disable=too-many-return-statements,too-many-branches
+ @staticmethod
+ def run_ansible(module):
+ '''perform the idempotent crud operations'''
+ yamlfile = Yedit(filename=module.params['src'],
+ backup=module.params['backup'],
+ separator=module.params['separator'])
+
+ if module.params['src']:
+ rval = yamlfile.load()
+
+ if yamlfile.yaml_dict is None and \
+ module.params['state'] != 'present':
+ return {'failed': True,
+ 'msg': 'Error opening file [%s]. Verify that the ' +
+ 'file exists, that it has the correct' +
+ ' permissions, and is valid yaml.'}
+
+ if module.params['state'] == 'list':
+ if module.params['content']:
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
+ yamlfile.yaml_dict = content
+
+ if module.params['key']:
+ rval = yamlfile.get(module.params['key']) or {}
+
+ return {'changed': False, 'result': rval, 'state': "list"}
+
+ elif module.params['state'] == 'absent':
+ if module.params['content']:
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
+ yamlfile.yaml_dict = content
+
+ if module.params['update']:
+ rval = yamlfile.pop(module.params['key'],
+ module.params['value'])
+ else:
+ rval = yamlfile.delete(module.params['key'])
+
+ if rval[0] and module.params['src']:
+ yamlfile.write()
+
+ return {'changed': rval[0], 'result': rval[1], 'state': "absent"}
+
+ elif module.params['state'] == 'present':
+ # check if the content differs from what is in the file
+ if module.params['content']:
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
+
+ # We had no edits to make and the contents are the same
+ if yamlfile.yaml_dict == content and \
+ module.params['value'] is None:
+ return {'changed': False,
+ 'result': yamlfile.yaml_dict,
+ 'state': "present"}
+
+ yamlfile.yaml_dict = content
+
+ # we were passed a value; parse it
+ if module.params['value']:
+ value = Yedit.parse_value(module.params['value'],
+ module.params['value_type'])
+ key = module.params['key']
+ if module.params['update']:
+ # pylint: disable=line-too-long
+ curr_value = Yedit.get_curr_value(Yedit.parse_value(module.params['curr_value']), # noqa: E501
+ module.params['curr_value_format']) # noqa: E501
+
+ rval = yamlfile.update(key, value, module.params['index'], curr_value) # noqa: E501
+
+ elif module.params['append']:
+ rval = yamlfile.append(key, value)
+ else:
+ rval = yamlfile.put(key, value)
+
+ if rval[0] and module.params['src']:
+ yamlfile.write()
+
+ return {'changed': rval[0],
+ 'result': rval[1], 'state': "present"}
+
+ # no edits to make
+ if module.params['src']:
+ # pylint: disable=redefined-variable-type
+ rval = yamlfile.write()
+ return {'changed': rval[0],
+ 'result': rval[1],
+ 'state': "present"}
+
+ return {'failed': True, 'msg': 'Unknown state passed'}
+
+# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: lib/base.py -*- -*- -*-
+# pylint: disable=too-many-lines
+# noqa: E301,E302,E303,T001
+
+
+class OpenShiftCLIError(Exception):
+ '''Exception class for openshiftcli'''
+ pass
+
+
+# pylint: disable=too-few-public-methods
+class OpenShiftCLI(object):
+ ''' Class to wrap the command line tools '''
+ def __init__(self,
+ namespace,
+ kubeconfig='/etc/origin/master/admin.kubeconfig',
+ verbose=False,
+ all_namespaces=False):
+ ''' Constructor for OpenshiftCLI '''
+ self.namespace = namespace
+ self.verbose = verbose
+ self.kubeconfig = kubeconfig
+ self.all_namespaces = all_namespaces
+
+ # Pylint allows only 5 arguments to be passed.
+ # pylint: disable=too-many-arguments
+ def _replace_content(self, resource, rname, content, force=False, sep='.'):
+ ''' replace the current object with the content '''
+ res = self._get(resource, rname)
+ if not res['results']:
+ return res
+
+ fname = '/tmp/%s' % rname
+ yed = Yedit(fname, res['results'][0], separator=sep)
+ changes = []
+ for key, value in content.items():
+ changes.append(yed.put(key, value))
+
+ if any([change[0] for change in changes]):
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self._replace(fname, force)
+
+ return {'returncode': 0, 'updated': False}
+
+ def _replace(self, fname, force=False):
+ '''replace the current object with oc replace'''
+ cmd = ['replace', '-f', fname]
+ if force:
+ cmd.append('--force')
+ return self.openshift_cmd(cmd)
+
+ def _create_from_content(self, rname, content):
+ '''create a temporary file and then call oc create on it'''
+ fname = '/tmp/%s' % rname
+ yed = Yedit(fname, content=content)
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self._create(fname)
+
+ def _create(self, fname):
+ '''call oc create on a filename'''
+ return self.openshift_cmd(['create', '-f', fname])
+
+ def _delete(self, resource, rname, selector=None):
+ '''call oc delete on a resource'''
+ cmd = ['delete', resource, rname]
+ if selector:
+ cmd.append('--selector=%s' % selector)
+
+ return self.openshift_cmd(cmd)
+
+ def _process(self, template_name, create=False, params=None, template_data=None): # noqa: E501
+ '''process a template
+
+ template_name: the name of the template to process
+ create: whether to send to oc create after processing
+ params: the parameters for the template
+ template_data: the incoming template's data; instead of a file
+ '''
+ cmd = ['process']
+ if template_data:
+ cmd.extend(['-f', '-'])
+ else:
+ cmd.append(template_name)
+ if params:
+ param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ cmd.append('-v')
+ cmd.extend(param_str)
+
+ results = self.openshift_cmd(cmd, output=True, input_data=template_data)
+
+ if results['returncode'] != 0 or not create:
+ return results
+
+ fname = '/tmp/%s' % template_name
+ yed = Yedit(fname, results['results'])
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self.openshift_cmd(['create', '-f', fname])
+
+ def _get(self, resource, rname=None, selector=None):
+ '''return a resource by name '''
+ cmd = ['get', resource]
+ if selector:
+ cmd.append('--selector=%s' % selector)
+ elif rname:
+ cmd.append(rname)
+
+ cmd.extend(['-o', 'json'])
+
+ rval = self.openshift_cmd(cmd, output=True)
+
+ # Ensure results are returned in an array
+ if 'items' in rval:
+ rval['results'] = rval['items']
+ elif not isinstance(rval['results'], list):
+ rval['results'] = [rval['results']]
+
+ return rval
+
+ def _schedulable(self, node=None, selector=None, schedulable=True):
+ ''' perform oadm manage-node schedulable '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector=%s' % selector)
+
+ cmd.append('--schedulable=%s' % schedulable)
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
+
+ def _list_pods(self, node=None, selector=None, pod_selector=None):
+ ''' perform oadm manage-node --list-pods
+
+ node: the node in which to list pods
+ selector: the label selector filter if provided
+ pod_selector: the pod selector filter if provided
+ '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector=%s' % selector)
+
+ if pod_selector:
+ cmd.append('--pod-selector=%s' % pod_selector)
+
+ cmd.extend(['--list-pods', '-o', 'json'])
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
+
+ # pylint: disable=too-many-arguments
+ def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
+ ''' perform oadm manage-node evacuate '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector=%s' % selector)
+
+ if dry_run:
+ cmd.append('--dry-run')
+
+ if pod_selector:
+ cmd.append('--pod-selector=%s' % pod_selector)
+
+ if grace_period:
+ cmd.append('--grace-period=%s' % int(grace_period))
+
+ if force:
+ cmd.append('--force')
+
+ cmd.append('--evacuate')
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
+
+ def _version(self):
+ ''' return the openshift version'''
+ return self.openshift_cmd(['version'], output=True, output_type='raw')
+
+ def _import_image(self, url=None, name=None, tag=None):
+ ''' perform image import '''
+ cmd = ['import-image']
+
+ image = '{0}'.format(name)
+ if tag:
+ image += ':{0}'.format(tag)
+
+ cmd.append(image)
+
+ if url:
+ cmd.append('--from={0}/{1}'.format(url, image))
+
+ cmd.append('-n{0}'.format(self.namespace))
+
+ cmd.append('--confirm')
+ return self.openshift_cmd(cmd)
+
+ def _run(self, cmds, input_data):
+ ''' Actually executes the command. This makes mocking easier. '''
+ proc = subprocess.Popen(cmds,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ env={'KUBECONFIG': self.kubeconfig})
+
+ stdout, stderr = proc.communicate(input_data)
+
+ return proc.returncode, stdout, stderr
+
+ # pylint: disable=too-many-arguments,too-many-branches
+ def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
+ '''Base command for oc '''
+ cmds = []
+ if oadm:
+ cmds = ['/usr/bin/oadm']
+ else:
+ cmds = ['/usr/bin/oc']
+
+ if self.all_namespaces:
+ cmds.extend(['--all-namespaces'])
+ elif self.namespace is not None and self.namespace.lower() not in ['none', 'empty']: # E501
+ cmds.extend(['-n', self.namespace])
+
+ cmds.extend(cmd)
+
+ rval = {}
+ results = ''
+ err = None
+
+ if self.verbose:
+ print(' '.join(cmds))
+
+ returncode, stdout, stderr = self._run(cmds, input_data)
+
+ rval = {"returncode": returncode,
+ "results": results,
+ "cmd": ' '.join(cmds)}
+
+ if returncode == 0:
+ if output:
+ if output_type == 'json':
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as err:
+ if "No JSON object could be decoded" in err.args:
+ err = err.args
+ elif output_type == 'raw':
+ rval['results'] = stdout
+
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if err:
+ rval.update({"err": err,
+ "stderr": stderr,
+ "stdout": stdout,
+ "cmd": cmds})
+
+ else:
+ rval.update({"stderr": stderr,
+ "stdout": stdout,
+ "results": {}})
+
+ return rval
+
+
+class Utils(object):
+ ''' utilities for openshiftcli modules '''
+ @staticmethod
+ def create_file(rname, data, ftype='yaml'):
+ ''' create a file in tmp with name and contents'''
+ path = os.path.join('/tmp', rname)
+ with open(path, 'w') as fds:
+ if ftype == 'yaml':
+ fds.write(yaml.dump(data, Dumper=yaml.RoundTripDumper))
+
+ elif ftype == 'json':
+ fds.write(json.dumps(data))
+ else:
+ fds.write(data)
+
+ # Register cleanup when module is done
+ atexit.register(Utils.cleanup, [path])
+ return path
+
+ @staticmethod
+ def create_files_from_contents(content, content_type=None):
+ '''Turn an array of dict: filename, content into a files array'''
+ if not isinstance(content, list):
+ content = [content]
+ files = []
+ for item in content:
+ path = Utils.create_file(item['path'], item['data'], ftype=content_type)
+ files.append({'name': os.path.basename(path), 'path': path})
+ return files
+
+ @staticmethod
+ def cleanup(files):
+ '''Clean up on exit '''
+ for sfile in files:
+ if os.path.exists(sfile):
+ if os.path.isdir(sfile):
+ shutil.rmtree(sfile)
+ elif os.path.isfile(sfile):
+ os.remove(sfile)
+
+ @staticmethod
+ def exists(results, _name):
+ ''' Check to see if the results include the name '''
+ if not results:
+ return False
+
+ if Utils.find_result(results, _name):
+ return True
+
+ return False
+
+ @staticmethod
+ def find_result(results, _name):
+ ''' Find the specified result by name'''
+ rval = None
+ for result in results:
+ if 'metadata' in result and result['metadata']['name'] == _name:
+ rval = result
+ break
+
+ return rval
+
+ @staticmethod
+ def get_resource_file(sfile, sfile_type='yaml'):
+ ''' return the service file '''
+ contents = None
+ with open(sfile) as sfd:
+ contents = sfd.read()
+
+ if sfile_type == 'yaml':
+ contents = yaml.load(contents, yaml.RoundTripLoader)
+ elif sfile_type == 'json':
+ contents = json.loads(contents)
+
+ return contents
+
+ @staticmethod
+ def filter_versions(stdout):
+ ''' filter the oc version output '''
+
+ version_dict = {}
+ version_search = ['oc', 'openshift', 'kubernetes']
+
+ for line in stdout.strip().split('\n'):
+ for term in version_search:
+ if not line:
+ continue
+ if line.startswith(term):
+ version_dict[term] = line.split()[-1]
+
+ # horrible hack to get the openshift version in OpenShift 3.2
+ # By default "oc version" in 3.2 does not return an "openshift" version
+ if "openshift" not in version_dict:
+ version_dict["openshift"] = version_dict["oc"]
+
+ return version_dict
+
+ @staticmethod
+ def add_custom_versions(versions):
+ ''' create custom versions strings '''
+
+ versions_dict = {}
+
+ for tech, version in versions.items():
+ # clean up "-" from version
+ if "-" in version:
+ version = version.split("-")[0]
+
+ if version.startswith('v'):
+ versions_dict[tech + '_numeric'] = version[1:].split('+')[0]
+ # "v3.3.0.33" is what we have, we want "3.3"
+ versions_dict[tech + '_short'] = version[1:4]
+
+ return versions_dict
+
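Together, filter_versions and add_custom_versions turn raw `oc version` stdout into the flat dict the version-handling roles consume; a short illustration (the stdout sample is fabricated but follows the format the parser expects):

    # Fabricated sample; matches the 'oc version' layout the helpers expect.
    stdout = 'oc v3.3.0.33\nkubernetes v1.3.0+52492b4\n'

    versions = Utils.filter_versions(stdout)
    # {'oc': 'v3.3.0.33', 'kubernetes': 'v1.3.0+52492b4',
    #  'openshift': 'v3.3.0.33'}   <- the 3.2 fallback copies the 'oc' value

    custom = Utils.add_custom_versions(versions)
    # {'oc_numeric': '3.3.0.33', 'oc_short': '3.3',
    #  'kubernetes_numeric': '1.3.0', 'kubernetes_short': '1.3',
    #  'openshift_numeric': '3.3.0.33', 'openshift_short': '3.3'}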
+ @staticmethod
+ def openshift_installed():
+ ''' check if openshift is installed '''
+ import yum
+
+ yum_base = yum.YumBase()
+ if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
+ return True
+
+ return False
+
+ # Disabling too-many-branches. This is a yaml dictionary comparison function
+ # pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
+ @staticmethod
+ def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
+ ''' Given a user defined definition, compare it with the results given back by our query. '''
+
+ # Currently these values are autogenerated and we do not need to check them
+ skip = ['metadata', 'status']
+ if skip_keys:
+ skip.extend(skip_keys)
+
+ for key, value in result_def.items():
+ if key in skip:
+ continue
+
+ # Both are lists
+ if isinstance(value, list):
+ if key not in user_def:
+ if debug:
+ print('User data does not have key [%s]' % key)
+ print('User data: %s' % user_def)
+ return False
+
+ if not isinstance(user_def[key], list):
+ if debug:
+ print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key]))
+ return False
+
+ if len(user_def[key]) != len(value):
+ if debug:
+ print("List lengths are not equal.")
+ print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value)))
+ print("user_def: %s" % user_def[key])
+ print("value: %s" % value)
+ return False
+
+ for values in zip(user_def[key], value):
+ if isinstance(values[0], dict) and isinstance(values[1], dict):
+ if debug:
+ print('sending list - list')
+ print(type(values[0]))
+ print(type(values[1]))
+ result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
+ if not result:
+ print('list compare returned false')
+ return False
+
+ elif value != user_def[key]:
+ if debug:
+ print('value should be identical')
+ print(value)
+ print(user_def[key])
+ return False
+
+ # recurse on a dictionary
+ elif isinstance(value, dict):
+ if key not in user_def:
+ if debug:
+ print("user_def does not have key [%s]" % key)
+ return False
+ if not isinstance(user_def[key], dict):
+ if debug:
+ print("dict returned false: not instance of dict")
+ return False
+
+ # before passing ensure keys match
+ api_values = set(value.keys()) - set(skip)
+ user_values = set(user_def[key].keys()) - set(skip)
+ if api_values != user_values:
+ if debug:
+ print("keys are not equal in dict")
+ print(api_values)
+ print(user_values)
+ return False
+
+ result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
+ if not result:
+ if debug:
+ print("dict returned false")
+ print(result)
+ return False
+
+ # Verify each key, value pair is the same
+ else:
+ if key not in user_def or value != user_def[key]:
+ if debug:
+ print("value not equal; user_def does not have key")
+ print(key)
+ print(value)
+ if key in user_def:
+ print(user_def[key])
+ return False
+
+ if debug:
+ print('returning true')
+ return True
+
+
+class OpenShiftCLIConfig(object):
+ '''Generic Config'''
+ def __init__(self, rname, namespace, kubeconfig, options):
+ self.kubeconfig = kubeconfig
+ self.name = rname
+ self.namespace = namespace
+ self._options = options
+
+ @property
+ def config_options(self):
+ ''' return config options '''
+ return self._options
+
+ def to_option_list(self):
+ '''return all options as a string'''
+ return self.stringify()
+
+ def stringify(self):
+ ''' return the options hash as cli params in a string '''
+ rval = []
+ for key, data in self.config_options.items():
+ if data['include'] \
+ and (data['value'] or isinstance(data['value'], int)):
+ rval.append('--%s=%s' % (key.replace('_', '-'), data['value']))
+
+ return rval
+
+
+# -*- -*- -*- End included fragment: lib/base.py -*- -*- -*-
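Since check_def_equal drives the idempotency decisions in most of these modules, a small worked example of its semantics may help; the values are illustrative:

    # 'metadata' and 'status' are in the default skip list, so only the
    # remaining keys are compared (recursively for nested dicts).
    user_def = {'spec': {'replicas': 2}, 'metadata': {'name': 'x'}}
    result_def = {'spec': {'replicas': 2},
                  'metadata': {'name': 'x', 'uid': 'abc'},  # skipped
                  'status': {'phase': 'Active'}}            # skipped

    Utils.check_def_equal(user_def, result_def)                   # -> True
    Utils.check_def_equal({'spec': {'replicas': 3}}, result_def)  # -> False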
+
+# -*- -*- -*- Begin included fragment: lib/serviceaccount.py -*- -*- -*-
+
+class ServiceAccountConfig(object):
+ '''Service account config class
+
+ This class stores the options and returns a default service account
+ '''
+
+ # pylint: disable=too-many-arguments
+ def __init__(self, sname, namespace, kubeconfig, secrets=None, image_pull_secrets=None):
+ self.name = sname
+ self.kubeconfig = kubeconfig
+ self.namespace = namespace
+ self.secrets = secrets or []
+ self.image_pull_secrets = image_pull_secrets or []
+ self.data = {}
+ self.create_dict()
+
+ def create_dict(self):
+ ''' return a properly structured service account '''
+ self.data['apiVersion'] = 'v1'
+ self.data['kind'] = 'ServiceAccount'
+ self.data['metadata'] = {}
+ self.data['metadata']['name'] = self.name
+ self.data['metadata']['namespace'] = self.namespace
+
+ self.data['secrets'] = []
+ if self.secrets:
+ for sec in self.secrets:
+ self.data['secrets'].append({"name": sec})
+
+ self.data['imagePullSecrets'] = []
+ if self.image_pull_secrets:
+ for sec in self.image_pull_secrets:
+ self.data['imagePullSecrets'].append({"name": sec})
+
+
+class ServiceAccount(Yedit):
+ ''' Class to wrap the oc command line tools '''
+ image_pull_secrets_path = "imagePullSecrets"
+ secrets_path = "secrets"
+
+ def __init__(self, content):
+ '''ServiceAccount constructor'''
+ super(ServiceAccount, self).__init__(content=content)
+ self._secrets = None
+ self._image_pull_secrets = None
+
+ @property
+ def image_pull_secrets(self):
+ ''' property for image_pull_secrets '''
+ if self._image_pull_secrets is None:
+ self._image_pull_secrets = self.get(ServiceAccount.image_pull_secrets_path) or []
+ return self._image_pull_secrets
+
+ @image_pull_secrets.setter
+ def image_pull_secrets(self, secrets):
+ ''' setter for image_pull_secrets '''
+ self._image_pull_secrets = secrets
+
+ @property
+ def secrets(self):
+ ''' property for secrets '''
+ if not self._secrets:
+ self._secrets = self.get(ServiceAccount.secrets_path) or []
+ return self._secrets
+
+ @secrets.setter
+ def secrets(self, secrets):
+ ''' setter for secrets '''
+ self._secrets = secrets
+
+ def delete_secret(self, inc_secret):
+ ''' remove a secret '''
+ remove_idx = None
+ for idx, sec in enumerate(self.secrets):
+ if sec['name'] == inc_secret:
+ remove_idx = idx
+ break
+
+ if remove_idx is not None:
+ del self.secrets[remove_idx]
+ return True
+
+ return False
+
+ def delete_image_pull_secret(self, inc_secret):
+ ''' remove an image_pull_secret '''
+ remove_idx = None
+ for idx, sec in enumerate(self.image_pull_secrets):
+ if sec['name'] == inc_secret:
+ remove_idx = idx
+ break
+
+ if remove_idx is not None:
+ del self.image_pull_secrets[remove_idx]
+ return True
+
+ return False
+
+ def find_secret(self, inc_secret):
+ '''find secret'''
+ for secret in self.secrets:
+ if secret['name'] == inc_secret:
+ return secret
+
+ return None
+
+ def find_image_pull_secret(self, inc_secret):
+ '''find image pull secret'''
+ for secret in self.image_pull_secrets:
+ if secret['name'] == inc_secret:
+ return secret
+
+ return None
+
+ def add_secret(self, inc_secret):
+ '''add secret'''
+ if self.secrets:
+ self.secrets.append({"name": inc_secret}) # pylint: disable=no-member
+ else:
+ self.put(ServiceAccount.secrets_path, [{"name": inc_secret}])
+
+ def add_image_pull_secret(self, inc_secret):
+ '''add image_pull_secret'''
+ if self.image_pull_secrets:
+ self.image_pull_secrets.append({"name": inc_secret}) # pylint: disable=no-member
+ else:
+ self.put(ServiceAccount.image_pull_secrets_path, [{"name": inc_secret}])
+
+# -*- -*- -*- End included fragment: lib/serviceaccount.py -*- -*- -*-
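For reference, ServiceAccountConfig.create_dict builds the bare API object that is later fed to oc create; with illustrative inputs:

    # Illustrative only: the dict ServiceAccountConfig assembles.
    sacfg = ServiceAccountConfig('registry', 'default',
                                 '/etc/origin/master/admin.kubeconfig',
                                 secrets=['registry-secret'])
    # sacfg.data ==
    # {'apiVersion': 'v1',
    #  'kind': 'ServiceAccount',
    #  'metadata': {'name': 'registry', 'namespace': 'default'},
    #  'secrets': [{'name': 'registry-secret'}],
    #  'imagePullSecrets': []}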
+
+# -*- -*- -*- Begin included fragment: class/oc_serviceaccount.py -*- -*- -*-
+
+# pylint: disable=too-many-instance-attributes
+class OCServiceAccount(OpenShiftCLI):
+ ''' Class to wrap the oc command line tools '''
+ kind = 'sa'
+
+ # pylint allows 5
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ config,
+ verbose=False):
+ ''' Constructor for OCServiceAccount '''
+ super(OCServiceAccount, self).__init__(config.namespace, config.kubeconfig)
+ self.config = config
+ self.namespace = config.namespace
+ self.service_account = None
+
+ def exists(self):
+ ''' return whether the service account exists '''
+ if self.service_account:
+ return True
+
+ return False
+
+ def get(self):
+ '''return service account information '''
+ result = self._get(self.kind, self.config.name)
+ if result['returncode'] == 0:
+ self.service_account = ServiceAccount(content=result['results'][0])
+ elif '\"%s\" not found' % self.config.name in result['stderr']:
+ result['returncode'] = 0
+ result['results'] = [{}]
+
+ return result
+
+ def delete(self):
+ '''delete the object'''
+ return self._delete(self.kind, self.config.name)
+
+ def create(self):
+ '''create the object'''
+ return self._create_from_content(self.config.name, self.config.data)
+
+ def update(self):
+ '''update the object'''
+ # add any missing secrets and image pull secrets, then replace
+ for secret in self.config.secrets:
+ result = self.service_account.find_secret(secret)
+ if not result:
+ self.service_account.add_secret(secret)
+
+ for secret in self.config.image_pull_secrets:
+ result = self.service_account.find_image_pull_secret(secret)
+ if not result:
+ self.service_account.add_image_pull_secret(secret)
+
+ return self._replace_content(self.kind, self.config.name, self.config.data)
+
+ def needs_update(self):
+ ''' verify an update is needed '''
+ # since creating a service account generates secrets and imagePullSecrets,
+ # check_def_equal will not work
+ # Instead, verify all secrets passed are in the list
+ for secret in self.config.secrets:
+ result = self.service_account.find_secret(secret)
+ if not result:
+ return True
+
+ for secret in self.config.image_pull_secrets:
+ result = self.service_account.find_image_pull_secret(secret)
+ if not result:
+ return True
+
+ return False
+
+ @staticmethod
+ # pylint: disable=too-many-return-statements,too-many-branches
+ # TODO: This function should be refactored into its individual parts.
+ def run_ansible(params, check_mode):
+ '''run the ansible idempotent code'''
+
+ rconfig = ServiceAccountConfig(params['name'],
+ params['namespace'],
+ params['kubeconfig'],
+ params['secrets'],
+ params['image_pull_secrets'],
+ )
+
+ oc_sa = OCServiceAccount(rconfig,
+ verbose=params['debug'])
+
+ state = params['state']
+
+ api_rval = oc_sa.get()
+
+ #####
+ # Get
+ #####
+ if state == 'list':
+ return {'changed': False, 'results': api_rval['results'], 'state': 'list'}
+
+ ########
+ # Delete
+ ########
+ if state == 'absent':
+ if oc_sa.exists():
+
+ if check_mode:
+ return {'changed': True, 'msg': 'Would have performed a delete.'}
+
+ api_rval = oc_sa.delete()
+
+ return {'changed': True, 'results': api_rval, 'state': 'absent'}
+
+ return {'changed': False, 'state': 'absent'}
+
+ if state == 'present':
+ ########
+ # Create
+ ########
+ if not oc_sa.exists():
+
+ if check_mode:
+ return {'changed': True, 'msg': 'Would have performed a create.'}
+
+ # Create it here
+ api_rval = oc_sa.create()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ # return the created object
+ api_rval = oc_sa.get()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, 'state': 'present'}
+
+ ########
+ # Update
+ ########
+ if oc_sa.needs_update():
+ api_rval = oc_sa.update()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ # return the created object
+ api_rval = oc_sa.get()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, 'state': 'present'}
+
+ return {'changed': False, 'results': api_rval, 'state': 'present'}
+
+
+ return {'failed': True,
+ 'changed': False,
+ 'msg': 'Unknown state passed. %s' % state,
+ 'state': 'unknown'}
+
+# -*- -*- -*- End included fragment: class/oc_serviceaccount.py -*- -*- -*-
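run_ansible is also the entry point the unit and integration tests drive directly; a hedged sketch of a call (the params are illustrative and assume a reachable cluster with a valid kubeconfig at the default path):

    # Illustrative driver for OCServiceAccount.run_ansible.
    params = {'name': 'registry',
              'namespace': 'default',
              'kubeconfig': '/etc/origin/master/admin.kubeconfig',
              'secrets': ['registry-secret'],
              'image_pull_secrets': None,
              'state': 'present',
              'debug': False}

    rval = OCServiceAccount.run_ansible(params, check_mode=False)
    # first run:  {'changed': True,  'results': {...}, 'state': 'present'}
    # second run: {'changed': False, 'results': {...}, 'state': 'present'}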
+
+# -*- -*- -*- Begin included fragment: ansible/oc_serviceaccount.py -*- -*- -*-
+
+def main():
+ '''
+ ansible oc module for service accounts
+ '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
+ state=dict(default='present', type='str',
+ choices=['present', 'absent', 'list']),
+ debug=dict(default=False, type='bool'),
+ name=dict(default=None, required=True, type='str'),
+ namespace=dict(default=None, required=True, type='str'),
+ secrets=dict(default=None, type='list'),
+ image_pull_secrets=dict(default=None, type='list'),
+ ),
+ supports_check_mode=True,
+ )
+
+ rval = OCServiceAccount.run_ansible(module.params, module.check_mode)
+ if 'failed' in rval:
+ module.fail_json(**rval)
+
+ module.exit_json(**rval)
+
+if __name__ == '__main__':
+ main()
+
+# -*- -*- -*- End included fragment: ansible/oc_serviceaccount.py -*- -*- -*-
diff --git a/roles/lib_openshift/library/oc_version.py b/roles/lib_openshift/library/oc_version.py
index d74564352..4a20b72fe 100644
--- a/roles/lib_openshift/library/oc_version.py
+++ b/roles/lib_openshift/library/oc_version.py
@@ -860,6 +860,18 @@ class OpenShiftCLI(object):
cmd.append('--confirm')
return self.openshift_cmd(cmd)
+ def _run(self, cmds, input_data):
+ ''' Actually executes the command. This makes mocking easier. '''
+ proc = subprocess.Popen(cmds,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ env={'KUBECONFIG': self.kubeconfig})
+
+ stdout, stderr = proc.communicate(input_data)
+
+ return proc.returncode, stdout, stderr
+
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
'''Base command for oc '''
@@ -871,7 +883,7 @@ class OpenShiftCLI(object):
if self.all_namespaces:
cmds.extend(['--all-namespaces'])
- elif self.namespace:
+ elif self.namespace is not None and self.namespace.lower() not in ['none', 'empty']: # E501
cmds.extend(['-n', self.namespace])
cmds.extend(cmd)
@@ -883,18 +895,13 @@ class OpenShiftCLI(object):
if self.verbose:
print(' '.join(cmds))
- proc = subprocess.Popen(cmds,
- stdin=subprocess.PIPE,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- env={'KUBECONFIG': self.kubeconfig})
+ returncode, stdout, stderr = self._run(cmds, input_data)
- stdout, stderr = proc.communicate(input_data)
- rval = {"returncode": proc.returncode,
+ rval = {"returncode": returncode,
"results": results,
"cmd": ' '.join(cmds)}
- if proc.returncode == 0:
+ if returncode == 0:
if output:
if output_type == 'json':
try:
diff --git a/roles/lib_openshift/src/ansible/oadm_manage_node.py b/roles/lib_openshift/src/ansible/oadm_manage_node.py
new file mode 100644
index 000000000..b870c1211
--- /dev/null
+++ b/roles/lib_openshift/src/ansible/oadm_manage_node.py
@@ -0,0 +1,38 @@
+# pylint: skip-file
+# flake8: noqa
+
+
+def main():
+ '''
+ ansible oadm module for manage-node
+ '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ debug=dict(default=False, type='bool'),
+ kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
+ node=dict(default=None, type='list'),
+ selector=dict(default=None, type='str'),
+ pod_selector=dict(default=None, type='str'),
+ schedulable=dict(default=None, type='bool'),
+ list_pods=dict(default=False, type='bool'),
+ evacuate=dict(default=False, type='bool'),
+ dry_run=dict(default=False, type='bool'),
+ force=dict(default=False, type='bool'),
+ grace_period=dict(default=None, type='int'),
+ ),
+ mutually_exclusive=[["selector", "node"], ['evacuate', 'list_pods'], ['list_pods', 'schedulable']],
+ required_one_of=[["node", "selector"]],
+
+ supports_check_mode=True,
+ )
+ results = ManageNode.run_ansible(module.params, module.check_mode)
+
+ if 'failed' in results:
+ module.fail_json(**results)
+
+ module.exit_json(**results)
+
+
+if __name__ == "__main__":
+ main()
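The argument_spec above makes node and selector mutually exclusive while requiring one of them, so a minimal invocation sets exactly one; an illustrative params dict for driving ManageNode.run_ansible outside of Ansible:

    # Illustrative params matching the argument_spec; 'node' is set, so
    # 'selector' must remain None (mutually exclusive).
    params = {'debug': False,
              'kubeconfig': '/etc/origin/master/admin.kubeconfig',
              'node': ['ip-172-31-49-140.ec2.internal'],
              'selector': None,
              'pod_selector': None,
              'schedulable': False,
              'list_pods': False,
              'evacuate': False,
              'dry_run': False,
              'force': False,
              'grace_period': None}

    results = ManageNode.run_ansible(params, False)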
diff --git a/roles/lib_openshift/src/ansible/oc_serviceaccount.py b/roles/lib_openshift/src/ansible/oc_serviceaccount.py
new file mode 100644
index 000000000..ea9bdb455
--- /dev/null
+++ b/roles/lib_openshift/src/ansible/oc_serviceaccount.py
@@ -0,0 +1,30 @@
+# pylint: skip-file
+# flake8: noqa
+
+def main():
+ '''
+ ansible oc module for route
+ '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
+ state=dict(default='present', type='str',
+ choices=['present', 'absent', 'list']),
+ debug=dict(default=False, type='bool'),
+ name=dict(default=None, required=True, type='str'),
+ namespace=dict(default=None, required=True, type='str'),
+ secrets=dict(default=None, type='list'),
+ image_pull_secrets=dict(default=None, type='list'),
+ ),
+ supports_check_mode=True,
+ )
+
+ rval = OCServiceAccount.run_ansible(module.params, module.check_mode)
+ if 'failed' in rval:
+ module.fail_json(**rval)
+
+ module.exit_json(**rval)
+
+if __name__ == '__main__':
+ main()
diff --git a/roles/lib_openshift/src/class/oadm_manage_node.py b/roles/lib_openshift/src/class/oadm_manage_node.py
new file mode 100644
index 000000000..61b6a5ebe
--- /dev/null
+++ b/roles/lib_openshift/src/class/oadm_manage_node.py
@@ -0,0 +1,209 @@
+# pylint: skip-file
+# flake8: noqa
+
+
+class ManageNodeException(Exception):
+ ''' manage-node exception class '''
+ pass
+
+
+class ManageNodeConfig(OpenShiftCLIConfig):
+ ''' ManageNodeConfig is a DTO for the manage-node command.'''
+ def __init__(self, kubeconfig, node_options):
+ super(ManageNodeConfig, self).__init__(None, None, kubeconfig, node_options)
+
+
+# pylint: disable=too-many-instance-attributes
+class ManageNode(OpenShiftCLI):
+ ''' Class to wrap the oc command line tools '''
+
+ # pylint allows 5
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ config,
+ verbose=False):
+ ''' Constructor for ManageNode '''
+ super(ManageNode, self).__init__(None, config.kubeconfig)
+ self.config = config
+
+ def evacuate(self):
+ ''' formulate the params and run oadm manage-node '''
+ return self._evacuate(node=self.config.config_options['node']['value'],
+ selector=self.config.config_options['selector']['value'],
+ pod_selector=self.config.config_options['pod_selector']['value'],
+ dry_run=self.config.config_options['dry_run']['value'],
+ grace_period=self.config.config_options['grace_period']['value'],
+ force=self.config.config_options['force']['value'],
+ )
+
+ def get_nodes(self, node=None, selector=''):
+ '''perform oc get node'''
+ _node = None
+ _sel = None
+ if node:
+ _node = node
+ if selector:
+ _sel = selector
+
+ results = self._get('node', rname=_node, selector=_sel)
+ if results['returncode'] != 0:
+ return results
+
+ nodes = []
+ items = None
+ if results['results'][0]['kind'] == 'List':
+ items = results['results'][0]['items']
+ else:
+ items = results['results']
+
+ for node in items:
+ _node = {}
+ _node['name'] = node['metadata']['name']
+ _node['schedulable'] = True
+ if 'unschedulable' in node['spec']:
+ _node['schedulable'] = False
+ nodes.append(_node)
+
+ return nodes
+
+ def get_pods_from_node(self, node, pod_selector=None):
+ '''return pods for a node'''
+ results = self._list_pods(node=[node], pod_selector=pod_selector)
+
+ if results['returncode'] != 0:
+ return results
+
+ # When a selector or node is matched it is returned along with the json.
+ # We are going to split the results based on the regexp and then
+ # load the json for each matching node.
+ # Before we return we are going to loop over the results and pull out the node names.
+ # {'node': [pod, pod], 'node': [pod, pod]}
+ # 3.2 includes the following lines in stdout: "Listing matched pods on node:"
+ all_pods = []
+ if "Listing matched" in results['results']:
+ listing_match = re.compile('\n^Listing matched.*$\n', flags=re.MULTILINE)
+ pods = listing_match.split(results['results'])
+ for pod in pods:
+ if pod:
+ all_pods.extend(json.loads(pod)['items'])
+
+ # 3.3 specific
+ else:
+ # this is gross but I filed a bug...
+ # https://bugzilla.redhat.com/show_bug.cgi?id=1381621
+ # build our own json from the output.
+ all_pods = json.loads(results['results'])['items']
+
+ return all_pods
+
+ def list_pods(self):
+ ''' run oadm manage-node --list-pods'''
+ _nodes = self.config.config_options['node']['value']
+ _selector = self.config.config_options['selector']['value']
+ _pod_selector = self.config.config_options['pod_selector']['value']
+
+ if not _nodes:
+ _nodes = self.get_nodes(selector=_selector)
+ else:
+ _nodes = [{'name': name} for name in _nodes]
+
+ all_pods = {}
+ for node in _nodes:
+ results = self.get_pods_from_node(node['name'], pod_selector=_pod_selector)
+ if isinstance(results, dict):
+ return results
+ all_pods[node['name']] = results
+
+ results = {}
+ results['nodes'] = all_pods
+ results['returncode'] = 0
+ return results
+
+ def schedulable(self):
+ '''oadm manage-node call for making nodes unschedulable'''
+ nodes = self.config.config_options['node']['value']
+ selector = self.config.config_options['selector']['value']
+
+ if not nodes:
+ nodes = self.get_nodes(selector=selector)
+ else:
+ tmp_nodes = []
+ for name in nodes:
+ tmp_result = self.get_nodes(name)
+ if isinstance(tmp_result, dict):
+ tmp_nodes.append(tmp_result)
+ continue
+ tmp_nodes.extend(tmp_result)
+ nodes = tmp_nodes
+
+ # This is a short circuit based on the way we fetch nodes.
+ # If node is a dict/list then we've already fetched them.
+ for node in nodes:
+ if isinstance(node, dict) and 'returncode' in node:
+ return {'results': nodes, 'returncode': node['returncode']}
+ if isinstance(node, list) and 'returncode' in node[0]:
+ return {'results': nodes, 'returncode': node[0]['returncode']}
+ # check all the nodes that were returned and verify they are:
+ # node['schedulable'] == self.config.config_options['schedulable']['value']
+ if any([node['schedulable'] != self.config.config_options['schedulable']['value'] for node in nodes]):
+
+ results = self._schedulable(node=self.config.config_options['node']['value'],
+ selector=self.config.config_options['selector']['value'],
+ schedulable=self.config.config_options['schedulable']['value'])
+
+ # 'NAME STATUS AGE\\nip-172-31-49-140.ec2.internal Ready 4h\\n' # E501
+ # normalize formatting with previous return objects
+ if results['results'].startswith('NAME'):
+ nodes = []
+ # removing header line and trailing new line character of node lines
+ for node_results in results['results'].split('\n')[1:-1]:
+ parts = node_results.split()
+ nodes.append({'name': parts[0], 'schedulable': parts[1] == 'Ready'})
+ results['nodes'] = nodes
+
+ return results
+
+ results = {}
+ results['returncode'] = 0
+ results['changed'] = False
+ results['nodes'] = nodes
+
+ return results
+
+ @staticmethod
+ def run_ansible(params, check_mode):
+ '''run the idempotent ansible code'''
+ nconfig = ManageNodeConfig(params['kubeconfig'],
+ {'node': {'value': params['node'], 'include': True},
+ 'selector': {'value': params['selector'], 'include': True},
+ 'pod_selector': {'value': params['pod_selector'], 'include': True},
+ 'schedulable': {'value': params['schedulable'], 'include': True},
+ 'list_pods': {'value': params['list_pods'], 'include': True},
+ 'evacuate': {'value': params['evacuate'], 'include': True},
+ 'dry_run': {'value': params['dry_run'], 'include': True},
+ 'force': {'value': params['force'], 'include': True},
+ 'grace_period': {'value': params['grace_period'], 'include': True},
+ })
+
+ oadm_mn = ManageNode(nconfig)
+ # Run the oadm manage-node commands
+ results = None
+ changed = False
+ if params['schedulable'] is not None:
+ if check_mode:
+ # schedulable returns results after the fact.
+ # We need to redo how this works to support check_mode completely.
+ return {'changed': True, 'msg': 'CHECK_MODE: would have called schedulable.'}
+ results = oadm_mn.schedulable()
+ if 'changed' not in results:
+ changed = True
+
+ if params['evacuate']:
+ results = oadm_mn.evacuate()
+ changed = True
+ elif params['list_pods']:
+ results = oadm_mn.list_pods()
+
+ if not results or results['returncode'] != 0:
+ return {'failed': True, 'msg': results}
+
+ return {'changed': changed, 'results': results, 'state': "present"}
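The 3.2-era output handling in get_pods_from_node is the subtlest part of this class; a short illustration of the regex split it performs (the stdout sample is fabricated):

    import json
    import re

    # Fabricated 3.2-style stdout: each matched node is announced on a
    # 'Listing matched pods on node: ...' line followed by a JSON document.
    raw = ('\nListing matched pods on node: node-a\n'
           '{"items": [{"metadata": {"name": "pod-1"}}]}\n'
           '\nListing matched pods on node: node-b\n'
           '{"items": [{"metadata": {"name": "pod-2"}}]}')

    listing_match = re.compile('\n^Listing matched.*$\n', flags=re.MULTILINE)
    pods = [json.loads(p)['items'] for p in listing_match.split(raw) if p]
    # -> [[{'metadata': {'name': 'pod-1'}}],
    #     [{'metadata': {'name': 'pod-2'}}]]
    # the empty first element from the split is dropped by the 'if p' guard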
diff --git a/roles/lib_openshift/src/class/oc_scale.py b/roles/lib_openshift/src/class/oc_scale.py
index 19fba3af5..16255688b 100644
--- a/roles/lib_openshift/src/class/oc_scale.py
+++ b/roles/lib_openshift/src/class/oc_scale.py
@@ -77,6 +77,8 @@ class OCScale(OpenShiftCLI):
state = params['state']
api_rval = oc_scale.get()
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
#####
# Get
diff --git a/roles/lib_openshift/src/class/oc_serviceaccount.py b/roles/lib_openshift/src/class/oc_serviceaccount.py
new file mode 100644
index 000000000..47c7b5c94
--- /dev/null
+++ b/roles/lib_openshift/src/class/oc_serviceaccount.py
@@ -0,0 +1,165 @@
+# pylint: skip-file
+# flake8: noqa
+
+# pylint: disable=too-many-instance-attributes
+class OCServiceAccount(OpenShiftCLI):
+ ''' Class to wrap the oc command line tools '''
+ kind = 'sa'
+
+ # pylint allows 5
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ config,
+ verbose=False):
+ ''' Constructor for OCServiceAccount '''
+ super(OCServiceAccount, self).__init__(config.namespace, config.kubeconfig)
+ self.config = config
+ self.namespace = config.namespace
+ self.service_account = None
+
+ def exists(self):
+ ''' return whether the service account exists '''
+ if self.service_account:
+ return True
+
+ return False
+
+ def get(self):
+ '''return service account information '''
+ result = self._get(self.kind, self.config.name)
+ if result['returncode'] == 0:
+ self.service_account = ServiceAccount(content=result['results'][0])
+ elif '\"%s\" not found' % self.config.name in result['stderr']:
+ result['returncode'] = 0
+ result['results'] = [{}]
+
+ return result
+
+ def delete(self):
+ '''delete the object'''
+ return self._delete(self.kind, self.config.name)
+
+ def create(self):
+ '''create the object'''
+ return self._create_from_content(self.config.name, self.config.data)
+
+ def update(self):
+ '''update the object'''
+ # add any missing secrets and image pull secrets, then replace
+ for secret in self.config.secrets:
+ result = self.service_account.find_secret(secret)
+ if not result:
+ self.service_account.add_secret(secret)
+
+ for secret in self.config.image_pull_secrets:
+ result = self.service_account.find_image_pull_secret(secret)
+ if not result:
+ self.service_account.add_image_pull_secret(secret)
+
+ return self._replace_content(self.kind, self.config.name, self.config.data)
+
+ def needs_update(self):
+ ''' verify an update is needed '''
+ # since creating a service account generates secrets and imagePullSecrets,
+ # check_def_equal will not work
+ # Instead, verify all secrets passed are in the list
+ for secret in self.config.secrets:
+ result = self.service_account.find_secret(secret)
+ if not result:
+ return True
+
+ for secret in self.config.image_pull_secrets:
+ result = self.service_account.find_image_pull_secret(secret)
+ if not result:
+ return True
+
+ return False
+
+ @staticmethod
+ # pylint: disable=too-many-return-statements,too-many-branches
+ # TODO: This function should be refactored into its individual parts.
+ def run_ansible(params, check_mode):
+ '''run the ansible idempotent code'''
+
+ rconfig = ServiceAccountConfig(params['name'],
+ params['namespace'],
+ params['kubeconfig'],
+ params['secrets'],
+ params['image_pull_secrets'],
+ )
+
+ oc_sa = OCServiceAccount(rconfig,
+ verbose=params['debug'])
+
+ state = params['state']
+
+ api_rval = oc_sa.get()
+
+ #####
+ # Get
+ #####
+ if state == 'list':
+ return {'changed': False, 'results': api_rval['results'], 'state': 'list'}
+
+ ########
+ # Delete
+ ########
+ if state == 'absent':
+ if oc_sa.exists():
+
+ if check_mode:
+ return {'changed': True, 'msg': 'Would have performed a delete.'}
+
+ api_rval = oc_sa.delete()
+
+ return {'changed': True, 'results': api_rval, 'state': 'absent'}
+
+ return {'changed': False, 'state': 'absent'}
+
+ if state == 'present':
+ ########
+ # Create
+ ########
+ if not oc_sa.exists():
+
+ if check_mode:
+ return {'changed': True, 'msg': 'Would have performed a create.'}
+
+ # Create it here
+ api_rval = oc_sa.create()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ # return the created object
+ api_rval = oc_sa.get()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, 'state': 'present'}
+
+ ########
+ # Update
+ ########
+ if oc_sa.needs_update():
+ api_rval = oc_sa.update()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ # return the created object
+ api_rval = oc_sa.get()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, 'state': 'present'}
+
+ return {'changed': False, 'results': api_rval, 'state': 'present'}
+
+
+ return {'failed': True,
+ 'changed': False,
+ 'msg': 'Unknown state passed. %s' % state,
+ 'state': 'unknown'}
diff --git a/roles/lib_openshift/src/doc/manage_node b/roles/lib_openshift/src/doc/manage_node
new file mode 100644
index 000000000..382377f3e
--- /dev/null
+++ b/roles/lib_openshift/src/doc/manage_node
@@ -0,0 +1,88 @@
+# flake8: noqa
+# pylint: skip-file
+
+DOCUMENTATION = '''
+---
+module: oadm_manage_node
+short_description: Module to manage openshift nodes
+description:
+ - Manage openshift nodes programmatically.
+options:
+ kubeconfig:
+ description:
+ - The path for the kubeconfig file to use for authentication
+ required: false
+ default: /etc/origin/master/admin.kubeconfig
+ aliases: []
+ debug:
+ description:
+ - Turn on debug output.
+ required: false
+ default: False
+ aliases: []
+ node:
+ description:
+ - A list of the nodes being managed
+ required: false
+ default: None
+ aliases: []
+ selector:
+ description:
+ - The selector when filtering on node labels
+ required: false
+ default: None
+ aliases: []
+ pod_selector:
+ description:
+ - A selector when filtering on pod labels.
+ required: false
+ default: None
+ aliases: []
+ evacuate:
+ description:
+ - Remove all pods from a node.
+ required: false
+ default: False
+ aliases: []
+ schedulable:
+ description:
+ - whether or not openshift can schedule pods on this node
+ required: false
+ default: None
+ aliases: []
+ dry_run:
+ description:
+ - This shows the pods that would be migrated if evacuate were called
+ required: false
+ default: False
+ aliases: []
+ grace_period:
+ description:
+ - Grace period (seconds) for pods being deleted.
+ required: false
+ default: None
+ aliases: []
+ force:
+ description:
+ - Whether or not to attempt to force this action in openshift
+ required: false
+ default: False
+ aliases: []
+author:
+- "Kenny Woodson <kwoodson@redhat.com>"
+extends_documentation_fragment: []
+'''
+
+EXAMPLES = '''
+- name: oadm manage-node --schedulable=true --selector=ops_node=new
+ oadm_manage_node:
+ selector: ops_node=new
+ schedulable: True
+ register: schedout
+
+- name: oadm manage-node my-k8s-node-5 --evacuate
+ oadm_manage_node:
+ node: my-k8s-node-5
+ evacuate: True
+ force: True
+'''
diff --git a/roles/lib_openshift/src/doc/serviceaccount b/roles/lib_openshift/src/doc/serviceaccount
new file mode 100644
index 000000000..b2eafab51
--- /dev/null
+++ b/roles/lib_openshift/src/doc/serviceaccount
@@ -0,0 +1,68 @@
+# flake8: noqa
+# pylint: skip-file
+
+DOCUMENTATION = '''
+---
+module: oc_serviceaccount
+short_description: Module to manage openshift service accounts
+description:
+ - Manage openshift service accounts programmatically.
+options:
+ state:
+ description:
+ - If present, the service account will be created if it doesn't exist or updated if different. If absent, the service account will be removed if present. If list, information about the service account will be gathered and returned as part of the Ansible call results.
+ required: false
+ default: present
+ choices: ["present", "absent", "list"]
+ aliases: []
+ kubeconfig:
+ description:
+ - The path for the kubeconfig file to use for authentication
+ required: false
+ default: /etc/origin/master/admin.kubeconfig
+ aliases: []
+ debug:
+ description:
+ - Turn on debug output.
+ required: false
+ default: false
+ aliases: []
+ name:
+ description:
+ - Name of the service account.
+ required: true
+ default: None
+ aliases: []
+ namespace:
+ description:
+ - Namespace of the service account.
+ required: true
+ default: None
+ aliases: []
+ secrets:
+ description:
+ - A list of secrets that are associated with the service account.
+ required: false
+ default: None
+ aliases: []
+ image_pull_secrets:
+ description:
+ - A list of the image pull secrets that are associated with the service account.
+ required: false
+ default: None
+ aliases: []
+author:
+- "Kenny Woodson <kwoodson@redhat.com>"
+extends_documentation_fragment: []
+'''
+
+EXAMPLES = '''
+- name: create registry serviceaccount
+ oc_serviceaccount:
+ name: registry
+ namespace: default
+ secrets:
+ - docker-registry-config
+ - registry-secret
+ register: sa_out
+'''
diff --git a/roles/lib_openshift/src/lib/base.py b/roles/lib_openshift/src/lib/base.py
index 8b5491d6b..d0d6c7afc 100644
--- a/roles/lib_openshift/src/lib/base.py
+++ b/roles/lib_openshift/src/lib/base.py
@@ -205,6 +205,18 @@ class OpenShiftCLI(object):
cmd.append('--confirm')
return self.openshift_cmd(cmd)
+ def _run(self, cmds, input_data):
+ ''' Actually executes the command. This makes mocking easier. '''
+ proc = subprocess.Popen(cmds,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ env={'KUBECONFIG': self.kubeconfig})
+
+ stdout, stderr = proc.communicate(input_data)
+
+ return proc.returncode, stdout, stderr
+
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
'''Base command for oc '''
@@ -216,7 +228,7 @@ class OpenShiftCLI(object):
if self.all_namespaces:
cmds.extend(['--all-namespaces'])
- elif self.namespace:
+ elif self.namespace is not None and self.namespace.lower() not in ['none', 'empty']: # E501
cmds.extend(['-n', self.namespace])
cmds.extend(cmd)
@@ -228,18 +240,13 @@ class OpenShiftCLI(object):
if self.verbose:
print(' '.join(cmds))
- proc = subprocess.Popen(cmds,
- stdin=subprocess.PIPE,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- env={'KUBECONFIG': self.kubeconfig})
+ returncode, stdout, stderr = self._run(cmds, input_data)
- stdout, stderr = proc.communicate(input_data)
- rval = {"returncode": proc.returncode,
+ rval = {"returncode": returncode,
"results": results,
"cmd": ' '.join(cmds)}
- if proc.returncode == 0:
+ if returncode == 0:
if output:
if output_type == 'json':
try:
diff --git a/roles/lib_openshift/src/lib/serviceaccount.py b/roles/lib_openshift/src/lib/serviceaccount.py
new file mode 100644
index 000000000..47a55757e
--- /dev/null
+++ b/roles/lib_openshift/src/lib/serviceaccount.py
@@ -0,0 +1,129 @@
+# pylint: skip-file
+# flake8: noqa
+
+class ServiceAccountConfig(object):
+ '''Service account config class
+
+ This class stores the options and returns a default service account
+ '''
+
+ # pylint: disable=too-many-arguments
+ def __init__(self, sname, namespace, kubeconfig, secrets=None, image_pull_secrets=None):
+ self.name = sname
+ self.kubeconfig = kubeconfig
+ self.namespace = namespace
+ self.secrets = secrets or []
+ self.image_pull_secrets = image_pull_secrets or []
+ self.data = {}
+ self.create_dict()
+
+ def create_dict(self):
+ ''' return a properly structured service account '''
+ self.data['apiVersion'] = 'v1'
+ self.data['kind'] = 'ServiceAccount'
+ self.data['metadata'] = {}
+ self.data['metadata']['name'] = self.name
+ self.data['metadata']['namespace'] = self.namespace
+
+ self.data['secrets'] = []
+ if self.secrets:
+ for sec in self.secrets:
+ self.data['secrets'].append({"name": sec})
+
+ self.data['imagePullSecrets'] = []
+ if self.image_pull_secrets:
+ for sec in self.image_pull_secrets:
+ self.data['imagePullSecrets'].append({"name": sec})
+
+
+class ServiceAccount(Yedit):
+ ''' Class to wrap the oc command line tools '''
+ image_pull_secrets_path = "imagePullSecrets"
+ secrets_path = "secrets"
+
+ def __init__(self, content):
+ '''ServiceAccount constructor'''
+ super(ServiceAccount, self).__init__(content=content)
+ self._secrets = None
+ self._image_pull_secrets = None
+
+ @property
+ def image_pull_secrets(self):
+ ''' property for image_pull_secrets '''
+ if self._image_pull_secrets is None:
+ self._image_pull_secrets = self.get(ServiceAccount.image_pull_secrets_path) or []
+ return self._image_pull_secrets
+
+ @image_pull_secrets.setter
+ def image_pull_secrets(self, secrets):
+ ''' setter for image_pull_secrets '''
+ self._image_pull_secrets = secrets
+
+ @property
+ def secrets(self):
+ ''' property for secrets '''
+ if not self._secrets:
+ self._secrets = self.get(ServiceAccount.secrets_path) or []
+ return self._secrets
+
+ @secrets.setter
+ def secrets(self, secrets):
+ ''' setter for secrets '''
+ self._secrets = secrets
+
+ def delete_secret(self, inc_secret):
+ ''' remove a secret '''
+ remove_idx = None
+ for idx, sec in enumerate(self.secrets):
+ if sec['name'] == inc_secret:
+ remove_idx = idx
+ break
+
+ if remove_idx is not None:
+ del self.secrets[remove_idx]
+ return True
+
+ return False
+
+ def delete_image_pull_secret(self, inc_secret):
+ ''' remove an image_pull_secret '''
+ remove_idx = None
+ for idx, sec in enumerate(self.image_pull_secrets):
+ if sec['name'] == inc_secret:
+ remove_idx = idx
+ break
+
+ if remove_idx is not None:
+ del self.image_pull_secrets[remove_idx]
+ return True
+
+ return False
+
+ def find_secret(self, inc_secret):
+ '''find secret'''
+ for secret in self.secrets:
+ if secret['name'] == inc_secret:
+ return secret
+
+ return None
+
+ def find_image_pull_secret(self, inc_secret):
+ '''find image pull secret'''
+ for secret in self.image_pull_secrets:
+ if secret['name'] == inc_secret:
+ return secret
+
+ return None
+
+ def add_secret(self, inc_secret):
+ '''add secret'''
+ if self.secrets:
+ self.secrets.append({"name": inc_secret}) # pylint: disable=no-member
+ else:
+ self.put(ServiceAccount.secrets_path, [{"name": inc_secret}])
+
+ def add_image_pull_secret(self, inc_secret):
+ '''add image_pull_secret'''
+ if self.image_pull_secrets:
+ self.image_pull_secrets.append({"name": inc_secret}) # pylint: disable=no-member
+ else:
+ self.put(ServiceAccount.image_pull_secrets_path, [{"name": inc_secret}])
diff --git a/roles/lib_openshift/src/sources.yml b/roles/lib_openshift/src/sources.yml
index 5afcdc55d..28929c02a 100644
--- a/roles/lib_openshift/src/sources.yml
+++ b/roles/lib_openshift/src/sources.yml
@@ -1,4 +1,14 @@
---
+oadm_manage_node.py:
+- doc/generated
+- doc/license
+- lib/import.py
+- doc/manage_node
+- ../../lib_utils/src/class/yedit.py
+- lib/base.py
+- class/oadm_manage_node.py
+- ansible/oadm_manage_node.py
+
oc_edit.py:
- doc/generated
- doc/license
@@ -8,6 +18,7 @@ oc_edit.py:
- lib/base.py
- class/oc_edit.py
- ansible/oc_edit.py
+
oc_obj.py:
- doc/generated
- doc/license
@@ -17,6 +28,7 @@ oc_obj.py:
- lib/base.py
- class/oc_obj.py
- ansible/oc_obj.py
+
oc_route.py:
- doc/generated
- doc/license
@@ -27,6 +39,7 @@ oc_route.py:
- lib/route.py
- class/oc_route.py
- ansible/oc_route.py
+
oc_secret.py:
- doc/generated
- doc/license
@@ -37,6 +50,7 @@ oc_secret.py:
- lib/secret.py
- class/oc_secret.py
- ansible/oc_secret.py
+
oc_scale.py:
- doc/generated
- doc/license
@@ -48,6 +62,7 @@ oc_scale.py:
- lib/replicationcontroller.py
- class/oc_scale.py
- ansible/oc_scale.py
+
oc_version.py:
- doc/generated
- doc/license
@@ -57,3 +72,14 @@ oc_version.py:
- lib/base.py
- class/oc_version.py
- ansible/oc_version.py
+
+oc_serviceaccount.py:
+- doc/generated
+- doc/license
+- lib/import.py
+- doc/serviceaccount
+- ../../lib_utils/src/class/yedit.py
+- lib/base.py
+- lib/serviceaccount.py
+- class/oc_serviceaccount.py
+- ansible/oc_serviceaccount.py
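sources.yml is the recipe for the generated modules under library/: each key names a generated file and its list gives the fragments, in order, that are concatenated to produce it. A minimal sketch of that concatenation, assuming a generator script of roughly this shape (the role's actual generator may differ in details):

    # Minimal sketch of the fragment concatenation sources.yml describes.
    import os
    import yaml

    with open('sources.yml') as stream:
        sources = yaml.safe_load(stream)

    for module_name, fragments in sources.items():
        with open(os.path.join('../library', module_name), 'w') as out:
            for fragment in fragments:
                with open(fragment) as frag:
                    out.write(frag.read())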
diff --git a/roles/lib_openshift/src/test/integration/oadm_manage_node.yml b/roles/lib_openshift/src/test/integration/oadm_manage_node.yml
new file mode 100755
index 000000000..69a701b17
--- /dev/null
+++ b/roles/lib_openshift/src/test/integration/oadm_manage_node.yml
@@ -0,0 +1,58 @@
+#!/usr/bin/ansible-playbook --module-path=../../../library/
+# ./oadm_manage_node.yml -M ../../../library -e "cli_master_test=$OPENSHIFT_MASTER cli_node_test=$OPENSHIFT_NODE"
+---
+- hosts: "{{ cli_master_test }}"
+ gather_facts: no
+ user: root
+ tasks:
+ - name: list pods from a node
+ oadm_manage_node:
+ list_pods: True
+ node:
+ - "{{ cli_node_test }}"
+ register: podout
+ - debug: var=podout
+
+ - assert:
+ that: "'{{ cli_node_test }}' in podout.results.nodes"
+ msg: Pod data was not returned
+
+ - name: set node to unschedulable
+ oadm_manage_node:
+ schedulable: False
+ node:
+ - "{{ cli_node_test }}"
+ register: nodeout
+ - debug: var=nodeout
+
+ - name: assert that schedulable=False
+ assert:
+ that: nodeout.results.nodes[0]['schedulable'] == False
+ msg: "{{ cli_node_test }} schedulable set to True"
+
+ - name: get node schedulable
+ oc_obj:
+ kind: node
+ state: list
+ name: "{{ cli_node_test }}"
+ namespace: None
+ register: nodeout
+
+ - debug: var=nodeout
+
+ - name: assert that schedulable=False
+ assert:
+ that: nodeout.results.results[0]['spec']['unschedulable']
+
+ - name: set node to schedulable
+ oadm_manage_node:
+ schedulable: True
+ node:
+ - "{{ cli_node_test }}"
+ register: nodeout
+ - debug: var=nodeout
+
+ - name: assert that schedulable=True
+ assert:
+ that: nodeout.results.nodes[0]['schedulable']
+ msg: "{{ cli_node_test }} schedulable set to False"
diff --git a/roles/lib_openshift/src/test/integration/oc_scale.yml b/roles/lib_openshift/src/test/integration/oc_scale.yml
index e96e16820..43a42c589 100755
--- a/roles/lib_openshift/src/test/integration/oc_scale.yml
+++ b/roles/lib_openshift/src/test/integration/oc_scale.yml
@@ -90,3 +90,22 @@
- "'results' in pods and 'results' in pods.results"
- "{{ pods.results.results[0]['items']|length }} == 2"
msg: "Did not find 1 replica in scale results."
+
+
+ # Test scale on non-existent dc
+ - name: scale non-existent dc
+ oc_scale:
+ name: not_there
+ kind: dc
+ replicas: 2
+ register: scaleout
+ ignore_errors: True
+
+ - debug: var=scaleout
+
+ - assert:
+ that:
+ - scaleout.changed == False
+ - scaleout.msg.returncode == 1
+ - "'msg' in scaleout and 'stderr' in scaleout.msg"
+ msg: "Deploymentconfig exists. This should error."
diff --git a/roles/lib_openshift/src/test/integration/oc_serviceaccount.yml b/roles/lib_openshift/src/test/integration/oc_serviceaccount.yml
new file mode 100755
index 000000000..46369b8f4
--- /dev/null
+++ b/roles/lib_openshift/src/test/integration/oc_serviceaccount.yml
@@ -0,0 +1,101 @@
+#!/usr/bin/ansible-playbook --module-path=../../../library/
+
+---
+- hosts: "{{ cli_master_test }}"
+ gather_facts: no
+ user: root
+ vars_prompt:
+ - name: cli_master_test
+ prompt: "Master to run against"
+ private: false
+ default: localhost
+
+ vars:
+ service_account_name: serviceaccount-int-test
+ ns_name: default
+
+ post_tasks:
+ - name: Make sure we start clean - Arrange
+ oc_serviceaccount:
+ state: absent
+ name: "{{ service_account_name }}"
+ namespace: "{{ ns_name }}"
+
+ - name: List when account does not exist - Act
+ oc_serviceaccount:
+ state: list
+ name: "{{ service_account_name }}"
+ namespace: "{{ ns_name }}"
+ register: saout
+
+ - name: List when account does not exist - Assert
+ assert:
+ that:
+ - "saout.changed == False"
+ - "saout.state == 'list'"
+ - "saout.results == [{}]"
+
+ - name: create serviceaccount - Act
+ oc_serviceaccount:
+ name: "{{ service_account_name }}"
+ namespace: "{{ ns_name }}"
+ secrets:
+ - one
+ - two
+ - three
+ register: saout
+
+ - name: create serviceaccount - Assert
+ assert:
+ that:
+ - "saout.changed == True"
+ - "saout.state == 'present'"
+ - "saout.results.returncode == 0"
+ - "saout.results.results.0.metadata.name == '{{ service_account_name }}'"
+ - "saout.results.results.0.metadata.namespace == '{{ ns_name }}'"
+
+ - name: create serviceaccount - check idempotency - Act
+ oc_serviceaccount:
+ name: "{{ service_account_name }}"
+ namespace: "{{ ns_name }}"
+ secrets:
+ - one
+ - two
+ - three
+ register: saout
+
+ - name: create serviceaccount - check idempotency - Assert
+ assert:
+ that:
+ - "saout.changed == False"
+ - "saout.state == 'present'"
+ - "saout.results.returncode == 0"
+ - "saout.results.results.0.metadata.name == '{{ service_account_name }}'"
+ - "saout.results.results.0.metadata.namespace == '{{ ns_name }}'"
+
+ - name: Delete serviceaccount - Act
+ oc_serviceaccount:
+ state: absent
+ name: "{{ service_account_name }}"
+ namespace: "{{ ns_name }}"
+ register: saout
+
+ - name: Delete serviceaccount - Assert
+ assert:
+ that:
+ - "saout.changed == True"
+ - "saout.state == 'absent'"
+ - "saout.results.returncode == 0"
+
+ - name: Delete serviceaccount - check idempotency - Act
+ oc_serviceaccount:
+ state: absent
+ name: "{{ service_account_name }}"
+ namespace: "{{ ns_name }}"
+ register: saout
+
+ - name: Delete serviceaccount - check idempotency - Assert
+ assert:
+ that:
+ - "saout.changed == False"
+ - "saout.state == 'absent'"
diff --git a/roles/lib_openshift/src/test/unit/oadm_manage_node.py b/roles/lib_openshift/src/test/unit/oadm_manage_node.py
new file mode 100755
index 000000000..8fd6f9c55
--- /dev/null
+++ b/roles/lib_openshift/src/test/unit/oadm_manage_node.py
@@ -0,0 +1,177 @@
+#!/usr/bin/env python2
+'''
+ Unit tests for oadm_manage_node
+'''
+# To run
+# python -m unittest oadm_manage_node
+#
+# .
+# Ran 2 tests in 0.001s
+#
+# OK
+
+import os
+import sys
+import unittest
+import mock
+
+# Removing invalid variable names for tests so that I can
+# keep them brief
+# pylint: disable=invalid-name,no-name-in-module
+# Disable import-error b/c our libraries aren't loaded in jenkins
+# pylint: disable=import-error
+# place class in our python path
+module_path = os.path.join('/'.join(os.path.realpath(__file__).split('/')[:-4]), 'library') # noqa: E501
+sys.path.insert(0, module_path)
+from oadm_manage_node import ManageNode # noqa: E402
+
+
+class ManageNodeTest(unittest.TestCase):
+ '''
+ Test class for oadm_manage_node
+ '''
+
+ def setUp(self):
+ ''' setup method will create a file and set to known configuration '''
+ pass
+
+ @mock.patch('oadm_manage_node.ManageNode.openshift_cmd')
+ def test_list_pods(self, mock_openshift_cmd):
+ ''' Testing list_pods '''
+ params = {'node': ['ip-172-31-49-140.ec2.internal'],
+ 'schedulable': None,
+ 'selector': None,
+ 'pod_selector': None,
+ 'list_pods': True,
+ 'kubeconfig': '/etc/origin/master/admin.kubeconfig',
+ 'evacuate': False,
+ 'grace_period': False,
+ 'dry_run': False,
+ 'force': False}
+
+ pod_list = '''{
+ "metadata": {},
+ "items": [
+ {
+ "metadata": {
+ "name": "docker-registry-1-xuhik",
+ "generateName": "docker-registry-1-",
+ "namespace": "default",
+ "selfLink": "/api/v1/namespaces/default/pods/docker-registry-1-xuhik",
+ "uid": "ae2a25a2-e316-11e6-80eb-0ecdc51fcfc4",
+ "resourceVersion": "1501",
+ "creationTimestamp": "2017-01-25T15:55:23Z",
+ "labels": {
+ "deployment": "docker-registry-1",
+ "deploymentconfig": "docker-registry",
+ "docker-registry": "default"
+ },
+ "annotations": {
+ "openshift.io/deployment-config.latest-version": "1",
+ "openshift.io/deployment-config.name": "docker-registry",
+ "openshift.io/deployment.name": "docker-registry-1",
+ "openshift.io/scc": "restricted"
+ }
+ },
+ "spec": {}
+ },
+ {
+ "metadata": {
+ "name": "router-1-kp3m3",
+ "generateName": "router-1-",
+ "namespace": "default",
+ "selfLink": "/api/v1/namespaces/default/pods/router-1-kp3m3",
+ "uid": "9e71f4a5-e316-11e6-80eb-0ecdc51fcfc4",
+ "resourceVersion": "1456",
+ "creationTimestamp": "2017-01-25T15:54:56Z",
+ "labels": {
+ "deployment": "router-1",
+ "deploymentconfig": "router",
+ "router": "router"
+ },
+ "annotations": {
+ "openshift.io/deployment-config.latest-version": "1",
+ "openshift.io/deployment-config.name": "router",
+ "openshift.io/deployment.name": "router-1",
+ "openshift.io/scc": "hostnetwork"
+ }
+ },
+ "spec": {}
+ }]
+}'''
+
+ mock_openshift_cmd.side_effect = [
+ {"cmd": "/usr/bin/oadm manage-node ip-172-31-49-140.ec2.internal --list-pods",
+ "results": pod_list,
+ "returncode": 0}
+ ]
+
+ results = ManageNode.run_ansible(params, False)
+
+ # returned a single node
+ self.assertTrue(len(results['results']['nodes']) == 1)
+ # returned 2 pods
+ self.assertTrue(len(results['results']['nodes']['ip-172-31-49-140.ec2.internal']) == 2)
+
+ @mock.patch('oadm_manage_node.ManageNode.openshift_cmd')
+ def test_schedulable_false(self, mock_openshift_cmd):
+        ''' Test marking a node unschedulable '''
+ params = {'node': ['ip-172-31-49-140.ec2.internal'],
+ 'schedulable': False,
+ 'selector': None,
+ 'pod_selector': None,
+ 'list_pods': False,
+ 'kubeconfig': '/etc/origin/master/admin.kubeconfig',
+ 'evacuate': False,
+ 'grace_period': False,
+ 'dry_run': False,
+ 'force': False}
+
+ node = [{
+ "apiVersion": "v1",
+ "kind": "Node",
+ "metadata": {
+ "creationTimestamp": "2017-01-26T14:34:43Z",
+ "labels": {
+ "beta.kubernetes.io/arch": "amd64",
+ "beta.kubernetes.io/instance-type": "m4.large",
+ "beta.kubernetes.io/os": "linux",
+ "failure-domain.beta.kubernetes.io/region": "us-east-1",
+ "failure-domain.beta.kubernetes.io/zone": "us-east-1c",
+ "hostname": "opstest-node-compute-0daaf",
+ "kubernetes.io/hostname": "ip-172-31-51-111.ec2.internal",
+ "ops_node": "old",
+ "region": "us-east-1",
+ "type": "compute"
+ },
+ "name": "ip-172-31-51-111.ec2.internal",
+ "resourceVersion": "6936",
+ "selfLink": "/api/v1/nodes/ip-172-31-51-111.ec2.internal",
+ "uid": "93d7fdfb-e3d4-11e6-a982-0e84250fc302"
+ },
+ "spec": {
+ "externalID": "i-06bb330e55c699b0f",
+ "providerID": "aws:///us-east-1c/i-06bb330e55c699b0f",
+ }}]
+
+ mock_openshift_cmd.side_effect = [
+ {"cmd": "/usr/bin/oc get node -o json ip-172-31-49-140.ec2.internal",
+ "results": node,
+ "returncode": 0},
+ {"cmd": "/usr/bin/oadm manage-node ip-172-31-49-140.ec2.internal --schedulable=False",
+ "results": "NAME STATUS AGE\n" +
+ "ip-172-31-49-140.ec2.internal Ready,SchedulingDisabled 5h\n",
+ "returncode": 0}]
+ results = ManageNode.run_ansible(params, False)
+
+ self.assertTrue(results['changed'])
+ self.assertEqual(results['results']['nodes'][0]['name'], 'ip-172-31-49-140.ec2.internal')
+ self.assertEqual(results['results']['nodes'][0]['schedulable'], False)
+
+ def tearDown(self):
+ '''TearDown method'''
+ pass
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/roles/lib_openshift/src/test/unit/oc_scale.py b/roles/lib_openshift/src/test/unit/oc_scale.py
index c523592de..d8d5a231f 100755
--- a/roles/lib_openshift/src/test/unit/oc_scale.py
+++ b/roles/lib_openshift/src/test/unit/oc_scale.py
@@ -119,6 +119,30 @@ class OCScaleTest(unittest.TestCase):
self.assertFalse(results['changed'])
self.assertEqual(results['result'][0], 3)
+ @mock.patch('oc_scale.OCScale.openshift_cmd')
+ def test_no_dc_scale(self, mock_openshift_cmd):
+        ''' Test scaling a dc that does not exist '''
+ params = {'name': 'not_there',
+ 'namespace': 'default',
+ 'replicas': 3,
+ 'state': 'present',
+ 'kind': 'dc',
+ 'kubeconfig': '/etc/origin/master/admin.kubeconfig',
+ 'debug': False}
+
+ mock_openshift_cmd.side_effect = [
+ {"cmd": '/usr/bin/oc -n default get dc not_there -o json',
+ 'results': [{}],
+ 'returncode': 1,
+ 'stderr': "Error from server: deploymentconfigs \"not_there\" not found\n",
+ 'stdout': ""},
+ ]
+
+ results = OCScale.run_ansible(params, False)
+
+ self.assertTrue(results['failed'])
+ self.assertEqual(results['msg']['returncode'], 1)
+
def tearDown(self):
'''TearDown method'''
pass
diff --git a/roles/lib_openshift/src/test/unit/oc_secret.py b/roles/lib_openshift/src/test/unit/oc_secret.py
index 221f00ed6..835918b95 100755
--- a/roles/lib_openshift/src/test/unit/oc_secret.py
+++ b/roles/lib_openshift/src/test/unit/oc_secret.py
@@ -81,7 +81,7 @@ class OCSecretTest(unittest.TestCase):
# Making sure our mock was called as we expected
mock_openshift_cmd.assert_has_calls([
- mock.call(['get', 'secrets', '-o', 'json', 'secretname'], output=True),
+ mock.call(['get', 'secrets', 'secretname', '-o', 'json'], output=True),
mock.call(['secrets', 'new', 'secretname', 'somesecret.json=/tmp/somesecret.json']),
])
diff --git a/roles/lib_openshift/src/test/unit/oc_serviceaccount.py b/roles/lib_openshift/src/test/unit/oc_serviceaccount.py
new file mode 100755
index 000000000..faf0bfeb5
--- /dev/null
+++ b/roles/lib_openshift/src/test/unit/oc_serviceaccount.py
@@ -0,0 +1,114 @@
+#!/usr/bin/env python2
+'''
+ Unit tests for oc serviceaccount
+'''
+# To run:
+# ./oc_serviceaccount.py
+#
+# .
+# Ran 1 test in 0.002s
+#
+# OK
+
+import os
+import sys
+import unittest
+import mock
+
+# Disable pylint's invalid-name check for these tests so the
+# variable names can stay brief
+# pylint: disable=invalid-name,no-name-in-module
+# Disable import-error b/c our libraries aren't loaded in jenkins
+# pylint: disable=import-error,wrong-import-position
+# place class in our python path
+module_path = os.path.join('/'.join(os.path.realpath(__file__).split('/')[:-4]), 'library') # noqa: E501
+sys.path.insert(0, module_path)
+from oc_serviceaccount import OCServiceAccount # noqa: E402
+
+
+class OCServiceAccountTest(unittest.TestCase):
+ '''
+ Test class for OCServiceAccount
+ '''
+
+ def setUp(self):
+        ''' setUp: nothing to configure for these tests '''
+ pass
+
+ @mock.patch('oc_serviceaccount.OCServiceAccount._run')
+ def test_adding_a_serviceaccount(self, mock_cmd):
+ ''' Testing adding a serviceaccount '''
+
+ # Arrange
+
+ # run_ansible input parameters
+ params = {
+ 'kubeconfig': '/etc/origin/master/admin.kubeconfig',
+ 'state': 'present',
+ 'debug': False,
+ 'name': 'testserviceaccountname',
+ 'namespace': 'default',
+ 'secrets': None,
+ 'image_pull_secrets': None,
+ }
+
+ valid_result_json = '''{
+ "kind": "ServiceAccount",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "testserviceaccountname",
+ "namespace": "default",
+ "selfLink": "/api/v1/namespaces/default/serviceaccounts/testserviceaccountname",
+ "uid": "4d8320c9-e66f-11e6-8edc-0eece8f2ce22",
+ "resourceVersion": "328450",
+ "creationTimestamp": "2017-01-29T22:07:19Z"
+ },
+ "secrets": [
+ {
+ "name": "testserviceaccountname-dockercfg-4lqd0"
+ },
+ {
+ "name": "testserviceaccountname-token-9h0ej"
+ }
+ ],
+ "imagePullSecrets": [
+ {
+ "name": "testserviceaccountname-dockercfg-4lqd0"
+ }
+ ]
+ }'''
+
+ # Return values of our mocked function call. These get returned once per call.
+ mock_cmd.side_effect = [
+ # First call to mock
+ (1, '', 'Error from server: serviceaccounts "testserviceaccountname" not found'),
+
+ # Second call to mock
+ (0, 'serviceaccount "testserviceaccountname" created', ''),
+
+ # Third call to mock
+ (0, valid_result_json, ''),
+ ]
+
+ # Act
+ results = OCServiceAccount.run_ansible(params, False)
+
+ # Assert
+ self.assertTrue(results['changed'])
+ self.assertEqual(results['results']['returncode'], 0)
+ self.assertEqual(results['state'], 'present')
+
+ # Making sure our mock was called as we expected
+ mock_cmd.assert_has_calls([
+ mock.call(['/usr/bin/oc', '-n', 'default', 'get', 'sa', 'testserviceaccountname', '-o', 'json'], None),
+ mock.call(['/usr/bin/oc', '-n', 'default', 'create', '-f', '/tmp/testserviceaccountname'], None),
+ mock.call(['/usr/bin/oc', '-n', 'default', 'get', 'sa', 'testserviceaccountname', '-o', 'json'], None),
+ ])
+
+ def tearDown(self):
+ '''TearDown method'''
+ pass
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/roles/lib_utils/library/repoquery.py b/roles/lib_utils/library/repoquery.py
new file mode 100644
index 000000000..7f0105290
--- /dev/null
+++ b/roles/lib_utils/library/repoquery.py
@@ -0,0 +1,607 @@
+#!/usr/bin/env python
+# pylint: disable=missing-docstring
+# ___ ___ _ _ ___ ___ _ _____ ___ ___
+# / __| __| \| | __| _ \ /_\_ _| __| \
+# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
+# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
+# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
+# | |) | (_) | | .` | (_) || | | _|| |) | | | |
+# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
+#
+# Copyright 2016 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# -*- -*- -*- Begin included fragment: lib/import.py -*- -*- -*-
+
+# pylint: disable=wrong-import-order,wrong-import-position,unused-import
+
+from __future__ import print_function # noqa: F401
+import json # noqa: F401
+import os # noqa: F401
+import re # noqa: F401
+# pylint: disable=import-error
+import ruamel.yaml as yaml # noqa: F401
+import shutil # noqa: F401
+
+from ansible.module_utils.basic import AnsibleModule
+
+# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: doc/repoquery -*- -*- -*-
+
+DOCUMENTATION = '''
+---
+module: repoquery
+short_description: Query package information from Yum repositories
+description:
+ - Query package information from Yum repositories.
+options:
+ state:
+ description:
+ - The expected state. Currently only supports list.
+ required: false
+ default: list
+ choices: ["list"]
+ aliases: []
+ name:
+ description:
+ - The name of the package to query
+ required: true
+ default: None
+ aliases: []
+ query_type:
+ description:
+    - Narrows the packages queried based on this value.
+ - If repos, it narrows the query to repositories defined on the machine.
+ - If installed, it narrows the query to only packages installed on the machine.
+ - If available, it narrows the query to packages that are available to be installed.
+    - If recent, it narrows the query to only recently added packages.
+ - If updates, it narrows the query to only packages that are updates to existing installed packages.
+ - If extras, it narrows the query to packages that are not present in any of the available repositories.
+ - If all, it queries all of the above.
+ required: false
+ default: repos
+ aliases: []
+ verbose:
+ description:
+ - Shows more detail for the requested query.
+ required: false
+ default: false
+ aliases: []
+ show_duplicates:
+ description:
+ - Shows multiple versions of a package.
+ required: false
+ default: false
+ aliases: []
+ match_version:
+ description:
+ - Match the specific version given to the package.
+ required: false
+ default: None
+ aliases: []
+author:
+- "Matt Woodson <mwoodson@redhat.com>"
+extends_documentation_fragment: []
+'''
+
+EXAMPLES = '''
+# Example 1: Get bash versions
+ - name: Get bash version
+ repoquery:
+ name: bash
+ show_duplicates: True
+ register: bash_out
+
+# Results:
+# ok: [localhost] => {
+# "bash_out": {
+# "changed": false,
+# "results": {
+# "cmd": "/usr/bin/repoquery --quiet --pkgnarrow=repos --queryformat=%{version}|%{release}|%{arch}|%{repo}|%{version}-%{release} --show-duplicates bash",
+# "package_found": true,
+# "package_name": "bash",
+# "returncode": 0,
+# "versions": {
+# "available_versions": [
+# "4.2.45",
+# "4.2.45",
+# "4.2.45",
+# "4.2.46",
+# "4.2.46",
+# "4.2.46",
+# "4.2.46"
+# ],
+# "available_versions_full": [
+# "4.2.45-5.el7",
+# "4.2.45-5.el7_0.2",
+# "4.2.45-5.el7_0.4",
+# "4.2.46-12.el7",
+# "4.2.46-19.el7",
+# "4.2.46-20.el7_2",
+# "4.2.46-21.el7_3"
+# ],
+# "latest": "4.2.46",
+# "latest_full": "4.2.46-21.el7_3"
+# }
+# },
+# "state": "present"
+# }
+# }
+
+
+
+# Example 2: Get bash versions verbosely
+ - name: Get bash versions verbosely
+ repoquery:
+ name: bash
+ show_duplicates: True
+ verbose: True
+ register: bash_out
+
+# Results:
+# ok: [localhost] => {
+# "bash_out": {
+# "changed": false,
+# "results": {
+# "cmd": "/usr/bin/repoquery --quiet --pkgnarrow=repos --queryformat=%{version}|%{release}|%{arch}|%{repo}|%{version}-%{release} --show-duplicates bash",
+# "package_found": true,
+# "package_name": "bash",
+# "raw_versions": {
+# "4.2.45-5.el7": {
+# "arch": "x86_64",
+# "release": "5.el7",
+# "repo": "rhel-7-server-rpms",
+# "version": "4.2.45",
+# "version_release": "4.2.45-5.el7"
+# },
+# "4.2.45-5.el7_0.2": {
+# "arch": "x86_64",
+# "release": "5.el7_0.2",
+# "repo": "rhel-7-server-rpms",
+# "version": "4.2.45",
+# "version_release": "4.2.45-5.el7_0.2"
+# },
+# "4.2.45-5.el7_0.4": {
+# "arch": "x86_64",
+# "release": "5.el7_0.4",
+# "repo": "rhel-7-server-rpms",
+# "version": "4.2.45",
+# "version_release": "4.2.45-5.el7_0.4"
+# },
+# "4.2.46-12.el7": {
+# "arch": "x86_64",
+# "release": "12.el7",
+# "repo": "rhel-7-server-rpms",
+# "version": "4.2.46",
+# "version_release": "4.2.46-12.el7"
+# },
+# "4.2.46-19.el7": {
+# "arch": "x86_64",
+# "release": "19.el7",
+# "repo": "rhel-7-server-rpms",
+# "version": "4.2.46",
+# "version_release": "4.2.46-19.el7"
+# },
+# "4.2.46-20.el7_2": {
+# "arch": "x86_64",
+# "release": "20.el7_2",
+# "repo": "rhel-7-server-rpms",
+# "version": "4.2.46",
+# "version_release": "4.2.46-20.el7_2"
+# },
+# "4.2.46-21.el7_3": {
+# "arch": "x86_64",
+# "release": "21.el7_3",
+# "repo": "rhel-7-server-rpms",
+# "version": "4.2.46",
+# "version_release": "4.2.46-21.el7_3"
+# }
+# },
+# "results": "4.2.45|5.el7|x86_64|rhel-7-server-rpms|4.2.45-5.el7\n4.2.45|5.el7_0.2|x86_64|rhel-7-server-rpms|4.2.45-5.el7_0.2\n4.2.45|5.el7_0.4|x86_64|rhel-7-server-rpms|4.2.45-5.el7_0.4\n4.2.46|12.el7|x86_64|rhel-7-server-rpms|4.2.46-12.el7\n4.2.46|19.el7|x86_64|rhel-7-server-rpms|4.2.46-19.el7\n4.2.46|20.el7_2|x86_64|rhel-7-server-rpms|4.2.46-20.el7_2\n4.2.46|21.el7_3|x86_64|rhel-7-server-rpms|4.2.46-21.el7_3\n",
+# "returncode": 0,
+# "versions": {
+# "available_versions": [
+# "4.2.45",
+# "4.2.45",
+# "4.2.45",
+# "4.2.46",
+# "4.2.46",
+# "4.2.46",
+# "4.2.46"
+# ],
+# "available_versions_full": [
+# "4.2.45-5.el7",
+# "4.2.45-5.el7_0.2",
+# "4.2.45-5.el7_0.4",
+# "4.2.46-12.el7",
+# "4.2.46-19.el7",
+# "4.2.46-20.el7_2",
+# "4.2.46-21.el7_3"
+# ],
+# "latest": "4.2.46",
+# "latest_full": "4.2.46-21.el7_3"
+# }
+# },
+# "state": "present"
+# }
+# }
+
+# Example 3: Match a specific version
+ - name: matched versions repoquery test
+ repoquery:
+ name: atomic-openshift
+ show_duplicates: True
+ match_version: 3.3
+ register: openshift_out
+
+# Result:
+
+# ok: [localhost] => {
+# "openshift_out": {
+# "changed": false,
+# "results": {
+# "cmd": "/usr/bin/repoquery --quiet --pkgnarrow=repos --queryformat=%{version}|%{release}|%{arch}|%{repo}|%{version}-%{release} --show-duplicates atomic-openshift",
+# "package_found": true,
+# "package_name": "atomic-openshift",
+# "returncode": 0,
+# "versions": {
+# "available_versions": [
+# "3.2.0.43",
+# "3.2.1.23",
+# "3.3.0.32",
+# "3.3.0.34",
+# "3.3.0.35",
+# "3.3.1.3",
+# "3.3.1.4",
+# "3.3.1.5",
+# "3.3.1.7",
+# "3.4.0.39"
+# ],
+# "available_versions_full": [
+# "3.2.0.43-1.git.0.672599f.el7",
+# "3.2.1.23-1.git.0.88a7a1d.el7",
+# "3.3.0.32-1.git.0.37bd7ea.el7",
+# "3.3.0.34-1.git.0.83f306f.el7",
+# "3.3.0.35-1.git.0.d7bd9b6.el7",
+# "3.3.1.3-1.git.0.86dc49a.el7",
+# "3.3.1.4-1.git.0.7c8657c.el7",
+# "3.3.1.5-1.git.0.62700af.el7",
+# "3.3.1.7-1.git.0.0988966.el7",
+# "3.4.0.39-1.git.0.5f32f06.el7"
+# ],
+# "latest": "3.4.0.39",
+# "latest_full": "3.4.0.39-1.git.0.5f32f06.el7",
+# "matched_version_found": true,
+# "matched_version_full_latest": "3.3.1.7-1.git.0.0988966.el7",
+# "matched_version_latest": "3.3.1.7",
+# "matched_versions": [
+# "3.3.0.32",
+# "3.3.0.34",
+# "3.3.0.35",
+# "3.3.1.3",
+# "3.3.1.4",
+# "3.3.1.5",
+# "3.3.1.7"
+# ],
+# "matched_versions_full": [
+# "3.3.0.32-1.git.0.37bd7ea.el7",
+# "3.3.0.34-1.git.0.83f306f.el7",
+# "3.3.0.35-1.git.0.d7bd9b6.el7",
+# "3.3.1.3-1.git.0.86dc49a.el7",
+# "3.3.1.4-1.git.0.7c8657c.el7",
+# "3.3.1.5-1.git.0.62700af.el7",
+# "3.3.1.7-1.git.0.0988966.el7"
+# ],
+# "requested_match_version": "3.3"
+# }
+# },
+# "state": "present"
+# }
+# }
+
+'''
+
+# -*- -*- -*- End included fragment: doc/repoquery -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: lib/repoquery.py -*- -*- -*-
+
+'''
+ class that wraps the repoquery commands in a subprocess
+'''
+
+# pylint: disable=too-many-lines,wrong-import-position,wrong-import-order
+
+from collections import defaultdict # noqa: E402
+
+
+# pylint: disable=no-name-in-module,import-error
+# Reason: pylint errors with "No name 'version' in module 'distutils'".
+# This is a bug: https://github.com/PyCQA/pylint/issues/73
+from distutils.version import LooseVersion # noqa: E402
+
+import subprocess # noqa: E402
+
+
+class RepoqueryCLIError(Exception):
+ '''Exception class for repoquerycli'''
+ pass
+
+
+def _run(cmds):
+ ''' Actually executes the command. This makes mocking easier. '''
+ proc = subprocess.Popen(cmds,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+
+ stdout, stderr = proc.communicate()
+
+ return proc.returncode, stdout, stderr
+
+
+# pylint: disable=too-few-public-methods
+class RepoqueryCLI(object):
+ ''' Class to wrap the command line tools '''
+ def __init__(self,
+ verbose=False):
+ ''' Constructor for RepoqueryCLI '''
+        self.verbose = verbose
+
+ def _repoquery_cmd(self, cmd, output=False, output_type='json'):
+ '''Base command for repoquery '''
+ cmds = ['/usr/bin/repoquery', '--plugins', '--quiet']
+
+ cmds.extend(cmd)
+
+ rval = {}
+ results = ''
+ err = None
+
+ if self.verbose:
+ print(' '.join(cmds))
+
+ returncode, stdout, stderr = _run(cmds)
+
+ rval = {
+ "returncode": returncode,
+ "results": results,
+ "cmd": ' '.join(cmds),
+ }
+
+ if returncode == 0:
+ if output:
+ if output_type == 'raw':
+ rval['results'] = stdout
+
+ if self.verbose:
+ print(stdout)
+ print(stderr)
+
+ if err:
+ rval.update({
+ "err": err,
+ "stderr": stderr,
+ "stdout": stdout,
+ "cmd": cmds
+ })
+
+ else:
+ rval.update({
+ "stderr": stderr,
+ "stdout": stdout,
+ "results": {},
+ })
+
+ return rval
+
+# -*- -*- -*- End included fragment: lib/repoquery.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: class/repoquery.py -*- -*- -*-
+
+
+class Repoquery(RepoqueryCLI):
+    ''' Class to wrap the repoquery command '''
+    # pylint: disable=too-many-arguments
+    def __init__(self, name, query_type, show_duplicates,
+                 match_version, verbose):
+        ''' Constructor for Repoquery '''
+ super(Repoquery, self).__init__(None)
+ self.name = name
+ self.query_type = query_type
+ self.show_duplicates = show_duplicates
+ self.match_version = match_version
+ self.verbose = verbose
+
+ if self.match_version:
+ self.show_duplicates = True
+
+ self.query_format = "%{version}|%{release}|%{arch}|%{repo}|%{version}-%{release}"
+
+ def build_cmd(self):
+ ''' build the repoquery cmd options '''
+
+ repo_cmd = []
+
+ repo_cmd.append("--pkgnarrow=" + self.query_type)
+ repo_cmd.append("--queryformat=" + self.query_format)
+
+ if self.show_duplicates:
+ repo_cmd.append('--show-duplicates')
+
+ repo_cmd.append(self.name)
+
+ return repo_cmd
+
+ @staticmethod
+ def process_versions(query_output):
+ ''' format the package data into something that can be presented '''
+
+ version_dict = defaultdict(dict)
+
+ for version in query_output.split('\n'):
+ pkg_info = version.split("|")
+
+ pkg_version = {}
+ pkg_version['version'] = pkg_info[0]
+ pkg_version['release'] = pkg_info[1]
+ pkg_version['arch'] = pkg_info[2]
+ pkg_version['repo'] = pkg_info[3]
+ pkg_version['version_release'] = pkg_info[4]
+
+ version_dict[pkg_info[4]] = pkg_version
+
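+        # Illustration (comment only): given the queryformat above, the raw line
+        #   4.2.46|21.el7_3|x86_64|rhel-7-server-rpms|4.2.46-21.el7_3
+        # is keyed by its version-release string:
+        #   {'4.2.46-21.el7_3': {'version': '4.2.46', 'release': '21.el7_3',
+        #                        'arch': 'x86_64', 'repo': 'rhel-7-server-rpms',
+        #                        'version_release': '4.2.46-21.el7_3'}}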
+ return version_dict
+
+ def format_versions(self, formatted_versions):
+ ''' Gather and present the versions of each package '''
+
+ versions_dict = {}
+ versions_dict['available_versions_full'] = formatted_versions.keys()
+
+ # set the match version, if called
+ if self.match_version:
+ versions_dict['matched_versions_full'] = []
+ versions_dict['requested_match_version'] = self.match_version
+ versions_dict['matched_versions'] = []
+
+        # get the full version (version-release)
+ versions_dict['available_versions_full'].sort(key=LooseVersion)
+ versions_dict['latest_full'] = versions_dict['available_versions_full'][-1]
+
+        # get the short version (version)
+ versions_dict['available_versions'] = []
+ for version in versions_dict['available_versions_full']:
+ versions_dict['available_versions'].append(formatted_versions[version]['version'])
+
+ if self.match_version:
+ if version.startswith(self.match_version):
+ versions_dict['matched_versions_full'].append(version)
+ versions_dict['matched_versions'].append(formatted_versions[version]['version'])
+
+ versions_dict['available_versions'].sort(key=LooseVersion)
+ versions_dict['latest'] = versions_dict['available_versions'][-1]
+
+ # finish up the matched version
+ if self.match_version:
+ if versions_dict['matched_versions_full']:
+ versions_dict['matched_version_found'] = True
+ versions_dict['matched_versions'].sort(key=LooseVersion)
+ versions_dict['matched_version_latest'] = versions_dict['matched_versions'][-1]
+ versions_dict['matched_version_full_latest'] = versions_dict['matched_versions_full'][-1]
+ else:
+ versions_dict['matched_version_found'] = False
+ versions_dict['matched_versions'] = []
+ versions_dict['matched_version_latest'] = ""
+ versions_dict['matched_version_full_latest'] = ""
+
+ return versions_dict
+
+ def repoquery(self):
+ '''perform a repoquery '''
+
+ repoquery_cmd = self.build_cmd()
+
+ rval = self._repoquery_cmd(repoquery_cmd, True, 'raw')
+
+ # check to see if there are actual results
+ if rval['results']:
+ processed_versions = Repoquery.process_versions(rval['results'].strip())
+ formatted_versions = self.format_versions(processed_versions)
+
+ rval['package_found'] = True
+ rval['versions'] = formatted_versions
+ rval['package_name'] = self.name
+
+ if self.verbose:
+ rval['raw_versions'] = processed_versions
+ else:
+ del rval['results']
+
+ # No packages found
+ else:
+ rval['package_found'] = False
+
+ return rval
+
+ @staticmethod
+ def run_ansible(params, check_mode):
+ '''run the ansible idempotent code'''
+
+ repoquery = Repoquery(
+ params['name'],
+ params['query_type'],
+ params['show_duplicates'],
+ params['match_version'],
+ params['verbose'],
+ )
+
+ state = params['state']
+
+ if state == 'list':
+ results = repoquery.repoquery()
+
+ if results['returncode'] != 0:
+ return {'failed': True,
+ 'msg': results}
+
+ return {'changed': False, 'results': results, 'state': 'list', 'check_mode': check_mode}
+
+ return {'failed': True,
+ 'changed': False,
+ 'msg': 'Unknown state passed. %s' % state,
+ 'state': 'unknown'}
+
+# -*- -*- -*- End included fragment: class/repoquery.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: ansible/repoquery.py -*- -*- -*-
+
+
+def main():
+ '''
+ ansible repoquery module
+ '''
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(default='list', type='str', choices=['list']),
+ name=dict(default=None, required=True, type='str'),
+ query_type=dict(default='repos', required=False, type='str',
+ choices=[
+ 'installed', 'available', 'recent',
+ 'updates', 'extras', 'all', 'repos'
+ ]),
+ verbose=dict(default=False, required=False, type='bool'),
+ show_duplicates=dict(default=False, required=False, type='bool'),
+ match_version=dict(default=None, required=False, type='str'),
+ ),
+ supports_check_mode=False,
+ required_if=[('show_duplicates', True, ['name'])],
+ )
+
+ rval = Repoquery.run_ansible(module.params, module.check_mode)
+
+ if 'failed' in rval:
+ module.fail_json(**rval)
+
+ module.exit_json(**rval)
+
+
+if __name__ == "__main__":
+ main()
+
+# -*- -*- -*- End included fragment: ansible/repoquery.py -*- -*- -*-
diff --git a/roles/lib_utils/library/yedit.py b/roles/lib_utils/library/yedit.py
index 8a2bd92f9..7ad2b7181 100644
--- a/roles/lib_utils/library/yedit.py
+++ b/roles/lib_utils/library/yedit.py
@@ -24,18 +24,21 @@
# limitations under the License.
#
-# -*- -*- -*- Begin included fragment: class/import.py -*- -*- -*-
+# -*- -*- -*- Begin included fragment: lib/import.py -*- -*- -*-
-# pylint: disable=wrong-import-order
-import json
-import os
-import re
+# pylint: disable=wrong-import-order,wrong-import-position,unused-import
+
+from __future__ import print_function # noqa: F401
+import json # noqa: F401
+import os # noqa: F401
+import re # noqa: F401
# pylint: disable=import-error
-import ruamel.yaml as yaml
-import shutil
+import ruamel.yaml as yaml # noqa: F401
+import shutil # noqa: F401
+
from ansible.module_utils.basic import AnsibleModule
-# -*- -*- -*- End included fragment: class/import.py -*- -*- -*-
+# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: doc/yedit -*- -*- -*-
diff --git a/roles/lib_utils/src/ansible/repoquery.py b/roles/lib_utils/src/ansible/repoquery.py
new file mode 100644
index 000000000..cb4efa6c1
--- /dev/null
+++ b/roles/lib_utils/src/ansible/repoquery.py
@@ -0,0 +1,35 @@
+# pylint: skip-file
+# flake8: noqa
+
+
+def main():
+ '''
+ ansible repoquery module
+ '''
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(default='list', type='str', choices=['list']),
+ name=dict(default=None, required=True, type='str'),
+ query_type=dict(default='repos', required=False, type='str',
+ choices=[
+ 'installed', 'available', 'recent',
+ 'updates', 'extras', 'all', 'repos'
+ ]),
+ verbose=dict(default=False, required=False, type='bool'),
+ show_duplicates=dict(default=False, required=False, type='bool'),
+ match_version=dict(default=None, required=False, type='str'),
+ ),
+ supports_check_mode=False,
+ required_if=[('show_duplicates', True, ['name'])],
+ )
+
+ rval = Repoquery.run_ansible(module.params, module.check_mode)
+
+ if 'failed' in rval:
+ module.fail_json(**rval)
+
+ module.exit_json(**rval)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/roles/lib_utils/src/class/import.py b/roles/lib_utils/src/class/import.py
deleted file mode 100644
index 249e07228..000000000
--- a/roles/lib_utils/src/class/import.py
+++ /dev/null
@@ -1,11 +0,0 @@
-# flake8: noqa
-# pylint: skip-file
-
-# pylint: disable=wrong-import-order
-import json
-import os
-import re
-# pylint: disable=import-error
-import ruamel.yaml as yaml
-import shutil
-from ansible.module_utils.basic import AnsibleModule
diff --git a/roles/lib_utils/src/class/repoquery.py b/roles/lib_utils/src/class/repoquery.py
new file mode 100644
index 000000000..2447719e2
--- /dev/null
+++ b/roles/lib_utils/src/class/repoquery.py
@@ -0,0 +1,156 @@
+# pylint: skip-file
+# flake8: noqa
+
+
+class Repoquery(RepoqueryCLI):
+    ''' Class to wrap the repoquery command '''
+    # pylint: disable=too-many-arguments
+    def __init__(self, name, query_type, show_duplicates,
+                 match_version, verbose):
+        ''' Constructor for Repoquery '''
+ super(Repoquery, self).__init__(None)
+ self.name = name
+ self.query_type = query_type
+ self.show_duplicates = show_duplicates
+ self.match_version = match_version
+ self.verbose = verbose
+
+ if self.match_version:
+ self.show_duplicates = True
+
+ self.query_format = "%{version}|%{release}|%{arch}|%{repo}|%{version}-%{release}"
+
+ def build_cmd(self):
+ ''' build the repoquery cmd options '''
+
+ repo_cmd = []
+
+ repo_cmd.append("--pkgnarrow=" + self.query_type)
+ repo_cmd.append("--queryformat=" + self.query_format)
+
+ if self.show_duplicates:
+ repo_cmd.append('--show-duplicates')
+
+ repo_cmd.append(self.name)
+
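+        # Illustration (comment only): for name='bash', query_type='repos' and
+        # show_duplicates=True this returns
+        #   ['--pkgnarrow=repos',
+        #    '--queryformat=%{version}|%{release}|%{arch}|%{repo}|%{version}-%{release}',
+        #    '--show-duplicates', 'bash']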
+ return repo_cmd
+
+ @staticmethod
+ def process_versions(query_output):
+ ''' format the package data into something that can be presented '''
+
+ version_dict = defaultdict(dict)
+
+ for version in query_output.split('\n'):
+ pkg_info = version.split("|")
+
+ pkg_version = {}
+ pkg_version['version'] = pkg_info[0]
+ pkg_version['release'] = pkg_info[1]
+ pkg_version['arch'] = pkg_info[2]
+ pkg_version['repo'] = pkg_info[3]
+ pkg_version['version_release'] = pkg_info[4]
+
+ version_dict[pkg_info[4]] = pkg_version
+
+ return version_dict
+
+ def format_versions(self, formatted_versions):
+ ''' Gather and present the versions of each package '''
+
+ versions_dict = {}
+ versions_dict['available_versions_full'] = formatted_versions.keys()
+
+ # set the match version, if called
+ if self.match_version:
+ versions_dict['matched_versions_full'] = []
+ versions_dict['requested_match_version'] = self.match_version
+ versions_dict['matched_versions'] = []
+
+        # get the full version (version-release)
+ versions_dict['available_versions_full'].sort(key=LooseVersion)
+ versions_dict['latest_full'] = versions_dict['available_versions_full'][-1]
+
+        # get the short version (version)
+ versions_dict['available_versions'] = []
+ for version in versions_dict['available_versions_full']:
+ versions_dict['available_versions'].append(formatted_versions[version]['version'])
+
+ if self.match_version:
+ if version.startswith(self.match_version):
+ versions_dict['matched_versions_full'].append(version)
+ versions_dict['matched_versions'].append(formatted_versions[version]['version'])
+
+ versions_dict['available_versions'].sort(key=LooseVersion)
+ versions_dict['latest'] = versions_dict['available_versions'][-1]
+
+ # finish up the matched version
+ if self.match_version:
+ if versions_dict['matched_versions_full']:
+ versions_dict['matched_version_found'] = True
+ versions_dict['matched_versions'].sort(key=LooseVersion)
+ versions_dict['matched_version_latest'] = versions_dict['matched_versions'][-1]
+ versions_dict['matched_version_full_latest'] = versions_dict['matched_versions_full'][-1]
+ else:
+ versions_dict['matched_version_found'] = False
+ versions_dict['matched_versions'] = []
+ versions_dict['matched_version_latest'] = ""
+ versions_dict['matched_version_full_latest'] = ""
+
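+        # Illustration (comment only): with match_version='3.3', versions such
+        # as 3.3.0.32 and 3.3.1.7 land in matched_versions while 3.4.0.39 does
+        # not, so matched_version_latest becomes '3.3.1.7' (see Example 3 in
+        # doc/repoquery).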
+ return versions_dict
+
+ def repoquery(self):
+ '''perform a repoquery '''
+
+ repoquery_cmd = self.build_cmd()
+
+ rval = self._repoquery_cmd(repoquery_cmd, True, 'raw')
+
+ # check to see if there are actual results
+ if rval['results']:
+ processed_versions = Repoquery.process_versions(rval['results'].strip())
+ formatted_versions = self.format_versions(processed_versions)
+
+ rval['package_found'] = True
+ rval['versions'] = formatted_versions
+ rval['package_name'] = self.name
+
+ if self.verbose:
+ rval['raw_versions'] = processed_versions
+ else:
+ del rval['results']
+
+ # No packages found
+ else:
+ rval['package_found'] = False
+
+ return rval
+
+ @staticmethod
+ def run_ansible(params, check_mode):
+ '''run the ansible idempotent code'''
+
+ repoquery = Repoquery(
+ params['name'],
+ params['query_type'],
+ params['show_duplicates'],
+ params['match_version'],
+ params['verbose'],
+ )
+
+ state = params['state']
+
+ if state == 'list':
+ results = repoquery.repoquery()
+
+ if results['returncode'] != 0:
+ return {'failed': True,
+ 'msg': results}
+
+ return {'changed': False, 'results': results, 'state': 'list', 'check_mode': check_mode}
+
+ return {'failed': True,
+ 'changed': False,
+ 'msg': 'Unknown state passed. %s' % state,
+ 'state': 'unknown'}
diff --git a/roles/lib_utils/src/doc/repoquery b/roles/lib_utils/src/doc/repoquery
new file mode 100644
index 000000000..82e273a42
--- /dev/null
+++ b/roles/lib_utils/src/doc/repoquery
@@ -0,0 +1,275 @@
+# flake8: noqa
+# pylint: skip-file
+
+DOCUMENTATION = '''
+---
+module: repoquery
+short_description: Query package information from Yum repositories
+description:
+ - Query package information from Yum repositories.
+options:
+ state:
+ description:
+ - The expected state. Currently only supports list.
+ required: false
+ default: list
+ choices: ["list"]
+ aliases: []
+ name:
+ description:
+ - The name of the package to query
+ required: true
+ default: None
+ aliases: []
+ query_type:
+ description:
+    - Narrows the packages queried based on this value.
+ - If repos, it narrows the query to repositories defined on the machine.
+ - If installed, it narrows the query to only packages installed on the machine.
+ - If available, it narrows the query to packages that are available to be installed.
+    - If recent, it narrows the query to only recently added packages.
+ - If updates, it narrows the query to only packages that are updates to existing installed packages.
+ - If extras, it narrows the query to packages that are not present in any of the available repositories.
+ - If all, it queries all of the above.
+ required: false
+ default: repos
+ aliases: []
+ verbose:
+ description:
+ - Shows more detail for the requested query.
+ required: false
+ default: false
+ aliases: []
+ show_duplicates:
+ description:
+ - Shows multiple versions of a package.
+ required: false
+ default: false
+ aliases: []
+ match_version:
+ description:
+ - Match the specific version given to the package.
+ required: false
+ default: None
+ aliases: []
+author:
+- "Matt Woodson <mwoodson@redhat.com>"
+extends_documentation_fragment: []
+'''
+
+EXAMPLES = '''
+# Example 1: Get bash versions
+ - name: Get bash version
+ repoquery:
+ name: bash
+ show_duplicates: True
+ register: bash_out
+
+# Results:
+# ok: [localhost] => {
+# "bash_out": {
+# "changed": false,
+# "results": {
+# "cmd": "/usr/bin/repoquery --quiet --pkgnarrow=repos --queryformat=%{version}|%{release}|%{arch}|%{repo}|%{version}-%{release} --show-duplicates bash",
+# "package_found": true,
+# "package_name": "bash",
+# "returncode": 0,
+# "versions": {
+# "available_versions": [
+# "4.2.45",
+# "4.2.45",
+# "4.2.45",
+# "4.2.46",
+# "4.2.46",
+# "4.2.46",
+# "4.2.46"
+# ],
+# "available_versions_full": [
+# "4.2.45-5.el7",
+# "4.2.45-5.el7_0.2",
+# "4.2.45-5.el7_0.4",
+# "4.2.46-12.el7",
+# "4.2.46-19.el7",
+# "4.2.46-20.el7_2",
+# "4.2.46-21.el7_3"
+# ],
+# "latest": "4.2.46",
+# "latest_full": "4.2.46-21.el7_3"
+# }
+# },
+# "state": "present"
+# }
+# }
+
+
+
+# Example 2: Get bash versions verbosely
+ - name: Get bash versions verbosely
+ repoquery:
+ name: bash
+ show_duplicates: True
+ verbose: True
+ register: bash_out
+
+# Results:
+# ok: [localhost] => {
+# "bash_out": {
+# "changed": false,
+# "results": {
+# "cmd": "/usr/bin/repoquery --quiet --pkgnarrow=repos --queryformat=%{version}|%{release}|%{arch}|%{repo}|%{version}-%{release} --show-duplicates bash",
+# "package_found": true,
+# "package_name": "bash",
+# "raw_versions": {
+# "4.2.45-5.el7": {
+# "arch": "x86_64",
+# "release": "5.el7",
+# "repo": "rhel-7-server-rpms",
+# "version": "4.2.45",
+# "version_release": "4.2.45-5.el7"
+# },
+# "4.2.45-5.el7_0.2": {
+# "arch": "x86_64",
+# "release": "5.el7_0.2",
+# "repo": "rhel-7-server-rpms",
+# "version": "4.2.45",
+# "version_release": "4.2.45-5.el7_0.2"
+# },
+# "4.2.45-5.el7_0.4": {
+# "arch": "x86_64",
+# "release": "5.el7_0.4",
+# "repo": "rhel-7-server-rpms",
+# "version": "4.2.45",
+# "version_release": "4.2.45-5.el7_0.4"
+# },
+# "4.2.46-12.el7": {
+# "arch": "x86_64",
+# "release": "12.el7",
+# "repo": "rhel-7-server-rpms",
+# "version": "4.2.46",
+# "version_release": "4.2.46-12.el7"
+# },
+# "4.2.46-19.el7": {
+# "arch": "x86_64",
+# "release": "19.el7",
+# "repo": "rhel-7-server-rpms",
+# "version": "4.2.46",
+# "version_release": "4.2.46-19.el7"
+# },
+# "4.2.46-20.el7_2": {
+# "arch": "x86_64",
+# "release": "20.el7_2",
+# "repo": "rhel-7-server-rpms",
+# "version": "4.2.46",
+# "version_release": "4.2.46-20.el7_2"
+# },
+# "4.2.46-21.el7_3": {
+# "arch": "x86_64",
+# "release": "21.el7_3",
+# "repo": "rhel-7-server-rpms",
+# "version": "4.2.46",
+# "version_release": "4.2.46-21.el7_3"
+# }
+# },
+# "results": "4.2.45|5.el7|x86_64|rhel-7-server-rpms|4.2.45-5.el7\n4.2.45|5.el7_0.2|x86_64|rhel-7-server-rpms|4.2.45-5.el7_0.2\n4.2.45|5.el7_0.4|x86_64|rhel-7-server-rpms|4.2.45-5.el7_0.4\n4.2.46|12.el7|x86_64|rhel-7-server-rpms|4.2.46-12.el7\n4.2.46|19.el7|x86_64|rhel-7-server-rpms|4.2.46-19.el7\n4.2.46|20.el7_2|x86_64|rhel-7-server-rpms|4.2.46-20.el7_2\n4.2.46|21.el7_3|x86_64|rhel-7-server-rpms|4.2.46-21.el7_3\n",
+# "returncode": 0,
+# "versions": {
+# "available_versions": [
+# "4.2.45",
+# "4.2.45",
+# "4.2.45",
+# "4.2.46",
+# "4.2.46",
+# "4.2.46",
+# "4.2.46"
+# ],
+# "available_versions_full": [
+# "4.2.45-5.el7",
+# "4.2.45-5.el7_0.2",
+# "4.2.45-5.el7_0.4",
+# "4.2.46-12.el7",
+# "4.2.46-19.el7",
+# "4.2.46-20.el7_2",
+# "4.2.46-21.el7_3"
+# ],
+# "latest": "4.2.46",
+# "latest_full": "4.2.46-21.el7_3"
+# }
+# },
+# "state": "present"
+# }
+# }
+
+# Example 3: Match a specific version
+ - name: matched versions repoquery test
+ repoquery:
+ name: atomic-openshift
+ show_duplicates: True
+ match_version: 3.3
+ register: openshift_out
+
+# Result:
+
+# ok: [localhost] => {
+# "openshift_out": {
+# "changed": false,
+# "results": {
+# "cmd": "/usr/bin/repoquery --quiet --pkgnarrow=repos --queryformat=%{version}|%{release}|%{arch}|%{repo}|%{version}-%{release} --show-duplicates atomic-openshift",
+# "package_found": true,
+# "package_name": "atomic-openshift",
+# "returncode": 0,
+# "versions": {
+# "available_versions": [
+# "3.2.0.43",
+# "3.2.1.23",
+# "3.3.0.32",
+# "3.3.0.34",
+# "3.3.0.35",
+# "3.3.1.3",
+# "3.3.1.4",
+# "3.3.1.5",
+# "3.3.1.7",
+# "3.4.0.39"
+# ],
+# "available_versions_full": [
+# "3.2.0.43-1.git.0.672599f.el7",
+# "3.2.1.23-1.git.0.88a7a1d.el7",
+# "3.3.0.32-1.git.0.37bd7ea.el7",
+# "3.3.0.34-1.git.0.83f306f.el7",
+# "3.3.0.35-1.git.0.d7bd9b6.el7",
+# "3.3.1.3-1.git.0.86dc49a.el7",
+# "3.3.1.4-1.git.0.7c8657c.el7",
+# "3.3.1.5-1.git.0.62700af.el7",
+# "3.3.1.7-1.git.0.0988966.el7",
+# "3.4.0.39-1.git.0.5f32f06.el7"
+# ],
+# "latest": "3.4.0.39",
+# "latest_full": "3.4.0.39-1.git.0.5f32f06.el7",
+# "matched_version_found": true,
+# "matched_version_full_latest": "3.3.1.7-1.git.0.0988966.el7",
+# "matched_version_latest": "3.3.1.7",
+# "matched_versions": [
+# "3.3.0.32",
+# "3.3.0.34",
+# "3.3.0.35",
+# "3.3.1.3",
+# "3.3.1.4",
+# "3.3.1.5",
+# "3.3.1.7"
+# ],
+# "matched_versions_full": [
+# "3.3.0.32-1.git.0.37bd7ea.el7",
+# "3.3.0.34-1.git.0.83f306f.el7",
+# "3.3.0.35-1.git.0.d7bd9b6.el7",
+# "3.3.1.3-1.git.0.86dc49a.el7",
+# "3.3.1.4-1.git.0.7c8657c.el7",
+# "3.3.1.5-1.git.0.62700af.el7",
+# "3.3.1.7-1.git.0.0988966.el7"
+# ],
+# "requested_match_version": "3.3"
+# }
+# },
+# "state": "present"
+# }
+# }
+
+'''
diff --git a/roles/lib_utils/src/lib/import.py b/roles/lib_utils/src/lib/import.py
new file mode 100644
index 000000000..d892353a1
--- /dev/null
+++ b/roles/lib_utils/src/lib/import.py
@@ -0,0 +1,14 @@
+# flake8: noqa
+# pylint: skip-file
+
+# pylint: disable=wrong-import-order,wrong-import-position,unused-import
+
+from __future__ import print_function # noqa: F401
+import json # noqa: F401
+import os # noqa: F401
+import re # noqa: F401
+# pylint: disable=import-error
+import ruamel.yaml as yaml # noqa: F401
+import shutil # noqa: F401
+
+from ansible.module_utils.basic import AnsibleModule
diff --git a/roles/lib_utils/src/lib/repoquery.py b/roles/lib_utils/src/lib/repoquery.py
new file mode 100644
index 000000000..91ccd9815
--- /dev/null
+++ b/roles/lib_utils/src/lib/repoquery.py
@@ -0,0 +1,92 @@
+# pylint: skip-file
+# flake8: noqa
+
+'''
+ class that wraps the repoquery commands in a subprocess
+'''
+
+# pylint: disable=too-many-lines,wrong-import-position,wrong-import-order
+
+from collections import defaultdict # noqa: E402
+
+
+# pylint: disable=no-name-in-module,import-error
+# Reason: pylint errors with "No name 'version' in module 'distutils'".
+# This is a bug: https://github.com/PyCQA/pylint/issues/73
+from distutils.version import LooseVersion # noqa: E402
+
+import subprocess # noqa: E402
+
+
+class RepoqueryCLIError(Exception):
+ '''Exception class for repoquerycli'''
+ pass
+
+
+def _run(cmds):
+ ''' Actually executes the command. This makes mocking easier. '''
+ proc = subprocess.Popen(cmds,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+
+ stdout, stderr = proc.communicate()
+
+ return proc.returncode, stdout, stderr
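+
+# Keeping _run at module level gives unit tests a single subprocess boundary
+# to stub out; the unit tests patch it with @mock.patch('repoquery._run') and
+# a side_effect of (returncode, stdout, stderr) tuples.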
+
+
+# pylint: disable=too-few-public-methods
+class RepoqueryCLI(object):
+ ''' Class to wrap the command line tools '''
+ def __init__(self,
+ verbose=False):
+ ''' Constructor for RepoqueryCLI '''
+        self.verbose = verbose
+
+ def _repoquery_cmd(self, cmd, output=False, output_type='json'):
+ '''Base command for repoquery '''
+ cmds = ['/usr/bin/repoquery', '--plugins', '--quiet']
+
+ cmds.extend(cmd)
+
+ rval = {}
+ results = ''
+ err = None
+
+ if self.verbose:
+ print(' '.join(cmds))
+
+ returncode, stdout, stderr = _run(cmds)
+
+ rval = {
+ "returncode": returncode,
+ "results": results,
+ "cmd": ' '.join(cmds),
+ }
+
+ if returncode == 0:
+ if output:
+ if output_type == 'raw':
+ rval['results'] = stdout
+
+ if self.verbose:
+ print(stdout)
+ print(stderr)
+
+ if err:
+ rval.update({
+ "err": err,
+ "stderr": stderr,
+ "stdout": stdout,
+ "cmd": cmds
+ })
+
+ else:
+ rval.update({
+ "stderr": stderr,
+ "stdout": stdout,
+ "results": {},
+ })
+
+ return rval
diff --git a/roles/lib_utils/src/sources.yml b/roles/lib_utils/src/sources.yml
index 9cf3a0981..053b59f77 100644
--- a/roles/lib_utils/src/sources.yml
+++ b/roles/lib_utils/src/sources.yml
@@ -2,7 +2,16 @@
yedit.py:
- doc/generated
- doc/license
-- class/import.py
+- lib/import.py
- doc/yedit
- class/yedit.py
- ansible/yedit.py
+
+repoquery.py:
+- doc/generated
+- doc/license
+- lib/import.py
+- doc/repoquery
+- lib/repoquery.py
+- class/repoquery.py
+- ansible/repoquery.py
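+
+# Each generated module under library/ is the concatenation of the fragments
+# listed for it, in order (see the "included fragment" markers in
+# library/repoquery.py); edit the fragments rather than the generated file.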
diff --git a/roles/lib_utils/src/test/integration/repoquery.yml b/roles/lib_utils/src/test/integration/repoquery.yml
new file mode 100755
index 000000000..425324387
--- /dev/null
+++ b/roles/lib_utils/src/test/integration/repoquery.yml
@@ -0,0 +1,136 @@
+#!/usr/bin/ansible-playbook --module-path=../../../library/
+---
+- hosts: localhost
+ gather_facts: no
+
+ tasks:
+ - name: basic query test - Act
+ repoquery:
+ name: bash
+ register: rq_out
+
+ - name: Set a real package version to be used later
+ set_fact:
+ latest_available_bash_version: "{{ rq_out.results.versions.latest }}"
+ latest_available_full_bash_version: "{{ rq_out.results.versions.latest_full }}"
+
+ - name: basic query test - Assert
+ assert:
+ that:
+ - "rq_out.state == 'list'"
+ - "rq_out.changed == False"
+ - "rq_out.results.returncode == 0"
+ - "rq_out.results.package_found == True"
+ - "rq_out.results.package_name == 'bash'"
+ - "rq_out.results.versions.available_versions | length == 1"
+ - "rq_out.results.versions.available_versions_full | length == 1"
+ - "rq_out.results.versions.latest is defined"
+ - "rq_out.results.versions.latest in rq_out.results.versions.available_versions"
+ - "rq_out.results.versions.latest_full is defined"
+ - "rq_out.results.versions.latest_full in rq_out.results.versions.available_versions_full"
+
+ - name: show_duplicates query test - Act
+ repoquery:
+ name: bash
+ show_duplicates: True
+ register: rq_out
+
+ - name: show_duplicates query test - Assert
+ assert:
+ that:
+ - "rq_out.state == 'list'"
+ - "rq_out.changed == False"
+ - "rq_out.results.returncode == 0"
+ - "rq_out.results.package_found == True"
+ - "rq_out.results.package_name == 'bash'"
+ - "rq_out.results.versions.available_versions | length >= 1"
+ - "rq_out.results.versions.available_versions_full | length >= 1"
+ - "rq_out.results.versions.latest is defined"
+ - "rq_out.results.versions.latest in rq_out.results.versions.available_versions"
+ - "rq_out.results.versions.latest_full is defined"
+ - "rq_out.results.versions.latest_full in rq_out.results.versions.available_versions_full"
+
+ - name: show_duplicates verbose query test - Act
+ repoquery:
+ name: bash
+ show_duplicates: True
+ verbose: True
+ register: rq_out
+
+ - name: show_duplicates verbose query test - Assert
+ assert:
+ that:
+ - "rq_out.state == 'list'"
+ - "rq_out.changed == False"
+ - "rq_out.results.returncode == 0"
+ - "rq_out.results.package_found == True"
+ - "rq_out.results.package_name == 'bash'"
+ - "rq_out.results.raw_versions | length > 0"
+ - "rq_out.results.versions.available_versions | length > 0"
+ - "rq_out.results.versions.available_versions_full | length > 0"
+ - "rq_out.results.versions.latest is defined"
+ - "rq_out.results.versions.latest in rq_out.results.versions.available_versions"
+ - "rq_out.results.versions.latest_full is defined"
+ - "rq_out.results.versions.latest_full in rq_out.results.versions.available_versions_full"
+
+ - name: query package does not exist query test - Act
+ repoquery:
+ name: somemadeuppackagenamethatwontmatch
+ show_duplicates: True
+ register: rq_out
+
+ - name: query package does not exist query test - Assert
+ assert:
+ that:
+ - "rq_out.state == 'list'"
+ - "rq_out.changed == False"
+ - "rq_out.results.returncode == 0"
+ - "rq_out.results.package_found == False"
+ - "rq_out.results.results == ''"
+
+
+ - name: query match_version does not exist query test - Act
+ repoquery:
+ name: bash
+ show_duplicates: True
+ match_version: somemadeupversionnotexist
+ register: rq_out
+
+ - name: query match_version does not exist query test - Assert
+ assert:
+ that:
+ - "rq_out.state == 'list'"
+ - "rq_out.changed == False"
+ - "rq_out.results.returncode == 0"
+ - "rq_out.results.package_found == True"
+ - "rq_out.results.package_name == 'bash'"
+ - "rq_out.results.versions.matched_version_found == False"
+ - "rq_out.results.versions.available_versions | length > 0"
+ - "rq_out.results.versions.available_versions_full | length > 0"
+ - "rq_out.results.versions.latest is defined"
+ - "rq_out.results.versions.latest in rq_out.results.versions.available_versions"
+ - "rq_out.results.versions.latest_full is defined"
+ - "rq_out.results.versions.latest_full in rq_out.results.versions.available_versions_full"
+
+ - name: query match_version exists query test - Act
+ repoquery:
+ name: bash
+ show_duplicates: True
+ match_version: "{{ latest_available_bash_version }}"
+ register: rq_out
+
+ - name: query match_version exists query test - Assert
+ assert:
+ that:
+ - "rq_out.state == 'list'"
+ - "rq_out.changed == False"
+ - "rq_out.results.returncode == 0"
+ - "rq_out.results.package_found == True"
+ - "rq_out.results.package_name == 'bash'"
+ - "rq_out.results.versions.matched_version_found == True"
+ - "rq_out.results.versions.available_versions | length > 0"
+ - "rq_out.results.versions.available_versions_full | length > 0"
+ - "rq_out.results.versions.latest is defined"
+ - "rq_out.results.versions.latest in rq_out.results.versions.available_versions"
+ - "rq_out.results.versions.latest_full is defined"
+ - "rq_out.results.versions.latest_full in rq_out.results.versions.available_versions_full"
diff --git a/roles/lib_utils/src/test/unit/repoquery.py b/roles/lib_utils/src/test/unit/repoquery.py
new file mode 100755
index 000000000..c487ab254
--- /dev/null
+++ b/roles/lib_utils/src/test/unit/repoquery.py
@@ -0,0 +1,87 @@
+#!/usr/bin/env python2
+'''
+ Unit tests for repoquery
+'''
+# To run:
+# ./repoquery.py
+#
+# .
+# Ran 1 test in 0.002s
+#
+# OK
+
+import os
+import sys
+import unittest
+import mock
+
+# Disable pylint's invalid-name check for these tests so the
+# variable names can stay brief
+# pylint: disable=invalid-name,no-name-in-module
+# Disable import-error b/c our libraries aren't loaded in jenkins
+# pylint: disable=import-error,wrong-import-position
+# place class in our python path
+module_path = os.path.join('/'.join(os.path.realpath(__file__).split('/')[:-4]), 'library') # noqa: E501
+sys.path.insert(0, module_path)
+from repoquery import Repoquery # noqa: E402
+
+
+class RepoQueryTest(unittest.TestCase):
+ '''
+ Test class for RepoQuery
+ '''
+
+ def setUp(self):
+        ''' setUp: nothing to configure for these tests '''
+ pass
+
+ @mock.patch('repoquery._run')
+ def test_querying_a_package(self, mock_cmd):
+ ''' Testing querying a package '''
+
+ # Arrange
+
+ # run_ansible input parameters
+ params = {
+ 'state': 'list',
+ 'name': 'bash',
+ 'query_type': 'repos',
+ 'verbose': False,
+ 'show_duplicates': False,
+ 'match_version': None,
+ }
+
+ valid_stderr = '''Repo rhel-7-server-extras-rpms forced skip_if_unavailable=True due to: /etc/pki/entitlement/3268107132875399464-key.pem
+ Repo rhel-7-server-rpms forced skip_if_unavailable=True due to: /etc/pki/entitlement/4128505182875899164-key.pem''' # not real
+
+ # Return values of our mocked function call. These get returned once per call.
+ mock_cmd.side_effect = [
+ (0, '4.2.46|21.el7_3|x86_64|rhel-7-server-rpms|4.2.46-21.el7_3', valid_stderr), # first call to the mock
+ ]
+
+ # Act
+ results = Repoquery.run_ansible(params, False)
+
+ # Assert
+ self.assertEqual(results['state'], 'list')
+ self.assertFalse(results['changed'])
+ self.assertTrue(results['results']['package_found'])
+ self.assertEqual(results['results']['returncode'], 0)
+ self.assertEqual(results['results']['package_name'], 'bash')
+ self.assertEqual(results['results']['versions'], {'latest_full': '4.2.46-21.el7_3',
+ 'available_versions': ['4.2.46'],
+ 'available_versions_full': ['4.2.46-21.el7_3'],
+ 'latest': '4.2.46'})
+
+ # Making sure our mock was called as we expected
+ mock_cmd.assert_has_calls([
+ mock.call(['/usr/bin/repoquery', '--plugins', '--quiet', '--pkgnarrow=repos', '--queryformat=%{version}|%{release}|%{arch}|%{repo}|%{version}-%{release}', 'bash']),
+ ])
+
+ def tearDown(self):
+ '''TearDown method'''
+ pass
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/roles/openshift_examples/files/examples/v1.5/db-templates/postgresql-ephemeral-template.json b/roles/openshift_examples/files/examples/v1.5/db-templates/postgresql-ephemeral-template.json
index 0d0a2a629..1025ab056 100644
--- a/roles/openshift_examples/files/examples/v1.5/db-templates/postgresql-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v1.5/db-templates/postgresql-ephemeral-template.json
@@ -11,7 +11,7 @@
"tags": "database,postgresql"
}
},
- "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${POSTGRESQL_USER}\n Password: ${POSTGRESQL_PASSWORD}\n Database Name: ${POSTGRESQL_DATABASE}\n Connection URL: mysql://${DATABASE_SERVICE_NAME}:5432/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/blob/master/9.5.",
+ "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${POSTGRESQL_USER}\n Password: ${POSTGRESQL_PASSWORD}\n Database Name: ${POSTGRESQL_DATABASE}\n Connection URL: postgresql://${DATABASE_SERVICE_NAME}:5432/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/blob/master/9.5.",
"labels": {
"template": "postgresql-ephemeral-template"
},
diff --git a/roles/openshift_examples/files/examples/v1.5/db-templates/postgresql-persistent-template.json b/roles/openshift_examples/files/examples/v1.5/db-templates/postgresql-persistent-template.json
index 257726cfd..1968e727a 100644
--- a/roles/openshift_examples/files/examples/v1.5/db-templates/postgresql-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v1.5/db-templates/postgresql-persistent-template.json
@@ -11,7 +11,7 @@
"tags": "database,postgresql"
}
},
- "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${POSTGRESQL_USER}\n Password: ${POSTGRESQL_PASSWORD}\n Database Name: ${POSTGRESQL_DATABASE}\n Connection URL: mysql://${DATABASE_SERVICE_NAME}:5432/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/blob/master/9.5.",
+ "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${POSTGRESQL_USER}\n Password: ${POSTGRESQL_PASSWORD}\n Database Name: ${POSTGRESQL_DATABASE}\n Connection URL: postgresql://${DATABASE_SERVICE_NAME}:5432/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/blob/master/9.5.",
"labels": {
"template": "postgresql-persistent-template"
},
diff --git a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/README.md b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/README.md
index 62765e03d..f48d8d4a8 100644
--- a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/README.md
+++ b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/README.md
@@ -12,10 +12,15 @@ reference and supply your forked repository as the source-repository when
instantiating them.
* [CakePHP](https://raw.githubusercontent.com/openshift/cakephp-ex/master/openshift/templates/cakephp-mysql.json) - Provides a basic CakePHP application with a MySQL database. For more information see the [source repository](https://github.com/openshift/cakephp-ex).
+* [CakePHP persistent](https://raw.githubusercontent.com/openshift/cakephp-ex/master/openshift/templates/cakephp-mysql-persistent.json) - Provides a basic CakePHP application with a persistent MySQL database. Note: requires available persistent volumes. For more information see the [source repository](https://github.com/openshift/cakephp-ex).
* [Dancer](https://raw.githubusercontent.com/openshift/dancer-ex/master/openshift/templates/dancer-mysql.json) - Provides a basic Dancer (Perl) application with a MySQL database. For more information see the [source repository](https://github.com/openshift/dancer-ex).
+* [Dancer persistent](https://raw.githubusercontent.com/openshift/dancer-ex/master/openshift/templates/dancer-mysql-persistent.json) - Provides a basic Dancer (Perl) application with a persistent MySQL database. Note: requires available persistent volumes. For more information see the [source repository](https://github.com/openshift/dancer-ex).
* [Django](https://raw.githubusercontent.com/openshift/django-ex/master/openshift/templates/django-postgresql.json) - Provides a basic Django (Python) application with a PostgreSQL database. For more information see the [source repository](https://github.com/openshift/django-ex).
+* [Django persistent](https://raw.githubusercontent.com/openshift/django-ex/master/openshift/templates/django-postgresql-persistent.json) - Provides a basic Django (Python) application with a persistent PostgreSQL database. Note: requires available persistent volumes. For more information see the [source repository](https://github.com/openshift/django-ex).
* [NodeJS](https://raw.githubusercontent.com/openshift/nodejs-ex/master/openshift/templates/nodejs-mongodb.json) - Provides a basic NodeJS application with a MongoDB database. For more information see the [source repository](https://github.com/openshift/nodejs-ex).
+* [NodeJS persistent](https://raw.githubusercontent.com/openshift/nodejs-ex/master/openshift/templates/nodejs-mongodb-persistent.json) - Provides a basic NodeJS application with a persistent MongoDB database. Note: requires available persistent volumes. For more information see the [source repository](https://github.com/openshift/nodejs-ex).
* [Rails](https://raw.githubusercontent.com/openshift/rails-ex/master/openshift/templates/rails-postgresql.json) - Provides a basic Rails (Ruby) application with a PostgreSQL database. For more information see the [source repository](https://github.com/openshift/rails-ex).
+* [Rails persistent](https://raw.githubusercontent.com/openshift/rails-ex/master/openshift/templates/rails-postgresql-persistent.json) - Provides a basic Rails (Ruby) application with a persistent PostgreSQL database. Note: requires available persistent volumes. For more information see the [source repository](https://github.com/openshift/rails-ex).
Note: This file is processed by `hack/update-external-examples.sh`. New examples
must follow the exact syntax of the existing entries. Files in this directory
diff --git a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/cakephp-mysql-persistent.json b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/cakephp-mysql-persistent.json
new file mode 100644
index 000000000..0ba57864e
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/cakephp-mysql-persistent.json
@@ -0,0 +1,575 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "cakephp-mysql-persistent",
+ "annotations": {
+ "openshift.io/display-name": "CakePHP + MySQL (Persistent)",
+ "description": "An example CakePHP application with a MySQL database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/cakephp-ex/blob/master/README.md.",
+ "tags": "quickstart,php,cakephp",
+ "iconClass": "icon-php"
+ }
+ },
+ "message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/cake-ex/blob/master/README.md.",
+ "labels": {
+ "template": "cakephp-mysql-persistent"
+ },
+ "objects": [
+ {
+ "kind": "Secret",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}"
+ },
+ "stringData" : {
+ "database-user" : "${DATABASE_USER}",
+ "database-password" : "${DATABASE_PASSWORD}",
+ "cakephp-secret-token" : "${CAKEPHP_SECRET_TOKEN}",
+ "cakephp-security-salt" : "${CAKEPHP_SECURITY_SALT}",
+ "cakephp-security-cipher-seed" : "${CAKEPHP_SECURITY_CIPHER_SEED}"
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Exposes and load balances the application pods",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${DATABASE_SERVICE_NAME}\", \"kind\": \"Service\"}]"
+ }
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "web",
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "name": "${NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}"
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "kind": "Service",
+ "name": "${NAME}"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Keeps track of changes in the application image"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Defines how to build the application"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${NAMESPACE}",
+ "name": "php:7.0"
+ },
+ "env": [
+ {
+ "name": "COMPOSER_MIRROR",
+ "value": "${COMPOSER_MIRROR}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "ImageChange"
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Defines how to deploy the application server"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate",
+ "recreateParams": {
+ "pre": {
+ "failurePolicy": "Retry",
+ "execNewPod": {
+ "command": [
+ "./migrate-database.sh"
+ ],
+ "containerName": "cakephp-mysql-persistent"
+ }
+ }
+ }
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "cakephp-mysql-persistent"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${NAME}",
+ "labels": {
+ "name": "${NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "cakephp-mysql-persistent",
+ "image": " ",
+ "ports": [
+ {
+ "containerPort": 8080
+ }
+ ],
+ "readinessProbe": {
+ "timeoutSeconds": 3,
+ "initialDelaySeconds": 3,
+ "httpGet": {
+ "path": "/health.php",
+ "port": 8080
+ }
+ },
+ "livenessProbe": {
+ "timeoutSeconds": 3,
+ "initialDelaySeconds": 30,
+ "httpGet": {
+ "path": "/",
+ "port": 8080
+ }
+ },
+ "env": [
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "value": "${DATABASE_SERVICE_NAME}"
+ },
+ {
+ "name": "DATABASE_ENGINE",
+ "value": "${DATABASE_ENGINE}"
+ },
+ {
+ "name": "DATABASE_NAME",
+ "value": "${DATABASE_NAME}"
+ },
+ {
+ "name": "DATABASE_USER",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "database-user"
+ }
+ }
+ },
+ {
+ "name": "DATABASE_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "database-password"
+ }
+ }
+ },
+ {
+ "name": "CAKEPHP_SECRET_TOKEN",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "cakephp-secret-token"
+ }
+ }
+ },
+ {
+ "name": "CAKEPHP_SECURITY_SALT",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "cakephp-security-salt"
+ }
+ }
+ },
+ {
+ "name": "CAKEPHP_SECURITY_CIPHER_SEED",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "cakephp-security-cipher-seed"
+ }
+ }
+ },
+ {
+ "name": "OPCACHE_REVALIDATE_FREQ",
+ "value": "${OPCACHE_REVALIDATE_FREQ}"
+ }
+ ],
+ "resources": {
+ "limits": {
+ "memory": "${MEMORY_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "PersistentVolumeClaim",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "description": "Exposes the database server"
+ }
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "mysql",
+ "port": 3306,
+ "targetPort": 3306
+ }
+ ],
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "description": "Defines how to deploy the database"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "mysql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${NAMESPACE}",
+ "name": "mysql:5.7"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "labels": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ },
+ "spec": {
+ "volumes": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "persistentVolumeClaim": {
+ "claimName": "${DATABASE_SERVICE_NAME}"
+ }
+ }
+ ],
+ "containers": [
+ {
+ "name": "mysql",
+ "image": " ",
+ "ports": [
+ {
+ "containerPort": 3306
+ }
+ ],
+ "volumeMounts": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "mountPath": "/var/lib/mysql/data"
+ }
+ ],
+ "readinessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 5,
+ "exec": {
+ "command": [ "/bin/sh", "-i", "-c", "MYSQL_PWD='${DATABASE_PASSWORD}' mysql -h 127.0.0.1 -u ${DATABASE_USER} -D ${DATABASE_NAME} -e 'SELECT 1'" ]
+ }
+ },
+ "livenessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 30,
+ "tcpSocket": {
+ "port": 3306
+ }
+ },
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "database-user"
+ }
+ }
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "database-password"
+ }
+ }
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${DATABASE_NAME}"
+ }
+ ],
+ "resources": {
+ "limits": {
+ "memory": "${MEMORY_MYSQL_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ],
+ "parameters": [
+ {
+ "name": "NAME",
+ "displayName": "Name",
+ "description": "The name assigned to all of the frontend objects defined in this template.",
+ "required": true,
+ "value": "cakephp-mysql-persistent"
+ },
+ {
+ "name": "NAMESPACE",
+ "displayName": "Namespace",
+ "description": "The OpenShift Namespace where the ImageStream resides.",
+ "required": true,
+ "value": "openshift"
+ },
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory Limit",
+ "description": "Maximum amount of memory the CakePHP container can use.",
+ "required": true,
+ "value": "512Mi"
+ },
+ {
+ "name": "MEMORY_MYSQL_LIMIT",
+ "displayName": "Memory Limit (MySQL)",
+ "description": "Maximum amount of memory the MySQL container can use.",
+ "required": true,
+ "value": "512Mi"
+ },
+ {
+ "name": "VOLUME_CAPACITY",
+ "displayName": "Volume Capacity",
+ "description": "Volume space available for data, e.g. 512Mi, 2Gi",
+ "value": "1Gi",
+ "required": true
+ },
+ {
+ "name": "SOURCE_REPOSITORY_URL",
+ "displayName": "Git Repository URL",
+ "description": "The URL of the repository with your application source code.",
+ "required": true,
+ "value": "https://github.com/openshift/cakephp-ex.git"
+ },
+ {
+ "name": "SOURCE_REPOSITORY_REF",
+ "displayName": "Git Reference",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "CONTEXT_DIR",
+ "displayName": "Context Directory",
+ "description": "Set this to the relative path to your project if it is not in the root of your repository."
+ },
+ {
+ "name": "APPLICATION_DOMAIN",
+ "displayName": "Application Hostname",
+ "description": "The exposed hostname that will route to the CakePHP service, if left blank a value will be defaulted.",
+ "value": ""
+ },
+ {
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "displayName": "GitHub Webhook Secret",
+ "description": "A secret string used to configure the GitHub webhook.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "displayName": "Database Service Name",
+ "required": true,
+ "value": "mysql"
+ },
+ {
+ "name": "DATABASE_ENGINE",
+ "displayName": "Database Engine",
+ "description": "Database engine: postgresql, mysql or sqlite (default).",
+ "required": true,
+ "value": "mysql"
+ },
+ {
+ "name": "DATABASE_NAME",
+ "displayName": "Database Name",
+ "required": true,
+ "value": "default"
+ },
+ {
+ "name": "DATABASE_USER",
+ "displayName": "Database User",
+ "required": true,
+ "value": "cakephp"
+ },
+ {
+ "name": "DATABASE_PASSWORD",
+ "displayName": "Database Password",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{16}"
+ },
+ {
+ "name": "CAKEPHP_SECRET_TOKEN",
+ "displayName": "CakePHP secret token",
+ "description": "Set this to a long random string.",
+ "generate": "expression",
+ "from": "[\\w]{50}"
+ },
+ {
+ "name": "CAKEPHP_SECURITY_SALT",
+ "displayName": "CakePHP Security Salt",
+ "description": "Security salt for session hash.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "CAKEPHP_SECURITY_CIPHER_SEED",
+ "displayName": "CakePHP Security Cipher Seed",
+ "description": "Security cipher seed for session hash.",
+ "generate": "expression",
+ "from": "[0-9]{30}"
+ },
+ {
+ "name": "OPCACHE_REVALIDATE_FREQ",
+ "displayName": "OPcache Revalidation Frequency",
+ "description": "How often to check script timestamps for updates, in seconds. 0 will result in OPcache checking for updates on every request.",
+ "value": "2"
+ },
+ {
+ "name": "COMPOSER_MIRROR",
+ "displayName": "Custom Composer Mirror URL",
+ "description": "The custom Composer mirror URL",
+ "value": ""
+ }
+ ]
+}
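Several parameters in this template (GITHUB_WEBHOOK_SECRET, DATABASE_PASSWORD, the three CakePHP secrets) use `"generate": "expression"` with a character-class pattern in `from`. The real expansion happens server-side when the template is processed; the following Python sketch only mimics the simple `class{count}` form used above, as a hint of what the generated values look like:

```python
import random
import re
import string

# Character classes that appear in the "from" expressions above
# (the sketch covers only these; it is not a general regex generator).
CLASSES = {
    "[a-zA-Z0-9]": string.ascii_letters + string.digits,
    "[a-z0-9]": string.ascii_lowercase + string.digits,
    "[A-Z0-9]": string.ascii_uppercase + string.digits,
    "[0-9]": string.digits,
}

def expand(expression):
    """Expand patterns like 'user[A-Z0-9]{3}' into a random value."""
    def repl(match):
        alphabet = CLASSES[match.group(1)]
        count = int(match.group(2))
        return "".join(random.choice(alphabet) for _ in range(count))
    return re.sub(r"(\[[^\]]+\])\{(\d+)\}", repl, expression)

print(expand("[a-zA-Z0-9]{16}"))  # shaped like a generated DATABASE_PASSWORD
print(expand("user[A-Z0-9]{3}"))  # shaped like a generated database username
```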
diff --git a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/dancer-mysql-persistent.json b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/dancer-mysql-persistent.json
new file mode 100644
index 000000000..074561550
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/dancer-mysql-persistent.json
@@ -0,0 +1,519 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "dancer-mysql-persistent",
+ "annotations": {
+ "openshift.io/display-name": "Dancer + MySQL (Persistent)",
+ "description": "An example Dancer application with a MySQL database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/dancer-ex/blob/master/README.md.",
+ "tags": "quickstart,perl,dancer",
+ "iconClass": "icon-perl"
+ }
+ },
+ "message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/dancer-ex/blob/master/README.md.",
+ "labels": {
+ "template": "dancer-mysql-persistent"
+ },
+ "objects": [
+ {
+ "kind": "Secret",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}"
+ },
+ "stringData" : {
+ "database-user" : "${DATABASE_USER}",
+ "database-password" : "${DATABASE_PASSWORD}",
+ "keybase" : "${SECRET_KEY_BASE}"
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Exposes and load balances the application pods",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${DATABASE_SERVICE_NAME}\", \"kind\": \"Service\"}]"
+ }
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "web",
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "name": "${NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}"
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "kind": "Service",
+ "name": "${NAME}"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Keeps track of changes in the application image"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Defines how to build the application"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${NAMESPACE}",
+ "name": "perl:5.24"
+ },
+ "env": [
+ {
+ "name": "CPAN_MIRROR",
+ "value": "${CPAN_MIRROR}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "ImageChange"
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ }
+ ],
+ "postCommit": {
+ "script": "perl -I extlib/lib/perl5 -I lib t/*"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Defines how to deploy the application server"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "dancer-mysql-persistent"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${NAME}",
+ "labels": {
+ "name": "${NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "dancer-mysql-persistent",
+ "image": " ",
+ "ports": [
+ {
+ "containerPort": 8080
+ }
+ ],
+ "readinessProbe": {
+ "timeoutSeconds": 3,
+ "initialDelaySeconds": 3,
+ "httpGet": {
+ "path": "/health",
+ "port": 8080
+ }
+ },
+ "livenessProbe": {
+ "timeoutSeconds": 3,
+ "initialDelaySeconds": 30,
+ "httpGet": {
+ "path": "/",
+ "port": 8080
+ }
+ },
+ "env": [
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "value": "${DATABASE_SERVICE_NAME}"
+ },
+ {
+ "name": "MYSQL_USER",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "database-user"
+ }
+ }
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "database-password"
+ }
+ }
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${DATABASE_NAME}"
+ },
+ {
+ "name": "SECRET_KEY_BASE",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "keybase"
+ }
+ }
+ },
+ {
+ "name": "PERL_APACHE2_RELOAD",
+ "value": "${PERL_APACHE2_RELOAD}"
+ }
+ ],
+ "resources": {
+ "limits": {
+ "memory": "${MEMORY_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "PersistentVolumeClaim",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "description": "Exposes the database server"
+ }
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "mysql",
+ "port": 3306,
+ "targetPort": 3306
+ }
+ ],
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "description": "Defines how to deploy the database"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "mysql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${NAMESPACE}",
+ "name": "mysql:5.7"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "labels": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ },
+ "spec": {
+ "volumes": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "persistentVolumeClaim": {
+ "claimName": "${DATABASE_SERVICE_NAME}"
+ }
+ }
+ ],
+ "containers": [
+ {
+ "name": "mysql",
+ "image": " ",
+ "ports": [
+ {
+ "containerPort": 3306
+ }
+ ],
+ "volumeMounts": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "mountPath": "/var/lib/mysql/data"
+ }
+ ],
+ "readinessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 5,
+ "exec": {
+ "command": [ "/bin/sh", "-i", "-c", "MYSQL_PWD='${DATABASE_PASSWORD}' mysql -h 127.0.0.1 -u ${DATABASE_USER} -D ${DATABASE_NAME} -e 'SELECT 1'" ]
+ }
+ },
+ "livenessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 30,
+ "tcpSocket": {
+ "port": 3306
+ }
+ },
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "database-user"
+ }
+ }
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "database-password"
+ }
+ }
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${DATABASE_NAME}"
+ }
+ ],
+ "resources": {
+ "limits": {
+ "memory": "${MEMORY_MYSQL_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ],
+ "parameters": [
+ {
+ "name": "NAME",
+ "displayName": "Name",
+ "description": "The name assigned to all of the frontend objects defined in this template.",
+ "required": true,
+ "value": "dancer-mysql-persistent"
+ },
+ {
+ "name": "NAMESPACE",
+ "displayName": "Namespace",
+ "description": "The OpenShift Namespace where the ImageStream resides.",
+ "required": true,
+ "value": "openshift"
+ },
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory Limit",
+ "description": "Maximum amount of memory the Perl Dancer container can use.",
+ "required": true,
+ "value": "512Mi"
+ },
+ {
+ "name": "MEMORY_MYSQL_LIMIT",
+ "displayName": "Memory Limit (MySQL)",
+ "description": "Maximum amount of memory the MySQL container can use.",
+ "required": true,
+ "value": "512Mi"
+ },
+ {
+ "name": "VOLUME_CAPACITY",
+ "displayName": "Volume Capacity",
+ "description": "Volume space available for data, e.g. 512Mi, 2Gi",
+ "value": "1Gi",
+ "required": true
+ },
+ {
+ "name": "SOURCE_REPOSITORY_URL",
+ "displayName": "Git Repository URL",
+ "description": "The URL of the repository with your application source code.",
+ "required": true,
+ "value": "https://github.com/openshift/dancer-ex.git"
+ },
+ {
+ "name": "SOURCE_REPOSITORY_REF",
+ "displayName": "Git Reference",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "CONTEXT_DIR",
+ "displayName": "Context Directory",
+ "description": "Set this to the relative path to your project if it is not in the root of your repository."
+ },
+ {
+ "name": "APPLICATION_DOMAIN",
+ "displayName": "Application Hostname",
+ "description": "The exposed hostname that will route to the Dancer service, if left blank a value will be defaulted.",
+ "value": ""
+ },
+ {
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "displayName": "GitHub Webhook Secret",
+ "description": "A secret string used to configure the GitHub webhook.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "displayName": "Database Service Name",
+ "required": true,
+ "value": "database"
+ },
+ {
+ "name": "DATABASE_USER",
+ "displayName": "Database Username",
+ "generate": "expression",
+ "from": "user[A-Z0-9]{3}"
+ },
+ {
+ "name": "DATABASE_PASSWORD",
+ "displayName": "Database Password",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{8}"
+ },
+ {
+ "name": "DATABASE_NAME",
+ "displayName": "Database Name",
+ "required": true,
+ "value": "sampledb"
+ },
+ {
+ "name": "PERL_APACHE2_RELOAD",
+ "displayName": "Perl Module Reload",
+ "description": "Set this to \"true\" to enable automatic reloading of modified Perl modules.",
+ "value": ""
+ },
+ {
+ "name": "SECRET_KEY_BASE",
+ "displayName": "Secret Key",
+ "description": "Your secret key for verifying the integrity of signed cookies.",
+ "generate": "expression",
+ "from": "[a-z0-9]{127}"
+ },
+ {
+ "name": "CPAN_MIRROR",
+ "displayName": "Custom CPAN Mirror URL",
+ "description": "The custom CPAN mirror URL",
+ "value": ""
+ }
+ ]
+}
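Each of these templates stores its credentials in a Secret under `stringData` and reads them back through `secretKeyRef` environment entries. `stringData` is a write-time convenience: the API server base64-encodes it into the `data` field, and consumers always receive the decoded values. A small standalone sketch of that round trip, with placeholder values:

```python
import base64

# Hypothetical values shaped like the secrets these templates generate.
string_data = {"database-user": "userX9Q", "database-password": "s3cr3tpass"}

# What the API server stores under "data" after accepting "stringData":
data = {k: base64.b64encode(v.encode()).decode() for k, v in string_data.items()}

# What a container wired up via secretKeyRef actually sees:
decoded = {k: base64.b64decode(v).decode() for k, v in data.items()}
assert decoded == string_data
print(data)
```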
diff --git a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/django-postgresql-persistent.json b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/django-postgresql-persistent.json
new file mode 100644
index 000000000..b39771bd8
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/django-postgresql-persistent.json
@@ -0,0 +1,532 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "django-psql-persistent",
+ "annotations": {
+ "openshift.io/display-name": "Django + PostgreSQL (Persistent)",
+ "description": "An example Django application with a PostgreSQL database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/django-ex/blob/master/README.md.",
+ "tags": "quickstart,python,django",
+ "iconClass": "icon-python"
+ }
+ },
+ "message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/django-ex/blob/master/README.md.",
+ "labels": {
+ "template": "django-psql-persistent"
+ },
+ "objects": [
+ {
+ "kind": "Secret",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}"
+ },
+ "stringData" : {
+ "database-user" : "${DATABASE_USER}",
+ "database-password" : "${DATABASE_PASSWORD}",
+ "django-secret-key" : "${DJANGO_SECRET_KEY}"
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Exposes and load balances the application pods",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${DATABASE_SERVICE_NAME}\", \"kind\": \"Service\"}]"
+ }
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "web",
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "name": "${NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}"
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "kind": "Service",
+ "name": "${NAME}"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Keeps track of changes in the application image"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Defines how to build the application"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${NAMESPACE}",
+ "name": "python:3.5"
+ },
+ "env": [
+ {
+ "name": "PIP_INDEX_URL",
+ "value": "${PIP_INDEX_URL}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "ImageChange"
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ }
+ ],
+ "postCommit": {
+ "script": "./manage.py test"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Defines how to deploy the application server"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Rolling"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "django-psql-persistent"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${NAME}",
+ "labels": {
+ "name": "${NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "django-psql-persistent",
+ "image": " ",
+ "ports": [
+ {
+ "containerPort": 8080
+ }
+ ],
+ "readinessProbe": {
+ "timeoutSeconds": 3,
+ "initialDelaySeconds": 3,
+ "httpGet": {
+ "path": "/health",
+ "port": 8080
+ }
+ },
+ "livenessProbe": {
+ "timeoutSeconds": 3,
+ "initialDelaySeconds": 30,
+ "httpGet": {
+ "path": "/health",
+ "port": 8080
+ }
+ },
+ "env": [
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "value": "${DATABASE_SERVICE_NAME}"
+ },
+ {
+ "name": "DATABASE_ENGINE",
+ "value": "${DATABASE_ENGINE}"
+ },
+ {
+ "name": "DATABASE_NAME",
+ "value": "${DATABASE_NAME}"
+ },
+ {
+ "name": "DATABASE_USER",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "database-user"
+ }
+ }
+ },
+ {
+ "name": "DATABASE_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "database-password"
+ }
+ }
+ },
+ {
+ "name": "APP_CONFIG",
+ "value": "${APP_CONFIG}"
+ },
+ {
+ "name": "DJANGO_SECRET_KEY",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "django-secret-key"
+ }
+ }
+ }
+ ],
+ "resources": {
+ "limits": {
+ "memory": "${MEMORY_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "PersistentVolumeClaim",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "description": "Exposes the database server"
+ }
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "postgresql",
+ "port": 5432,
+ "targetPort": 5432
+ }
+ ],
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "description": "Defines how to deploy the database"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${NAMESPACE}",
+ "name": "postgresql:9.5"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "labels": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ },
+ "spec": {
+ "volumes": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "persistentVolumeClaim": {
+ "claimName": "${DATABASE_SERVICE_NAME}"
+ }
+ }
+ ],
+ "containers": [
+ {
+ "name": "postgresql",
+ "image": " ",
+ "ports": [
+ {
+ "containerPort": 5432
+ }
+ ],
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "database-user"
+ }
+ }
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "database-password"
+ }
+ }
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DATABASE_NAME}"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "mountPath": "/var/lib/pgsql/data"
+ }
+ ],
+ "readinessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 5,
+ "exec": {
+ "command": [ "/bin/sh", "-i", "-c", "psql -h 127.0.0.1 -U ${POSTGRESQL_USER} -q -d ${POSTGRESQL_DATABASE} -c 'SELECT 1'"]
+ }
+ },
+ "livenessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 30,
+ "tcpSocket": {
+ "port": 5432
+ }
+ },
+ "resources": {
+ "limits": {
+ "memory": "${MEMORY_POSTGRESQL_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ],
+ "parameters": [
+ {
+ "name": "NAME",
+ "displayName": "Name",
+ "description": "The name assigned to all of the frontend objects defined in this template.",
+ "required": true,
+ "value": "django-psql-persistent"
+ },
+ {
+ "name": "NAMESPACE",
+ "displayName": "Namespace",
+ "required": true,
+ "description": "The OpenShift Namespace where the ImageStream resides.",
+ "value": "openshift"
+ },
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory Limit",
+ "required": true,
+ "description": "Maximum amount of memory the Django container can use.",
+ "value": "512Mi"
+ },
+ {
+ "name": "MEMORY_POSTGRESQL_LIMIT",
+ "displayName": "Memory Limit (PostgreSQL)",
+ "required": true,
+ "description": "Maximum amount of memory the PostgreSQL container can use.",
+ "value": "512Mi"
+ },
+ {
+ "name": "VOLUME_CAPACITY",
+ "displayName": "Volume Capacity",
+ "description": "Volume space available for data, e.g. 512Mi, 2Gi",
+ "value": "1Gi",
+ "required": true
+ },
+ {
+ "name": "SOURCE_REPOSITORY_URL",
+ "displayName": "Git Repository URL",
+ "required": true,
+ "description": "The URL of the repository with your application source code.",
+ "value": "https://github.com/openshift/django-ex.git"
+ },
+ {
+ "name": "SOURCE_REPOSITORY_REF",
+ "displayName": "Git Reference",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "CONTEXT_DIR",
+ "displayName": "Context Directory",
+ "description": "Set this to the relative path to your project if it is not in the root of your repository."
+ },
+ {
+ "name": "APPLICATION_DOMAIN",
+ "displayName": "Application Hostname",
+ "description": "The exposed hostname that will route to the Django service, if left blank a value will be defaulted.",
+ "value": ""
+ },
+ {
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "displayName": "GitHub Webhook Secret",
+ "description": "A secret string used to configure the GitHub webhook.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "displayName": "Database Service Name",
+ "required": true,
+ "value": "postgresql"
+ },
+ {
+ "name": "DATABASE_ENGINE",
+ "displayName": "Database Engine",
+ "required": true,
+ "description": "Database engine: postgresql, mysql or sqlite (default).",
+ "value": "postgresql"
+ },
+ {
+ "name": "DATABASE_NAME",
+ "displayName": "Database Name",
+ "required": true,
+ "value": "default"
+ },
+ {
+ "name": "DATABASE_USER",
+ "displayName": "Database Username",
+ "required": true,
+ "value": "django"
+ },
+ {
+ "name": "DATABASE_PASSWORD",
+ "displayName": "Database User Password",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{16}"
+ },
+ {
+ "name": "APP_CONFIG",
+ "displayName": "Application Configuration File Path",
+ "description": "Relative path to Gunicorn configuration file (optional)."
+ },
+ {
+ "name": "DJANGO_SECRET_KEY",
+ "displayName": "Django Secret Key",
+ "description": "Set this to a long random string.",
+ "generate": "expression",
+ "from": "[\\w]{50}"
+ },
+ {
+ "name": "PIP_INDEX_URL",
+ "displayName": "Custom PyPi Index URL",
+ "description": "The custom PyPi index URL",
+ "value": ""
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/nodejs-mongodb-persistent.json b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/nodejs-mongodb-persistent.json
new file mode 100644
index 000000000..fecb84662
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/nodejs-mongodb-persistent.json
@@ -0,0 +1,541 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "nodejs-mongo-persistent",
+ "annotations": {
+ "openshift.io/display-name": "Node.js + MongoDB (Persistent)",
+ "description": "An example Node.js application with a MongoDB database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/nodejs-ex/blob/master/README.md.",
+ "tags": "quickstart,nodejs",
+ "iconClass": "icon-nodejs"
+ }
+ },
+ "message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/nodejs-ex/blob/master/README.md.",
+ "labels": {
+ "template": "nodejs-mongo-persistent"
+ },
+ "objects": [
+ {
+ "kind": "Secret",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}"
+ },
+ "stringData": {
+ "database-user": "${DATABASE_USER}",
+ "database-password": "${DATABASE_PASSWORD}",
+ "database-admin-password" : "${DATABASE_ADMIN_PASSWORD}"
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Exposes and load balances the application pods",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${DATABASE_SERVICE_NAME}\", \"kind\": \"Service\"}]"
+ }
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "web",
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "name": "${NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}"
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "kind": "Service",
+ "name": "${NAME}"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Keeps track of changes in the application image"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Defines how to build the application"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${NAMESPACE}",
+ "name": "nodejs:4"
+ },
+ "env": [
+ {
+ "name": "NPM_MIRROR",
+ "value": "${NPM_MIRROR}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "ImageChange"
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ }
+ ],
+ "postCommit": {
+ "script": "npm test"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Defines how to deploy the application server"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Rolling"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "nodejs-mongo-persistent"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${NAME}",
+ "labels": {
+ "name": "${NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "nodejs-mongo-persistent",
+ "image": " ",
+ "ports": [
+ {
+ "containerPort": 8080
+ }
+ ],
+ "env": [
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "value": "${DATABASE_SERVICE_NAME}"
+ },
+ {
+ "name": "MONGODB_USER",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "database-user"
+ }
+ }
+ },
+ {
+ "name": "MONGODB_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "database-password"
+ }
+ }
+ },
+ {
+ "name": "MONGODB_DATABASE",
+ "value": "${DATABASE_NAME}"
+ },
+ {
+ "name": "MONGODB_ADMIN_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "database-admin-password"
+ }
+ }
+ }
+ ],
+ "readinessProbe": {
+ "timeoutSeconds": 3,
+ "initialDelaySeconds": 3,
+ "httpGet": {
+ "path": "/pagecount",
+ "port": 8080
+ }
+ },
+ "livenessProbe": {
+ "timeoutSeconds": 3,
+ "initialDelaySeconds": 30,
+ "httpGet": {
+ "path": "/pagecount",
+ "port": 8080
+ }
+ },
+ "resources": {
+ "limits": {
+ "memory": "${MEMORY_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "PersistentVolumeClaim",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "description": "Exposes the database server"
+ }
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "mongodb",
+ "port": 27017,
+ "targetPort": 27017
+ }
+ ],
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "description": "Defines how to deploy the database"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "mongodb"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${NAMESPACE}",
+ "name": "mongodb:3.2"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "labels": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "mongodb",
+ "image": " ",
+ "ports": [
+ {
+ "containerPort": 27017
+ }
+ ],
+ "env": [
+ {
+ "name": "MONGODB_USER",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "database-user"
+ }
+ }
+ },
+ {
+ "name": "MONGODB_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "database-password"
+ }
+ }
+ },
+ {
+ "name": "MONGODB_DATABASE",
+ "value": "${DATABASE_NAME}"
+ },
+ {
+ "name": "MONGODB_ADMIN_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "database-admin-password"
+ }
+ }
+ }
+ ],
+ "readinessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 3,
+ "exec": {
+ "command": [
+ "/bin/sh",
+ "-i",
+ "-c",
+ "mongo 127.0.0.1:27017/$MONGODB_DATABASE -u $MONGODB_USER -p $MONGODB_PASSWORD --eval=\"quit()\""
+ ]
+ }
+ },
+ "livenessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 30,
+ "tcpSocket": {
+ "port": 27017
+ }
+ },
+ "resources": {
+ "limits": {
+ "memory": "${MEMORY_MONGODB_LIMIT}"
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "mountPath": "/var/lib/mongodb/data"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "persistentVolumeClaim": {
+ "claimName": "${DATABASE_SERVICE_NAME}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ],
+ "parameters": [
+ {
+ "name": "NAME",
+ "displayName": "Name",
+ "description": "The name assigned to all of the frontend objects defined in this template.",
+ "required": true,
+ "value": "nodejs-mongo-persistent"
+ },
+ {
+ "name": "NAMESPACE",
+ "displayName": "Namespace",
+ "description": "The OpenShift Namespace where the ImageStream resides.",
+ "required": true,
+ "value": "openshift"
+ },
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory Limit",
+ "description": "Maximum amount of memory the Node.js container can use.",
+ "required": true,
+ "value": "512Mi"
+ },
+ {
+ "name": "MEMORY_MONGODB_LIMIT",
+ "displayName": "Memory Limit (MongoDB)",
+ "description": "Maximum amount of memory the MongoDB container can use.",
+ "required": true,
+ "value": "512Mi"
+ },
+ {
+ "name": "VOLUME_CAPACITY",
+ "displayName": "Volume Capacity",
+ "description": "Volume space available for data, e.g. 512Mi, 2Gi",
+ "value": "1Gi",
+ "required": true
+ },
+ {
+ "name": "SOURCE_REPOSITORY_URL",
+ "displayName": "Git Repository URL",
+ "description": "The URL of the repository with your application source code.",
+ "required": true,
+ "value": "https://github.com/openshift/nodejs-ex.git"
+ },
+ {
+ "name": "SOURCE_REPOSITORY_REF",
+ "displayName": "Git Reference",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "CONTEXT_DIR",
+ "displayName": "Context Directory",
+ "description": "Set this to the relative path to your project if it is not in the root of your repository."
+ },
+ {
+ "name": "APPLICATION_DOMAIN",
+ "displayName": "Application Hostname",
+ "description": "The exposed hostname that will route to the Node.js service, if left blank a value will be defaulted.",
+ "value": ""
+ },
+ {
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "displayName": "GitHub Webhook Secret",
+ "description": "A secret string used to configure the GitHub webhook.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "displayName": "Generic Webhook Secret",
+ "description": "A secret string used to configure the Generic webhook.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "displayName": "Database Service Name",
+ "required": true,
+ "value": "mongodb"
+ },
+ {
+ "name": "DATABASE_USER",
+ "displayName": "MongoDB Username",
+ "description": "Username for MongoDB user that will be used for accessing the database.",
+ "generate": "expression",
+ "from": "user[A-Z0-9]{3}"
+ },
+ {
+ "name": "DATABASE_PASSWORD",
+ "displayName": "MongoDB Password",
+ "description": "Password for the MongoDB user.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{16}"
+ },
+ {
+ "name": "DATABASE_NAME",
+ "displayName": "Database Name",
+ "required": true,
+ "value": "sampledb"
+ },
+ {
+ "name": "DATABASE_ADMIN_PASSWORD",
+ "displayName": "Database Administrator Password",
+ "description": "Password for the database admin user.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{16}"
+ },
+ {
+ "name": "NPM_MIRROR",
+ "displayName": "Custom NPM Mirror URL",
+ "description": "The custom NPM mirror URL",
+ "value": ""
+ }
+ ]
+}
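One detail that recurs in every template here: the containers declare `"image": " "`. The field is deliberately left blank because the `ImageChange` trigger patches in the resolved `ImageStreamTag` reference at deployment time. A rough sketch of that substitution, using a made-up registry reference; the real resolution happens inside OpenShift itself:

```python
# Containers start with a blank image; the trigger supplies the real one.
containers = [{"name": "nodejs-mongo-persistent", "image": " "}]

# Hypothetical resolution of the ImageStreamTag named in the trigger.
resolved = {"nodejs-mongo-persistent": "registry.example.com/proj/nodejs@sha256:abc123"}

for c in containers:
    if not c["image"].strip():  # blank placeholder, waiting on the trigger
        c["image"] = resolved[c["name"]]

print(containers[0]["image"])
```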
diff --git a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/rails-postgresql-persistent.json b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/rails-postgresql-persistent.json
new file mode 100644
index 000000000..6c0a484b5
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/rails-postgresql-persistent.json
@@ -0,0 +1,598 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "rails-pgsql-persistent",
+ "annotations": {
+ "openshift.io/display-name": "Rails + PostgreSQL (Persistent)",
+ "description": "An example Rails application with a PostgreSQL database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/rails-ex/blob/master/README.md.",
+ "tags": "quickstart,ruby,rails",
+ "iconClass": "icon-ruby"
+ }
+ },
+ "message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/rails-ex/blob/master/README.md.",
+ "labels": {
+ "template": "rails-pgsql-persistent"
+ },
+ "objects": [
+ {
+ "kind": "Secret",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}"
+ },
+ "stringData" : {
+ "database-user" : "${DATABASE_USER}",
+ "database-password" : "${DATABASE_PASSWORD}",
+ "application-user" : "${APPLICATION_USER}",
+ "application-password" : "${APPLICATION_PASSWORD}",
+ "keybase" : "${SECRET_KEY_BASE}"
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Exposes and load balances the application pods",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${DATABASE_SERVICE_NAME}\", \"kind\": \"Service\"}]"
+ }
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "web",
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "name": "${NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}"
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "kind": "Service",
+ "name": "${NAME}"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Keeps track of changes in the application image"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Defines how to build the application"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${NAMESPACE}",
+ "name": "ruby:2.3"
+ },
+ "env": [
+ {
+ "name": "RUBYGEM_MIRROR",
+ "value": "${RUBYGEM_MIRROR}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "ImageChange"
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ }
+ ],
+ "postCommit": {
+ "script": "bundle exec rake test"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Defines how to deploy the application server"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate",
+ "recreateParams": {
+ "pre": {
+ "failurePolicy": "Abort",
+ "execNewPod": {
+ "command": [
+ "./migrate-database.sh"
+ ],
+ "containerName": "${NAME}"
+ }
+ }
+ }
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "rails-pgsql-persistent"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${NAME}",
+ "labels": {
+ "name": "${NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "rails-pgsql-persistent",
+ "image": " ",
+ "ports": [
+ {
+ "containerPort": 8080
+ }
+ ],
+ "readinessProbe": {
+ "timeoutSeconds": 3,
+ "initialDelaySeconds": 5,
+ "httpGet": {
+ "path": "/articles",
+ "port": 8080
+ }
+ },
+ "livenessProbe": {
+ "timeoutSeconds": 3,
+ "initialDelaySeconds": 10,
+ "httpGet": {
+ "path": "/articles",
+ "port": 8080
+ }
+ },
+ "env": [
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "value": "${DATABASE_SERVICE_NAME}"
+ },
+ {
+ "name": "POSTGRESQL_USER",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "database-user"
+ }
+ }
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "database-password"
+ }
+ }
+ },
+ {
+ "name": "SECRET_KEY_BASE",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "keybase"
+ }
+ }
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DATABASE_NAME}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "value": "${POSTGRESQL_SHARED_BUFFERS}"
+ },
+ {
+ "name": "APPLICATION_DOMAIN",
+ "value": "${APPLICATION_DOMAIN}"
+ },
+ {
+ "name": "APPLICATION_USER",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "application-user"
+ }
+ }
+ },
+ {
+ "name": "APPLICATION_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "application-password"
+ }
+ }
+ },
+ {
+ "name": "RAILS_ENV",
+ "value": "${RAILS_ENV}"
+ }
+ ],
+ "resources": {
+ "limits": {
+ "memory": "${MEMORY_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "PersistentVolumeClaim",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "description": "Exposes the database server"
+ }
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "postgresql",
+ "port": 5432,
+ "targetPort": 5432
+ }
+ ],
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "description": "Defines how to deploy the database"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${NAMESPACE}",
+ "name": "postgresql:9.5"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "labels": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ },
+ "spec": {
+ "volumes": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "persistentVolumeClaim": {
+ "claimName": "${DATABASE_SERVICE_NAME}"
+ }
+ }
+ ],
+ "containers": [
+ {
+ "name": "postgresql",
+ "image": " ",
+ "ports": [
+ {
+ "containerPort": 5432
+ }
+ ],
+ "readinessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 5,
+ "exec": {
+ "command": [ "/bin/sh", "-i", "-c", "psql -h 127.0.0.1 -U ${POSTGRESQL_USER} -q -d ${POSTGRESQL_DATABASE} -c 'SELECT 1'"]
+ }
+ },
+ "livenessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 30,
+ "tcpSocket": {
+ "port": 5432
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "mountPath": "/var/lib/pgsql/data"
+ }
+ ],
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "database-user"
+ }
+ }
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "database-password"
+ }
+ }
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DATABASE_NAME}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "value": "${POSTGRESQL_SHARED_BUFFERS}"
+ }
+ ],
+ "resources": {
+ "limits": {
+ "memory": "${MEMORY_POSTGRESQL_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ],
+ "parameters": [
+ {
+ "name": "NAME",
+ "displayName": "Name",
+ "description": "The name assigned to all of the frontend objects defined in this template.",
+ "required": true,
+ "value": "rails-pgsql-persistent"
+ },
+ {
+ "name": "NAMESPACE",
+ "displayName": "Namespace",
+ "required": true,
+ "description": "The OpenShift Namespace where the ImageStream resides.",
+ "value": "openshift"
+ },
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory Limit",
+ "required": true,
+ "description": "Maximum amount of memory the Rails container can use.",
+ "value": "512Mi"
+ },
+ {
+ "name": "MEMORY_POSTGRESQL_LIMIT",
+ "displayName": "Memory Limit (PostgreSQL)",
+ "required": true,
+ "description": "Maximum amount of memory the PostgreSQL container can use.",
+ "value": "512Mi"
+ },
+ {
+ "name": "VOLUME_CAPACITY",
+ "displayName": "Volume Capacity",
+ "description": "Volume space available for data, e.g. 512Mi, 2Gi",
+ "value": "1Gi",
+ "required": true
+ },
+ {
+ "name": "SOURCE_REPOSITORY_URL",
+ "displayName": "Git Repository URL",
+ "required": true,
+ "description": "The URL of the repository with your application source code.",
+ "value": "https://github.com/openshift/rails-ex.git"
+ },
+ {
+ "name": "SOURCE_REPOSITORY_REF",
+ "displayName": "Git Reference",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "CONTEXT_DIR",
+ "displayName": "Context Directory",
+ "description": "Set this to the relative path to your project if it is not in the root of your repository."
+ },
+ {
+ "name": "APPLICATION_DOMAIN",
+ "displayName": "Application Hostname",
+ "description": "The exposed hostname that will route to the Rails service, if left blank a value will be defaulted.",
+ "value": ""
+ },
+ {
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "displayName": "GitHub Webhook Secret",
+ "description": "A secret string used to configure the GitHub webhook.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "SECRET_KEY_BASE",
+ "displayName": "Secret Key",
+ "description": "Your secret key for verifying the integrity of signed cookies.",
+ "generate": "expression",
+ "from": "[a-z0-9]{127}"
+ },
+ {
+ "name": "APPLICATION_USER",
+ "displayName": "Application Username",
+ "required": true,
+ "description": "The application user that is used within the sample application to authorize access on pages.",
+ "value": "openshift"
+ },
+ {
+ "name": "APPLICATION_PASSWORD",
+ "displayName": "Application Password",
+ "required": true,
+ "description": "The application password that is used within the sample application to authorize access on pages.",
+ "value": "secret"
+ },
+ {
+ "name": "RAILS_ENV",
+ "displayName": "Rails Environment",
+ "required": true,
+ "description": "Environment under which the sample application will run. Could be set to production, development or test.",
+ "value": "production"
+ },
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "required": true,
+ "displayName": "Database Service Name",
+ "value": "postgresql"
+ },
+ {
+ "name": "DATABASE_USER",
+ "displayName": "Database Username",
+ "generate": "expression",
+ "from": "user[A-Z0-9]{3}"
+ },
+ {
+ "name": "DATABASE_PASSWORD",
+ "displayName": "Database Password",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{8}"
+ },
+ {
+ "name": "DATABASE_NAME",
+ "required": true,
+ "displayName": "Database Name",
+ "value": "root"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "displayName": "Maximum Database Connections",
+ "value": "100"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "displayName": "Shared Buffer Amount",
+ "value": "12MB"
+ },
+ {
+ "name": "RUBYGEM_MIRROR",
+ "displayName": "Custom RubyGems Mirror URL",
+ "description": "The custom RubyGems mirror URL",
+ "value": ""
+ }
+ ]
+}
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index 3c8e2ab9c..7c61da950 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -195,8 +195,7 @@ def hostname_valid(hostname):
if (not hostname or
hostname.startswith('localhost') or
hostname.endswith('localdomain') or
- hostname.endswith('novalocal') or
- len(hostname.split('.')) < 2):
+ hostname.endswith('novalocal')):
return False
return True
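This first hunk drops the `len(hostname.split('.')) < 2` test, so single-label names, like the short GCE instance names produced below, now pass validation. A quick sketch of old versus new behavior (a sketch of the predicate, not the module itself):

```python
def hostname_valid_old(hostname):
    return not (not hostname or hostname.startswith('localhost')
                or hostname.endswith('localdomain')
                or hostname.endswith('novalocal')
                or len(hostname.split('.')) < 2)

def hostname_valid_new(hostname):
    return not (not hostname or hostname.startswith('localhost')
                or hostname.endswith('localdomain')
                or hostname.endswith('novalocal'))

print(hostname_valid_old("instance-1"))  # False: rejected for lacking a dot
print(hostname_valid_new("instance-1"))  # True: short GCE names now pass
```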
@@ -332,7 +331,8 @@ def normalize_gce_facts(metadata, facts):
facts['network']['ip'] = facts['network']['interfaces'][0]['ips'][0]
pub_ip = facts['network']['interfaces'][0]['public_ips'][0]
facts['network']['public_ip'] = pub_ip
- facts['network']['hostname'] = metadata['instance']['hostname']
+ # Split instance hostname from GCE metadata to use the short instance name
+ facts['network']['hostname'] = metadata['instance']['hostname'].split('.')[0]
# TODO: attempt to resolve public_hostname
facts['network']['public_hostname'] = facts['network']['public_ip']
@@ -867,6 +867,7 @@ def set_deployment_facts_if_unset(facts):
return facts
+# pylint: disable=too-many-statements
def set_version_facts_if_unset(facts):
""" Set version facts. This currently includes common.version and
common.version_gte_3_1_or_1_1.
@@ -904,8 +905,8 @@ def set_version_facts_if_unset(facts):
version_gte_3_1_1_or_1_1_1 = True
version_gte_3_2_or_1_2 = True
version_gte_3_3_or_1_3 = True
- version_gte_3_4_or_1_4 = False
- version_gte_3_5_or_1_5 = False
+ version_gte_3_4_or_1_4 = True
+ version_gte_3_5_or_1_5 = True
version_gte_3_6_or_1_6 = False
facts['common']['version_gte_3_1_or_1_1'] = version_gte_3_1_or_1_1
facts['common']['version_gte_3_1_1_or_1_1_1'] = version_gte_3_1_1_or_1_1_1
@@ -915,7 +916,9 @@ def set_version_facts_if_unset(facts):
facts['common']['version_gte_3_5_or_1_5'] = version_gte_3_5_or_1_5
facts['common']['version_gte_3_6_or_1_6'] = version_gte_3_6_or_1_6
- if version_gte_3_4_or_1_4:
+ if version_gte_3_5_or_1_5:
+ examples_content_version = 'v1.5'
+ elif version_gte_3_4_or_1_4:
examples_content_version = 'v1.4'
elif version_gte_3_3_or_1_3:
examples_content_version = 'v1.3'
@@ -1019,7 +1022,7 @@ def set_nodename(facts):
if 'cloudprovider' in facts and facts['cloudprovider']['kind'] == 'openstack':
facts['node']['nodename'] = facts['provider']['metadata']['hostname'].replace('.novalocal', '')
elif 'cloudprovider' in facts and facts['cloudprovider']['kind'] == 'gce':
- facts['node']['nodename'] = '.'.split(facts['provider']['metadata']['hostname'])[0]
+ facts['node']['nodename'] = facts['provider']['metadata']['instance']['hostname'].split('.')[0]
else:
facts['node']['nodename'] = facts['common']['hostname'].lower()
return facts
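Editorial note: the two GCE hunks above now agree — both the network hostname fact and the nodename keep only the short instance name. A self-contained sketch of the same truncation in playbook form (the hostname value is a made-up example):

- name: Show the short instance name as derived from a GCE metadata hostname
  debug:
    msg: "{{ 'instance-1.c.my-project.internal'.split('.')[0] }}"   # -> instance-1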
diff --git a/roles/openshift_hosted_logging/handlers/main.yml b/roles/openshift_hosted_logging/handlers/main.yml
new file mode 100644
index 000000000..ad79e62ae
--- /dev/null
+++ b/roles/openshift_hosted_logging/handlers/main.yml
@@ -0,0 +1,5 @@
+---
+- name: restart master
+ systemd: name={{ openshift.common.service_type }}-master state=restarted
+ when: (openshift.master.ha is not defined or not openshift.master.ha | bool) and (not (master_service_status_changed | default(false) | bool))
+ notify: Verify API Server
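Editorial note: the handler above assumes a companion "Verify API Server" handler is available in the same play. A sketch of what such a handler can look like, modeled on the master role's health check (URL, CA path, and retry values are assumptions):

- name: Verify API Server
  command: >
    curl --silent --cacert {{ openshift.common.config_base }}/master/ca.crt
    {{ openshift.master.api_url }}/healthz/ready
  register: api_available_output
  until: api_available_output.stdout == 'ok'
  retries: 120
  delay: 1
  changed_when: false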
diff --git a/roles/openshift_hosted_logging/meta/main.yaml b/roles/openshift_hosted_logging/meta/main.yaml
index b695bde87..044c8043c 100644
--- a/roles/openshift_hosted_logging/meta/main.yaml
+++ b/roles/openshift_hosted_logging/meta/main.yaml
@@ -1,3 +1,4 @@
---
dependencies:
- { role: openshift_common }
+ - { role: openshift_master_facts }
diff --git a/roles/openshift_hosted_logging/tasks/deploy_logging.yaml b/roles/openshift_hosted_logging/tasks/deploy_logging.yaml
index 513a74c69..afd82766f 100644
--- a/roles/openshift_hosted_logging/tasks/deploy_logging.yaml
+++ b/roles/openshift_hosted_logging/tasks/deploy_logging.yaml
@@ -165,6 +165,8 @@
retries: 20
delay: 15
+- include: update_master_config.yaml
+
- debug:
msg: "Logging components deployed. Note persistent volume for elasticsearch must be setup manually"
diff --git a/roles/openshift_hosted_logging/tasks/update_master_config.yaml b/roles/openshift_hosted_logging/tasks/update_master_config.yaml
new file mode 100644
index 000000000..1122e059c
--- /dev/null
+++ b/roles/openshift_hosted_logging/tasks/update_master_config.yaml
@@ -0,0 +1,7 @@
+---
+- name: Adding Kibana route information to loggingPublicURL
+ modify_yaml:
+ dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
+ yaml_key: assetConfig.loggingPublicURL
+ yaml_value: "https://{{ logging_hostname }}"
+ notify: restart master
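Editorial note: after modify_yaml runs, master-config.yaml gains (or updates) a single nested key. The resulting fragment would look like this, with an assumed example hostname:

assetConfig:
  loggingPublicURL: "https://kibana.example.com"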
diff --git a/roles/openshift_logging/handlers/main.yml b/roles/openshift_logging/handlers/main.yml
new file mode 100644
index 000000000..ad79e62ae
--- /dev/null
+++ b/roles/openshift_logging/handlers/main.yml
@@ -0,0 +1,5 @@
+---
+- name: restart master
+ systemd: name={{ openshift.common.service_type }}-master state=restarted
+ when: (openshift.master.ha is not defined or not openshift.master.ha | bool) and (not (master_service_status_changed | default(false) | bool))
+ notify: Verify API Server
diff --git a/roles/openshift_logging/meta/main.yaml b/roles/openshift_logging/meta/main.yaml
index 7050e51db..bc45dcdab 100644
--- a/roles/openshift_logging/meta/main.yaml
+++ b/roles/openshift_logging/meta/main.yaml
@@ -1,7 +1,7 @@
---
galaxy_info:
author: OpenShift Red Hat
- description: OpenShift Embedded Router
+ description: OpenShift Aggregated Logging
company: Red Hat, Inc.
license: Apache License, Version 2.0
min_ansible_version: 2.2
@@ -12,4 +12,6 @@ galaxy_info:
categories:
- cloud
dependencies:
+- role: lib_openshift
- role: openshift_facts
+- role: openshift_master_facts
diff --git a/roles/openshift_logging/tasks/install_elasticsearch.yaml b/roles/openshift_logging/tasks/install_elasticsearch.yaml
index 9b1c004f2..64e983557 100644
--- a/roles/openshift_logging/tasks/install_elasticsearch.yaml
+++ b/roles/openshift_logging/tasks/install_elasticsearch.yaml
@@ -1,25 +1,31 @@
---
+- name: Getting current ES deployment size
+ set_fact: openshift_logging_current_es_size={{ openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | length }}
+
- name: Generate PersistentVolumeClaims
include: "{{ role_path}}/tasks/generate_pvcs.yaml"
vars:
es_pvc_names: "{{openshift_logging_facts.elasticsearch.pvcs.keys()}}"
es_dc_names: "{{openshift_logging_facts.elasticsearch.deploymentconfigs.keys()}}"
- when:
- - "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | length < openshift_logging_es_cluster_size }}"
+# Seed es_dc_pool with the names of the DeploymentConfigs that already exist
- name: Init pool of DeploymentConfig names for Elasticsearch
- set_fact: es_dc_pool={{es_dc_pool | default([]) + [deploy_name]}}
+ set_fact: es_dc_pool={{ es_dc_pool | default([]) + [deploy_name] }}
+ with_items: "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.keys() }}"
+ loop_control:
+ loop_var: deploy_name
+
+# Generate additional DC names when the desired cluster size exceeds the current size
+- name: Create new DeploymentConfig names for Elasticsearch
+ set_fact: es_dc_pool={{es_dc_pool|default([]) + [deploy_name]}}
vars:
component: es
es_cluster_name: "{{component}}"
deploy_name_prefix: "logging-{{component}}"
deploy_name: "{{deploy_name_prefix}}-{{'abcdefghijklmnopqrstuvwxyz0123456789'|random_word(8)}}"
- with_sequence: count={{(openshift_logging_es_cluster_size - openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | length) | abs}}
- when:
- - "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | length < openshift_logging_es_cluster_size }}"
+ with_sequence: count={{ openshift_logging_es_cluster_size | int - openshift_logging_current_es_size | int }}
check_mode: no
-
- name: Generate Elasticsearch DeploymentConfig
template: src=es.j2 dest={{mktemp.stdout}}/templates/logging-{{deploy_name}}-dc.yaml
vars:
@@ -35,14 +41,15 @@
deploy_name: "{{item.1}}"
es_node_selector: "{{openshift_logging_es_nodeselector | default({})}}"
with_indexed_items:
- - "{{es_dc_pool | default([])}}"
+ - "{{ es_dc_pool }}"
check_mode: no
- when:
- - "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | length < openshift_logging_es_cluster_size }}"
changed_when: no
# --------- Tasks for Operation clusters ---------
+- name: Getting current ES deployment size
+ set_fact: openshift_logging_current_es_ops_size={{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | length }}
+
- name: Validate Elasticsearch cluster size for Ops
fail: msg="The openshift_logging_es_ops_cluster_size may not be scaled down more than 1 less (or 0) the number of Elasticsearch nodes already deployed"
vars:
@@ -65,21 +72,27 @@
openshift_logging_es_pv_selector: "{{openshift_logging_es_ops_pv_selector}}"
when:
- openshift_logging_use_ops
- - "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | length < openshift_logging_es_ops_cluster_size }}"
check_mode: no
-- name: Init pool of DeploymentConfig names for Elasticsearch for Ops
- set_fact: es_dc_pool_ops={{es_dc_pool_ops | default([]) + [deploy_name]}}
+- name: Init pool of DeploymentConfig names for Elasticsearch Ops
+ set_fact: es_ops_dc_pool={{ es_ops_dc_pool | default([]) + [deploy_name] }}
+ with_items: "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() }}"
+ loop_control:
+ loop_var: deploy_name
+ when:
+ - openshift_logging_use_ops
+
+- name: Create new DeploymentConfig names for Elasticsearch Ops
+ set_fact: es_ops_dc_pool={{es_ops_dc_pool | default([]) + [deploy_name]}}
vars:
component: es-ops
es_cluster_name: "{{component}}"
deploy_name_prefix: "logging-{{component}}"
deploy_name: "{{deploy_name_prefix}}-{{'abcdefghijklmnopqrstuvwxyz0123456789'|random_word(8)}}"
cluster_size: "{{openshift_logging_es_ops_cluster_size}}"
- with_sequence: count={{openshift_logging_es_ops_cluster_size - openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | length}}
+ with_sequence: count={{ openshift_logging_es_ops_cluster_size | int - openshift_logging_current_es_ops_size | int }}
when:
- openshift_logging_use_ops
- - "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | length < openshift_logging_es_ops_cluster_size }}"
check_mode: no
- name: Generate Elasticsearch DeploymentConfig for Ops
@@ -101,9 +114,8 @@
openshift_logging_es_recover_after_time: "{{openshift_logging_es_ops_recover_after_time}}"
es_node_selector: "{{openshift_logging_es_ops_nodeselector | default({})}}"
with_indexed_items:
- - "{{es_dc_pool_ops | default([])}}"
+ - "{{ es_ops_dc_pool | default([]) }}"
when:
- openshift_logging_use_ops
- - "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | length < openshift_logging_es_ops_cluster_size }}"
check_mode: no
changed_when: no
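Editorial note: the sizing logic above applies one pattern twice — record the current DeploymentConfig count as a fact, seed the name pool with the existing names, then generate only the missing count with with_sequence. A minimal self-contained sketch of the pattern (names and sizes are made-up; the real role draws random suffixes from its custom random_word filter):

- name: Record current and desired sizes (example values)
  set_fact:
    current_size: 1
    desired_size: 3
    dc_pool: ['logging-es-existing1']

- name: Generate one new name per missing deployment
  set_fact:
    dc_pool: "{{ dc_pool + ['logging-es-new' + item] }}"
  with_sequence: count={{ desired_size | int - current_size | int }}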
diff --git a/roles/openshift_logging/tasks/install_fluentd.yaml b/roles/openshift_logging/tasks/install_fluentd.yaml
index 4c510c6e7..35273829c 100644
--- a/roles/openshift_logging/tasks/install_fluentd.yaml
+++ b/roles/openshift_logging/tasks/install_fluentd.yaml
@@ -1,8 +1,8 @@
---
-- set_fact: fluentd_ops_host={{ (openshift_logging_use_ops) | ternary(openshift_logging_es_ops_host, openshift_logging_es_host) }}
+- set_fact: fluentd_ops_host={{ (openshift_logging_use_ops | bool) | ternary(openshift_logging_es_ops_host, openshift_logging_es_host) }}
check_mode: no
-- set_fact: fluentd_ops_port={{ (openshift_logging_use_ops) | ternary(openshift_logging_es_ops_port, openshift_logging_es_port) }}
+- set_fact: fluentd_ops_port={{ (openshift_logging_use_ops | bool) | ternary(openshift_logging_es_ops_port, openshift_logging_es_port) }}
check_mode: no
- name: Generating Fluentd daemonset
diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml
index a9699adb8..00c79ee5e 100644
--- a/roles/openshift_logging/tasks/install_logging.yaml
+++ b/roles/openshift_logging/tasks/install_logging.yaml
@@ -8,8 +8,12 @@
check_mode: no
- name: Validate Elasticsearch cluster size
- fail: msg="The openshift_logging_es_cluster_size may not be scaled down more than 1 less (or 0) the number of Elasticsearch nodes already deployed"
- when: "{{openshift_logging_facts.elasticsearch.deploymentconfigs | length - openshift_logging_es_cluster_size | abs > 1}}"
+ fail: msg="The openshift_logging_es_cluster_size may only be scaled down manually. Please see official documentation on how to do this."
+ when: openshift_logging_facts.elasticsearch.deploymentconfigs | length > openshift_logging_es_cluster_size
+
+- name: Validate Elasticsearch Ops cluster size
+ fail: msg="The openshift_logging_es_ops_cluster_size may only be scaled down manually. Please see official documentation on how to do this."
+ when: openshift_logging_facts.elasticsearch_ops.deploymentconfigs | length > openshift_logging_es_ops_cluster_size
- name: Install logging
include: "{{ role_path }}/tasks/install_{{ install_component }}.yaml"
@@ -44,6 +48,8 @@
loop_var: file
when: not ansible_check_mode
+- include: update_master_config.yaml
+
- name: Printing out objects to create
debug: msg={{file.content | b64decode }}
with_items: "{{ object_defs.results }}"
diff --git a/roles/openshift_logging/tasks/label_node.yaml b/roles/openshift_logging/tasks/label_node.yaml
index bd5073381..ebe8f1ca8 100644
--- a/roles/openshift_logging/tasks/label_node.yaml
+++ b/roles/openshift_logging/tasks/label_node.yaml
@@ -49,4 +49,4 @@
- unlabel is defined
- unlabel
- not ansible_check_mode
- - label_value.stdout != ""
+ - label in node_labels.stdout
diff --git a/roles/openshift_logging/tasks/main.yaml b/roles/openshift_logging/tasks/main.yaml
index 4c718805e..36fb827c3 100644
--- a/roles/openshift_logging/tasks/main.yaml
+++ b/roles/openshift_logging/tasks/main.yaml
@@ -12,6 +12,10 @@
- debug: msg="Created temp dir {{mktemp.stdout}}"
+- name: Ensuring ruamel.yaml package is on target
+ command: yum install -y ruamel.yaml
+ check_mode: no
+
- name: Copy the admin client config(s)
command: >
cp {{ openshift_master_config_dir }}/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
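Editorial note: the ruamel.yaml task above shells out to yum directly, which reruns on every play. An equivalent, idempotent sketch using the yum module would be:

- name: Ensure the ruamel.yaml package is present
  yum:
    name: ruamel.yaml
    state: present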
diff --git a/roles/openshift_logging/tasks/scale.yaml b/roles/openshift_logging/tasks/scale.yaml
deleted file mode 100644
index 125d3b8af..000000000
--- a/roles/openshift_logging/tasks/scale.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
----
-- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get {{object}}
- -o jsonpath='{.spec.replicas}' -n {{openshift_logging_namespace}}
- register: replica_count
- failed_when: replica_count.rc == 1 and 'exists' not in replica_count.stderr
- when: not ansible_check_mode
- changed_when: no
-
-- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig scale {{object}}
- --replicas={{desired}} -n {{openshift_logging_namespace}}
- register: scale_result
- failed_when: scale_result.rc == 1 and 'exists' not in scale_result.stderr
- when:
- - not ansible_check_mode
- - replica_count.stdout|int != desired
-
-- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get {{object}} -n {{openshift_logging_namespace}} -o jsonpath='{.status.replicas}'
- register: replica_counts
- until: replica_counts.stdout|int == desired
- retries: 30
- delay: 10
- when:
- - not ansible_check_mode
- - replica_count.stdout|int != desired
- changed_when: no
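Editorial note: the new lib_openshift oc_scale module introduced in this patch replaces this deleted get/compare/scale sequence with a single declarative task per object, along these lines (the object name is an example):

- oc_scale:
    kind: dc
    name: logging-kibana
    namespace: "{{ openshift_logging_namespace }}"
    kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig"
    replicas: 1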
diff --git a/roles/openshift_logging/tasks/start_cluster.yaml b/roles/openshift_logging/tasks/start_cluster.yaml
index a96ad3f3a..07489ae79 100644
--- a/roles/openshift_logging/tasks/start_cluster.yaml
+++ b/roles/openshift_logging/tasks/start_cluster.yaml
@@ -1,18 +1,21 @@
---
- command: >
- {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get node -o jsonpath='{.items[*].metadata.name}'
+ {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get node -o name
register: fluentd_hosts
when: "'--all' in openshift_logging_fluentd_hosts"
check_mode: no
changed_when: no
+- set_fact: openshift_logging_fluentd_hosts={{ fluentd_hosts.stdout_lines | regex_replace('node/', '') }}
+ when: "'--all' in openshift_logging_fluentd_hosts"
+
- name: start fluentd
include: label_node.yaml
vars:
host: "{{fluentd_host}}"
label: "{{openshift_logging_fluentd_nodeselector.keys()[0]}}"
value: "{{openshift_logging_fluentd_nodeselector.values()[0]}}"
- with_items: "{{(fluentd_hosts.stdout_lines is defined) | ternary(fluentd_hosts.stdout_lines, openshift_logging_fluentd_hosts)}}"
+ with_items: "{{ openshift_logging_fluentd_hosts }}"
loop_control:
loop_var: fluentd_host
@@ -23,9 +26,12 @@
changed_when: no
- name: start elasticsearch
- include: scale.yaml
- vars:
- desired: 1
+ oc_scale:
+ kind: dc
+ name: "{{object.split('/')[1]}}"
+ namespace: "{{openshift_logging_namespace}}"
+ kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
+ replicas: 1
with_items: "{{es_dc.stdout_lines}}"
loop_control:
loop_var: object
@@ -37,9 +43,12 @@
changed_when: no
- name: start kibana
- include: scale.yaml
- vars:
- desired: "{{ openshift_logging_kibana_replica_count | default (1) }}"
+ oc_scale:
+ kind: dc
+ name: "{{object.split('/')[1]}}"
+ namespace: "{{openshift_logging_namespace}}"
+ kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
+ replicas: "{{ openshift_logging_kibana_replica_count | default (1) }}"
with_items: "{{kibana_dc.stdout_lines}}"
loop_control:
loop_var: object
@@ -51,9 +60,12 @@
changed_when: no
- name: start curator
- include: scale.yaml
- vars:
- desired: 1
+ oc_scale:
+ kind: dc
+ name: "{{object.split('/')[1]}}"
+ namespace: "{{openshift_logging_namespace}}"
+ kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
+ replicas: 1
with_items: "{{curator_dc.stdout_lines}}"
loop_control:
loop_var: object
@@ -65,9 +77,12 @@
changed_when: no
- name: start elasticsearch-ops
- include: scale.yaml
- vars:
- desired: 1
+ oc_scale:
+ kind: dc
+ name: "{{object.split('/')[1]}}"
+ namespace: "{{openshift_logging_namespace}}"
+ kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
+ replicas: 1
with_items: "{{es_dc.stdout_lines}}"
loop_control:
loop_var: object
@@ -80,9 +95,12 @@
changed_when: no
- name: start kibana-ops
- include: scale.yaml
- vars:
- desired: "{{ openshift_logging_kibana_ops_replica_count | default (1) }}"
+ oc_scale:
+ kind: dc
+ name: "{{object.split('/')[1]}}"
+ namespace: "{{openshift_logging_namespace}}"
+ kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
+ replicas: "{{ openshift_logging_kibana_ops_replica_count | default (1) }}"
with_items: "{{kibana_dc.stdout_lines}}"
loop_control:
loop_var: object
@@ -95,9 +113,12 @@
changed_when: no
- name: start curator-ops
- include: scale.yaml
- vars:
- desired: 1
+ oc_scale:
+ kind: dc
+ name: "{{object.split('/')[1]}}"
+ namespace: "{{openshift_logging_namespace}}"
+ kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
+ replicas: 1
with_items: "{{curator_dc.stdout_lines}}"
loop_control:
loop_var: object
diff --git a/roles/openshift_logging/tasks/stop_cluster.yaml b/roles/openshift_logging/tasks/stop_cluster.yaml
index e44493e4d..8e0df8344 100644
--- a/roles/openshift_logging/tasks/stop_cluster.yaml
+++ b/roles/openshift_logging/tasks/stop_cluster.yaml
@@ -1,17 +1,20 @@
---
- command: >
- {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get node -o jsonpath='{.items[*].metadata.name}'
+ {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get node -o name
register: fluentd_hosts
when: "'--all' in openshift_logging_fluentd_hosts"
changed_when: no
+- set_fact: openshift_logging_fluentd_hosts={{ fluentd_hosts.stdout_lines | regex_replace('node/', '') }}
+ when: "'--all' in openshift_logging_fluentd_hosts"
+
- name: stop fluentd
include: label_node.yaml
vars:
host: "{{fluentd_host}}"
label: "{{openshift_logging_fluentd_nodeselector.keys()[0]}}"
unlabel: True
- with_items: "{{(fluentd_hosts.stdout_lines is defined) | ternary(fluentd_hosts.stdout_lines, openshift_logging_fluentd_hosts)}}"
+ with_items: "{{ openshift_logging_fluentd_hosts }}"
loop_control:
loop_var: fluentd_host
@@ -21,9 +24,12 @@
changed_when: no
- name: stop elasticsearch
- include: scale.yaml
- vars:
- desired: 0
+ oc_scale:
+ kind: dc
+ name: "{{object.split('/')[1]}}"
+ namespace: "{{openshift_logging_namespace}}"
+ kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
+ replicas: 0
with_items: "{{es_dc.stdout_lines}}"
loop_control:
loop_var: object
@@ -34,9 +40,12 @@
changed_when: no
- name: stop kibana
- include: scale.yaml
- vars:
- desired: 0
+ oc_scale:
+ kind: dc
+ name: "{{object.split('/')[1]}}"
+ namespace: "{{openshift_logging_namespace}}"
+ kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
+ replicas: 0
with_items: "{{kibana_dc.stdout_lines}}"
loop_control:
loop_var: object
@@ -47,9 +56,12 @@
changed_when: no
- name: stop curator
- include: scale.yaml
- vars:
- desired: 0
+ oc_scale:
+ kind: dc
+ name: "{{object.split('/')[1]}}"
+ namespace: "{{openshift_logging_namespace}}"
+ kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
+ replicas: 0
with_items: "{{curator_dc.stdout_lines}}"
loop_control:
loop_var: object
@@ -60,9 +72,12 @@
changed_when: no
- name: stop elasticsearch-ops
- include: scale.yaml
- vars:
- desired: 0
+ oc_scale:
+ kind: dc
+ name: "{{object.split('/')[1]}}"
+ namespace: "{{openshift_logging_namespace}}"
+ kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
+ replicas: 0
with_items: "{{es_dc.stdout_lines}}"
loop_control:
loop_var: object
@@ -74,9 +89,12 @@
changed_when: no
- name: stop kibana-ops
- include: scale.yaml
- vars:
- desired: 0
+ oc_scale:
+ kind: dc
+ name: "{{object.split('/')[1]}}"
+ namespace: "{{openshift_logging_namespace}}"
+ kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
+ replicas: 0
with_items: "{{kibana_dc.stdout_lines}}"
loop_control:
loop_var: object
@@ -88,9 +106,12 @@
changed_when: no
- name: stop curator-ops
- include: scale.yaml
- vars:
- desired: 0
+ oc_scale:
+ kind: dc
+ name: "{{object.split('/')[1]}}"
+ namespace: "{{openshift_logging_namespace}}"
+ kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
+ replicas: 0
with_items: "{{curator_dc.stdout_lines}}"
loop_control:
loop_var: object
diff --git a/roles/openshift_logging/tasks/update_master_config.yaml b/roles/openshift_logging/tasks/update_master_config.yaml
new file mode 100644
index 000000000..af303c47c
--- /dev/null
+++ b/roles/openshift_logging/tasks/update_master_config.yaml
@@ -0,0 +1,7 @@
+---
+- name: Adding Kibana route information to loggingPublicURL
+ modify_yaml:
+ dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
+ yaml_key: assetConfig.loggingPublicURL
+ yaml_value: "https://{{ openshift_logging_kibana_hostname }}"
+ notify: restart master
diff --git a/roles/openshift_logging/tasks/upgrade_logging.yaml b/roles/openshift_logging/tasks/upgrade_logging.yaml
index a93463239..cceacd538 100644
--- a/roles/openshift_logging/tasks/upgrade_logging.yaml
+++ b/roles/openshift_logging/tasks/upgrade_logging.yaml
@@ -14,9 +14,12 @@
check_mode: no
- name: start elasticsearch
- include: scale.yaml
- vars:
- desired: 1
+ oc_scale:
+ kind: dc
+ name: "{{object.split('/')[1]}}"
+ namespace: "{{openshift_logging_namespace}}"
+ kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
+ replicas: 1
with_items: "{{es_dc.stdout_lines}}"
loop_control:
loop_var: object
diff --git a/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py b/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py
index 29a59a0d3..0c94228c6 100644
--- a/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py
+++ b/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py
@@ -52,32 +52,64 @@ class LookupModule(LookupBase):
# convert short_version to origin short_version
short_version = re.sub('^3.', '1.', short_version)
- if short_version in ['1.1', '1.2']:
- predicates.append({'name': 'PodFitsHostPorts'})
- predicates.append({'name': 'PodFitsResources'})
-
- # applies to all known versions
- predicates.append({'name': 'NoDiskConflict'})
-
- # only 1.1 didn't include NoVolumeZoneConflict
- if short_version != '1.1':
- predicates.append({'name': 'NoVolumeZoneConflict'})
-
- if short_version in ['1.1', '1.2']:
- predicates.append({'name': 'MatchNodeSelector'})
-
- if short_version != '1.1':
- predicates.append({'name': 'MaxEBSVolumeCount'})
- predicates.append({'name': 'MaxGCEPDVolumeCount'})
-
- if short_version not in ['1.1', '1.2']:
- predicates.append({'name': 'GeneralPredicates'})
- predicates.append({'name': 'PodToleratesNodeTaints'})
- predicates.append({'name': 'CheckNodeMemoryPressure'})
-
- if short_version not in ['1.1', '1.2', '1.3']:
- predicates.append({'name': 'CheckNodeDiskPressure'})
- predicates.append({'name': 'MatchInterPodAffinity'})
+ # Predicates ordered according to OpenShift Origin source:
+ # origin/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go
+
+ if short_version == '1.1':
+ predicates.extend([
+ {'name': 'PodFitsHostPorts'},
+ {'name': 'PodFitsResources'},
+ {'name': 'NoDiskConflict'},
+ {'name': 'MatchNodeSelector'},
+ ])
+
+ if short_version == '1.2':
+ predicates.extend([
+ {'name': 'PodFitsHostPorts'},
+ {'name': 'PodFitsResources'},
+ {'name': 'NoDiskConflict'},
+ {'name': 'NoVolumeZoneConflict'},
+ {'name': 'MatchNodeSelector'},
+ {'name': 'MaxEBSVolumeCount'},
+ {'name': 'MaxGCEPDVolumeCount'}
+ ])
+
+ if short_version == '1.3':
+ predicates.extend([
+ {'name': 'NoDiskConflict'},
+ {'name': 'NoVolumeZoneConflict'},
+ {'name': 'MaxEBSVolumeCount'},
+ {'name': 'MaxGCEPDVolumeCount'},
+ {'name': 'GeneralPredicates'},
+ {'name': 'PodToleratesNodeTaints'},
+ {'name': 'CheckNodeMemoryPressure'}
+ ])
+
+ if short_version == '1.4':
+ predicates.extend([
+ {'name': 'NoDiskConflict'},
+ {'name': 'NoVolumeZoneConflict'},
+ {'name': 'MaxEBSVolumeCount'},
+ {'name': 'MaxGCEPDVolumeCount'},
+ {'name': 'GeneralPredicates'},
+ {'name': 'PodToleratesNodeTaints'},
+ {'name': 'CheckNodeMemoryPressure'},
+ {'name': 'CheckNodeDiskPressure'},
+ {'name': 'MatchInterPodAffinity'}
+ ])
+
+ if short_version in ['1.5', '1.6']:
+ predicates.extend([
+ {'name': 'NoVolumeZoneConflict'},
+ {'name': 'MaxEBSVolumeCount'},
+ {'name': 'MaxGCEPDVolumeCount'},
+ {'name': 'MatchInterPodAffinity'},
+ {'name': 'NoDiskConflict'},
+ {'name': 'GeneralPredicates'},
+ {'name': 'PodToleratesNodeTaints'},
+ {'name': 'CheckNodeMemoryPressure'},
+ {'name': 'CheckNodeDiskPressure'},
+ ])
if regions_enabled:
region_predicate = {
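Editorial note: since the defaults above are keyed strictly to the release, deployments that need a different predicate set can bypass the lookup via the scheduler override variable consumed by openshift_master_facts. A minimal inventory sketch (the predicate choice is an example):

openshift_master_scheduler_predicates:
- name: NoDiskConflict
- name: GeneralPredicates
- name: PodToleratesNodeTaints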
diff --git a/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_priorities.py b/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_priorities.py
index 36022597f..95ace7923 100644
--- a/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_priorities.py
+++ b/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_priorities.py
@@ -11,11 +11,7 @@ class LookupModule(LookupBase):
def run(self, terms, variables=None, zones_enabled=True, short_version=None,
deployment_type=None, **kwargs):
- priorities = [
- {'name': 'LeastRequestedPriority', 'weight': 1},
- {'name': 'BalancedResourceAllocation', 'weight': 1},
- {'name': 'SelectorSpreadPriority', 'weight': 1}
- ]
+ priorities = []
if short_version is None or deployment_type is None:
if 'openshift' not in variables:
@@ -57,18 +53,51 @@ class LookupModule(LookupBase):
# convert short_version to origin short_version
short_version = re.sub('^3.', '1.', short_version)
- if short_version == '1.4':
- priorities.append({'name': 'NodePreferAvoidPodsPriority', 'weight': 10000})
-
- # only 1.1 didn't include NodeAffinityPriority
- if short_version != '1.1':
- priorities.append({'name': 'NodeAffinityPriority', 'weight': 1})
+ if short_version == '1.1':
+ priorities.extend([
+ {'name': 'LeastRequestedPriority', 'weight': 1},
+ {'name': 'BalancedResourceAllocation', 'weight': 1},
+ {'name': 'SelectorSpreadPriority', 'weight': 1}
+ ])
+
+ if short_version == '1.2':
+ priorities.extend([
+ {'name': 'LeastRequestedPriority', 'weight': 1},
+ {'name': 'BalancedResourceAllocation', 'weight': 1},
+ {'name': 'SelectorSpreadPriority', 'weight': 1},
+ {'name': 'NodeAffinityPriority', 'weight': 1}
+ ])
+
+ if short_version == '1.3':
+ priorities.extend([
+ {'name': 'LeastRequestedPriority', 'weight': 1},
+ {'name': 'BalancedResourceAllocation', 'weight': 1},
+ {'name': 'SelectorSpreadPriority', 'weight': 1},
+ {'name': 'NodeAffinityPriority', 'weight': 1},
+ {'name': 'TaintTolerationPriority', 'weight': 1}
+ ])
- if short_version not in ['1.1', '1.2']:
- priorities.append({'name': 'TaintTolerationPriority', 'weight': 1})
-
- if short_version not in ['1.1', '1.2', '1.3']:
- priorities.append({'name': 'InterPodAffinityPriority', 'weight': 1})
+ if short_version == '1.4':
+ priorities.extend([
+ {'name': 'LeastRequestedPriority', 'weight': 1},
+ {'name': 'BalancedResourceAllocation', 'weight': 1},
+ {'name': 'SelectorSpreadPriority', 'weight': 1},
+ {'name': 'NodePreferAvoidPodsPriority', 'weight': 10000},
+ {'name': 'NodeAffinityPriority', 'weight': 1},
+ {'name': 'TaintTolerationPriority', 'weight': 1},
+ {'name': 'InterPodAffinityPriority', 'weight': 1}
+ ])
+
+ if short_version in ['1.5', '1.6']:
+ priorities.extend([
+ {'name': 'SelectorSpreadPriority', 'weight': 1},
+ {'name': 'InterPodAffinityPriority', 'weight': 1},
+ {'name': 'LeastRequestedPriority', 'weight': 1},
+ {'name': 'BalancedResourceAllocation', 'weight': 1},
+ {'name': 'NodePreferAvoidPodsPriority', 'weight': 10000},
+ {'name': 'NodeAffinityPriority', 'weight': 1},
+ {'name': 'TaintTolerationPriority', 'weight': 1}
+ ])
if zones_enabled:
zone_priority = {
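Editorial note: the priorities lookup honors the same kind of inventory override as the predicates one; a minimal sketch (names and weights are examples):

openshift_master_scheduler_priorities:
- name: LeastRequestedPriority
  weight: 1
- name: SelectorSpreadPriority
  weight: 1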
diff --git a/roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py b/roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py
index 07bac6826..68b6deb88 100644
--- a/roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py
+++ b/roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py
@@ -9,6 +9,9 @@ sys.path = [os.path.abspath(os.path.dirname(__file__) + "/../lookup_plugins/")]
from openshift_master_facts_default_predicates import LookupModule # noqa: E402
+# Predicates ordered according to OpenShift Origin source:
+# origin/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go
+
DEFAULT_PREDICATES_1_1 = [
{'name': 'PodFitsHostPorts'},
{'name': 'PodFitsResources'},
@@ -48,6 +51,18 @@ DEFAULT_PREDICATES_1_4 = [
{'name': 'MatchInterPodAffinity'}
]
+DEFAULT_PREDICATES_1_5 = [
+ {'name': 'NoVolumeZoneConflict'},
+ {'name': 'MaxEBSVolumeCount'},
+ {'name': 'MaxGCEPDVolumeCount'},
+ {'name': 'MatchInterPodAffinity'},
+ {'name': 'NoDiskConflict'},
+ {'name': 'GeneralPredicates'},
+ {'name': 'PodToleratesNodeTaints'},
+ {'name': 'CheckNodeMemoryPressure'},
+ {'name': 'CheckNodeDiskPressure'},
+]
+
REGION_PREDICATE = {
'name': 'Region',
'argument': {
@@ -66,10 +81,10 @@ TEST_VARS = [
('3.3', 'openshift-enterprise', DEFAULT_PREDICATES_1_3),
('1.4', 'origin', DEFAULT_PREDICATES_1_4),
('3.4', 'openshift-enterprise', DEFAULT_PREDICATES_1_4),
- ('1.5', 'origin', DEFAULT_PREDICATES_1_4),
- ('3.5', 'openshift-enterprise', DEFAULT_PREDICATES_1_4),
- ('1.6', 'origin', DEFAULT_PREDICATES_1_4),
- ('3.6', 'openshift-enterprise', DEFAULT_PREDICATES_1_4),
+ ('1.5', 'origin', DEFAULT_PREDICATES_1_5),
+ ('3.5', 'openshift-enterprise', DEFAULT_PREDICATES_1_5),
+ ('1.6', 'origin', DEFAULT_PREDICATES_1_5),
+ ('3.6', 'openshift-enterprise', DEFAULT_PREDICATES_1_5),
]
diff --git a/roles/openshift_master_facts/test/openshift_master_facts_default_priorities_tests.py b/roles/openshift_master_facts/test/openshift_master_facts_default_priorities_tests.py
index 5427a07a1..4e44a2b3d 100644
--- a/roles/openshift_master_facts/test/openshift_master_facts_default_priorities_tests.py
+++ b/roles/openshift_master_facts/test/openshift_master_facts_default_priorities_tests.py
@@ -40,6 +40,16 @@ DEFAULT_PRIORITIES_1_4 = [
{'name': 'InterPodAffinityPriority', 'weight': 1}
]
+DEFAULT_PRIORITIES_1_5 = [
+ {'name': 'SelectorSpreadPriority', 'weight': 1},
+ {'name': 'InterPodAffinityPriority', 'weight': 1},
+ {'name': 'LeastRequestedPriority', 'weight': 1},
+ {'name': 'BalancedResourceAllocation', 'weight': 1},
+ {'name': 'NodePreferAvoidPodsPriority', 'weight': 10000},
+ {'name': 'NodeAffinityPriority', 'weight': 1},
+ {'name': 'TaintTolerationPriority', 'weight': 1}
+]
+
ZONE_PRIORITY = {
'name': 'Zone',
'argument': {
@@ -58,7 +68,11 @@ TEST_VARS = [
('1.3', 'origin', DEFAULT_PRIORITIES_1_3),
('3.3', 'openshift-enterprise', DEFAULT_PRIORITIES_1_3),
('1.4', 'origin', DEFAULT_PRIORITIES_1_4),
- ('3.4', 'openshift-enterprise', DEFAULT_PRIORITIES_1_4)
+ ('3.4', 'openshift-enterprise', DEFAULT_PRIORITIES_1_4),
+ ('1.5', 'origin', DEFAULT_PRIORITIES_1_5),
+ ('3.5', 'openshift-enterprise', DEFAULT_PRIORITIES_1_5),
+ ('1.6', 'origin', DEFAULT_PRIORITIES_1_5),
+ ('3.6', 'openshift-enterprise', DEFAULT_PRIORITIES_1_5),
]
diff --git a/roles/openshift_metrics/meta/main.yaml b/roles/openshift_metrics/meta/main.yaml
index 68e94992e..50214135c 100644
--- a/roles/openshift_metrics/meta/main.yaml
+++ b/roles/openshift_metrics/meta/main.yaml
@@ -15,4 +15,5 @@ galaxy_info:
categories:
- openshift
dependencies:
+- { role: lib_openshift }
- { role: openshift_facts }
diff --git a/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml b/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml
index 9cf4afee0..9333d341c 100644
--- a/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml
+++ b/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml
@@ -19,25 +19,53 @@
- slurp: src={{ openshift_metrics_certs_dir }}/hawkular-metrics-truststore.pwd
register: hawkular_truststore_password
+- stat: path="{{openshift_metrics_certs_dir}}/{{item}}"
+ register: pwd_file_stat
+ with_items:
+ - hawkular-metrics.pwd
+ - hawkular-metrics.htpasswd
+ - hawkular-jgroups-keystore.pwd
+ changed_when: no
+
+- set_fact:
+ pwd_files: "{{pwd_files | default({}) | combine ({item.item: item.stat}) }}"
+ with_items: "{{pwd_file_stat.results}}"
+ changed_when: no
+
+- name: Create local temp directory on the control node
+ local_action: command mktemp -d
+ register: local_tmp
+ changed_when: False
+
- name: generate password for hawkular metrics and jgroups
- copy:
- dest: '{{ openshift_metrics_certs_dir }}/{{ item }}.pwd'
- content: "{{ 15 | oo_random_word }}"
+ local_action: copy dest="{{ local_tmp.stdout}}/{{ item }}.pwd" content="{{ 15 | oo_random_word }}"
with_items:
- hawkular-metrics
- hawkular-jgroups-keystore
- when: not '{{ openshift_metrics_certs_dir }}/{{ item }}.pwd'|exists
+ when: "not pwd_files['{{ item }}.pwd'].exists"
- name: generate htpasswd file for hawkular metrics
- shell: >
- htpasswd -ci
- '{{ openshift_metrics_certs_dir }}/hawkular-metrics.htpasswd' hawkular
- < '{{ openshift_metrics_certs_dir }}/hawkular-metrics.pwd'
- when: >
- not '{{ openshift_metrics_certs_dir }}/hawkular-metrics.htpasswd'|exists
+ local_action: >
+ shell htpasswd -ci
+ '{{ local_tmp.stdout }}/hawkular-metrics.htpasswd' hawkular
+ < '{{ local_tmp.stdout }}/hawkular-metrics.pwd'
+ when: "not pwd_files['hawkular-metrics.htpasswd'].exists"
+
+- name: copy local generated passwords to target
+ copy:
+ src: "{{local_tmp.stdout}}/{{item}}"
+ dest: "{{openshift_metrics_certs_dir}}/{{item}}"
+ with_items:
+ - hawkular-metrics.pwd
+ - hawkular-metrics.htpasswd
+ - hawkular-jgroups-keystore.pwd
+ when: "not pwd_files['{{ item }}'].exists"
- include: import_jks_certs.yaml
+- local_action: file path="{{local_tmp.stdout}}" state=absent
+ changed_when: False
+
- name: read files for the hawkular-metrics secret
shell: >
printf '%s: ' '{{ item }}'
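Editorial note: the stat/combine pair above builds a filename -> stat-result map so later tasks can test existence with pwd_files['name'].exists instead of templating paths into when clauses. A self-contained sketch of the pattern (paths are made-up):

- stat: path="/tmp/demo/{{ item }}"
  register: pwd_file_stat
  with_items:
  - hawkular-metrics.pwd
  - hawkular-jgroups-keystore.pwd

- set_fact:
    pwd_files: "{{ pwd_files | default({}) | combine({item.item: item.stat}) }}"
  with_items: "{{ pwd_file_stat.results }}"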
diff --git a/roles/openshift_metrics/tasks/import_jks_certs.yaml b/roles/openshift_metrics/tasks/import_jks_certs.yaml
index f5192b005..16fd8d9f8 100644
--- a/roles/openshift_metrics/tasks/import_jks_certs.yaml
+++ b/roles/openshift_metrics/tasks/import_jks_certs.yaml
@@ -29,10 +29,6 @@
- slurp: src={{ openshift_metrics_certs_dir }}/hawkular-jgroups-keystore.pwd
register: jgroups_keystore_password
- - local_action: command mktemp -d
- register: local_tmp
- changed_when: False
-
- fetch:
dest: "{{local_tmp.stdout}}/"
src: "{{ openshift_metrics_certs_dir }}/{{item}}"
@@ -60,11 +56,6 @@
src: "{{item}}"
with_fileglob: "{{local_tmp.stdout}}/*.*store"
- - file:
- path: "{{local_tmp.stdout}}"
- state: absent
- changed_when: False
-
when: not metrics_keystore.stat.exists or
not metrics_truststore.stat.exists or
not cassandra_keystore.stat.exists or
diff --git a/roles/openshift_metrics/tasks/main.yaml b/roles/openshift_metrics/tasks/main.yaml
index 1808db5d5..d03d4176b 100644
--- a/roles/openshift_metrics/tasks/main.yaml
+++ b/roles/openshift_metrics/tasks/main.yaml
@@ -1,5 +1,5 @@
---
-- name: Create temp directory for doing work in
+- name: Create temp directory on the target for doing work in
command: mktemp -td openshift-metrics-ansible-XXXXXX
register: mktemp
changed_when: False
diff --git a/roles/openshift_metrics/tasks/scale.yaml b/roles/openshift_metrics/tasks/scale.yaml
deleted file mode 100644
index bb4fa621b..000000000
--- a/roles/openshift_metrics/tasks/scale.yaml
+++ /dev/null
@@ -1,30 +0,0 @@
----
-- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get {{object}}
- -o jsonpath='{.spec.replicas}' -n {{openshift_metrics_project}}
- register: replica_count
- failed_when: "replica_count.rc == 1 and 'exists' not in replica_count.stderr"
- when: not ansible_check_mode
- changed_when: no
-
-- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig scale {{object}}
- --replicas={{desired}} -n {{openshift_metrics_project}}
- register: scale_result
- failed_when: scale_result.rc == 1 and 'exists' not in scale_result.stderr
- when:
- - replica_count.stdout != (desired | string)
- - not ansible_check_mode
- changed_when: no
-
-- name: Waiting for {{object}} to scale to {{desired}}
- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
- get {{object}} -n {{openshift_metrics_project|quote}} -o jsonpath='{.status.replicas}'
- register: replica_counts
- until: replica_counts.stdout.find("{{desired}}") != -1
- retries: 30
- delay: 10
- when:
- - replica_count.stdout != (desired | string)
- - not ansible_check_mode
diff --git a/roles/openshift_metrics/tasks/start_metrics.yaml b/roles/openshift_metrics/tasks/start_metrics.yaml
index c4cae4aff..f02774e47 100644
--- a/roles/openshift_metrics/tasks/start_metrics.yaml
+++ b/roles/openshift_metrics/tasks/start_metrics.yaml
@@ -10,9 +10,12 @@
changed_when: no
- name: Start Hawkular Cassandra
- include: scale.yaml
- vars:
- desired: 1
+ oc_scale:
+ kind: rc
+ name: "{{object.split('/')[1]}}"
+ kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
+ namespace: "{{openshift_metrics_project}}"
+ replicas: 1
with_items: "{{metrics_cassandra_rc.stdout_lines}}"
loop_control:
loop_var: object
@@ -28,9 +31,12 @@
changed_when: no
- name: Start Hawkular Metrics
- include: scale.yaml
- vars:
- desired: "{{openshift_metrics_hawkular_replicas}}"
+ oc_scale:
+ kind: rc
+ name: "{{object.split('/')[1]}}"
+ kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
+ namespace: "{{openshift_metrics_project}}"
+ replicas: "{{openshift_metrics_hawkular_replicas}}"
with_items: "{{metrics_metrics_rc.stdout_lines}}"
loop_control:
loop_var: object
@@ -46,9 +52,12 @@
changed_when: no
- name: Start Heapster
- include: scale.yaml
- vars:
- desired: 1
+ oc_scale:
+ kind: rc
+ name: "{{object.split('/')[1]}}"
+ kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
+ namespace: "{{openshift_metrics_project}}"
+ replicas: 1
with_items: "{{metrics_heapster_rc.stdout_lines}}"
loop_control:
loop_var: object
diff --git a/roles/openshift_metrics/tasks/stop_metrics.yaml b/roles/openshift_metrics/tasks/stop_metrics.yaml
index bae181e3e..5a73443a8 100644
--- a/roles/openshift_metrics/tasks/stop_metrics.yaml
+++ b/roles/openshift_metrics/tasks/stop_metrics.yaml
@@ -11,9 +11,12 @@
check_mode: no
- name: Stop Heapster
- include: scale.yaml
- vars:
- desired: 0
+ oc_scale:
+ kind: rc
+ name: "{{object.split('/')[1]}}"
+ kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
+ namespace: "{{openshift_metrics_project}}"
+ replicas: 0
with_items: "{{metrics_heapster_rc.stdout_lines}}"
loop_control:
loop_var: object
@@ -29,9 +32,12 @@
changed_when: "'No resources found' not in metrics_hawkular_rc.stderr"
- name: Stop Hawkular Metrics
- include: scale.yaml
- vars:
- desired: 0
+ oc_scale:
+ kind: rc
+ name: "{{object.split('/')[1]}}"
+ kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
+ namespace: "{{openshift_metrics_project}}"
+ replicas: 0
with_items: "{{metrics_hawkular_rc.stdout_lines}}"
loop_control:
loop_var: object
@@ -46,9 +52,12 @@
changed_when: "'No resources found' not in metrics_cassandra_rc.stderr"
- name: Stop Hawkular Cassandra
- include: scale.yaml
- vars:
- desired: 0
+ oc_scale:
+ kind: rc
+ name: "{{object.split('/')[1]}}"
+ kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
+ namespace: "{{openshift_metrics_project}}"
+ replicas: 0
with_items: "{{metrics_cassandra_rc.stdout_lines}}"
loop_control:
loop_var: object
diff --git a/utils/src/ooinstall/cli_installer.py b/utils/src/ooinstall/cli_installer.py
index 0bc9aa45e..b787741d7 100644
--- a/utils/src/ooinstall/cli_installer.py
+++ b/utils/src/ooinstall/cli_installer.py
@@ -25,33 +25,15 @@ QUIET_ANSIBLE_CONFIG = '/usr/share/atomic-openshift-utils/ansible-quiet.cfg'
DEFAULT_PLAYBOOK_DIR = '/usr/share/ansible/openshift-ansible/'
UPGRADE_MAPPINGS = {
- '3.0': {
- 'minor_version': '3.0',
- 'minor_playbook': 'v3_0_minor/upgrade.yml',
- 'major_version': '3.1',
- 'major_playbook': 'v3_0_to_v3_1/upgrade.yml',
- },
- '3.1': {
- 'minor_version': '3.1',
- 'minor_playbook': 'v3_1_minor/upgrade.yml',
- 'major_playbook': 'v3_1_to_v3_2/upgrade.yml',
- 'major_version': '3.2',
- },
- '3.2': {
- 'minor_version': '3.2',
- 'minor_playbook': 'v3_2/upgrade.yml',
- 'major_playbook': 'v3_3/upgrade.yml',
- 'major_version': '3.3',
- },
- '3.3': {
- 'minor_version': '3.3',
- 'minor_playbook': 'v3_3/upgrade.yml',
- 'major_playbook': 'v3_4/upgrade.yml',
- 'major_version': '3.4',
- },
'3.4': {
'minor_version': '3.4',
'minor_playbook': 'v3_4/upgrade.yml',
+ 'major_playbook': 'v3_5/upgrade.yml',
+ 'major_version': '3.5',
+ },
+ '3.5': {
+ 'minor_version': '3.5',
+ 'minor_playbook': 'v3_5/upgrade.yml',
},
}