-rw-r--r--  .tito/packages/openshift-ansible | 2
-rw-r--r--  README.md | 4
-rw-r--r--  README_AEP.md | 4
-rw-r--r--  README_OSE.md | 224
-rw-r--r--  README_origin.md | 216
-rw-r--r--  filter_plugins/oo_filters.py | 52
-rw-r--r--  filter_plugins/openshift_master.py | 29
-rw-r--r--  filter_plugins/openshift_node.py | 43
-rw-r--r--  inventory/aws/hosts/hosts | 2
-rw-r--r--  inventory/byo/hosts.aep.example | 53
-rw-r--r--  inventory/byo/hosts.openstack | 2
-rw-r--r--  inventory/byo/hosts.origin.example | 52
-rw-r--r--  inventory/byo/hosts.ose.example | 51
-rw-r--r--  inventory/gce/hosts/hosts | 2
-rw-r--r--  inventory/libvirt/hosts/hosts | 2
-rwxr-xr-x  inventory/multi_inventory.py | 462
-rw-r--r--  inventory/multi_inventory.yaml.example | 51
-rw-r--r--  inventory/openstack/hosts/hosts | 2
-rw-r--r--  openshift-ansible.spec | 167
-rw-r--r--  playbooks/adhoc/atomic_openshift_tutorial_reset.yml | 2
-rw-r--r--  playbooks/adhoc/setupnfs.yml | 2
-rw-r--r--  playbooks/adhoc/uninstall.yml | 9
-rw-r--r--  playbooks/aws/openshift-cluster/config.yml | 2
-rw-r--r--  playbooks/aws/openshift-cluster/list.yml | 2
-rw-r--r--  playbooks/aws/openshift-cluster/scaleup.yml | 2
-rw-r--r--  playbooks/aws/openshift-cluster/service.yml | 4
-rw-r--r--  playbooks/aws/openshift-cluster/tasks/launch_instances.yml | 4
-rw-r--r--  playbooks/aws/openshift-cluster/terminate.yml | 2
-rw-r--r--  playbooks/aws/openshift-cluster/update.yml | 2
-rw-r--r--  playbooks/aws/openshift-cluster/vars.yml | 6
-rw-r--r--  playbooks/common/openshift-cluster/additional_config.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/config.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/evaluate_groups.yml | 18
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_0_minor/upgrade.yml | 4
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_1_minor/upgrade.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/containerized_upgrade.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/node_upgrade.yml | 24
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml | 4
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/rpm_upgrade.yml | 3
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml | 21
-rw-r--r--  playbooks/common/openshift-etcd/config.yml | 8
-rw-r--r--  playbooks/common/openshift-master/config.yml | 18
-rw-r--r--  playbooks/common/openshift-master/restart.yml | 4
-rw-r--r--  playbooks/common/openshift-node/config.yml | 10
-rw-r--r--  playbooks/gce/openshift-cluster/config.yml | 2
-rw-r--r--  playbooks/gce/openshift-cluster/list.yml | 2
-rw-r--r--  playbooks/gce/openshift-cluster/service.yml | 4
-rw-r--r--  playbooks/gce/openshift-cluster/tasks/launch_instances.yml | 2
-rw-r--r--  playbooks/gce/openshift-cluster/terminate.yml | 2
-rw-r--r--  playbooks/gce/openshift-cluster/update.yml | 2
-rw-r--r--  playbooks/gce/openshift-cluster/vars.yml | 6
-rw-r--r--  playbooks/libvirt/openshift-cluster/config.yml | 2
-rw-r--r--  playbooks/libvirt/openshift-cluster/list.yml | 2
-rw-r--r--  playbooks/libvirt/openshift-cluster/service.yml | 4
-rw-r--r--  playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml | 2
-rw-r--r--  playbooks/libvirt/openshift-cluster/terminate.yml | 2
-rw-r--r--  playbooks/libvirt/openshift-cluster/update.yml | 2
-rw-r--r--  playbooks/libvirt/openshift-cluster/vars.yml | 6
-rw-r--r--  playbooks/openstack/openshift-cluster/config.yml | 2
-rw-r--r--  playbooks/openstack/openshift-cluster/dns.yml | 4
-rw-r--r--  playbooks/openstack/openshift-cluster/files/heat_stack.yaml | 1
-rw-r--r--  playbooks/openstack/openshift-cluster/launch.yml | 10
-rw-r--r--  playbooks/openstack/openshift-cluster/list.yml | 2
-rw-r--r--  playbooks/openstack/openshift-cluster/terminate.yml | 2
-rw-r--r--  playbooks/openstack/openshift-cluster/update.yml | 2
-rw-r--r--  playbooks/openstack/openshift-cluster/vars.yml | 6
-rw-r--r--  roles/ansible/tasks/config.yml | 8
-rw-r--r--  roles/ansible/tasks/main.yml | 10
-rw-r--r--  roles/ansible_tower/tasks/main.yaml | 41
-rw-r--r--  roles/ansible_tower_cli/README.md | 50
-rw-r--r--  roles/ansible_tower_cli/defaults/main.yml | 2
-rw-r--r--  roles/ansible_tower_cli/handlers/main.yml | 2
-rw-r--r--  roles/ansible_tower_cli/meta/main.yml | 9
-rw-r--r--  roles/ansible_tower_cli/tasks/main.yml | 18
-rw-r--r--  roles/ansible_tower_cli/templates/tower_cli.cfg.j2 | 5
-rw-r--r--  roles/ansible_tower_cli/vars/main.yml | 2
-rw-r--r--  roles/chrony/README.md | 31
-rw-r--r--  roles/chrony/defaults/main.yml | 2
-rw-r--r--  roles/chrony/handlers/main.yml | 5
-rw-r--r--  roles/chrony/meta/main.yml | 18
-rw-r--r--  roles/chrony/tasks/main.yml | 30
-rw-r--r--  roles/chrony/templates/chrony.conf.j2 | 45
-rw-r--r--  roles/chrony/vars/main.yml | 2
-rw-r--r--  roles/copr_cli/README.md | 38
-rw-r--r--  roles/copr_cli/defaults/main.yml | 2
-rw-r--r--  roles/copr_cli/handlers/main.yml | 2
-rw-r--r--  roles/copr_cli/meta/main.yml | 14
-rw-r--r--  roles/copr_cli/tasks/main.yml | 3
-rw-r--r--  roles/copr_cli/vars/main.yml | 2
-rw-r--r--  roles/docker/tasks/main.yml | 17
-rw-r--r--  roles/docker_storage_setup/README.md | 42
-rw-r--r--  roles/docker_storage_setup/defaults/main.yml | 2
-rwxr-xr-x  roles/docker_storage_setup/tasks/main.yml | 95
-rw-r--r--  roles/etcd/defaults/main.yaml | 11
-rw-r--r--  roles/etcd/meta/main.yml | 1
-rw-r--r--  roles/etcd/tasks/main.yml | 39
-rw-r--r--  roles/etcd/templates/etcd.conf.j2 | 12
-rw-r--r--  roles/etcd_ca/tasks/main.yml | 2
-rw-r--r--  roles/etcd_certificates/tasks/client.yml | 6
-rw-r--r--  roles/etcd_certificates/tasks/server.yml | 12
-rw-r--r--  roles/etcd_common/defaults/main.yml | 8
-rw-r--r--  roles/etcd_common/tasks/main.yml | 13
-rw-r--r--  roles/etcd_common/templates/host_int_map.j2 | 13
-rw-r--r--  roles/flannel/handlers/main.yml | 4
-rw-r--r--  roles/flannel/tasks/main.yml | 12
-rw-r--r--  roles/flannel_register/README.md | 2
-rw-r--r--  roles/flannel_register/defaults/main.yaml | 2
-rw-r--r--  roles/flannel_register/tasks/main.yml | 6
-rw-r--r--  roles/kube_nfs_volumes/README.md | 2
-rw-r--r--  roles/lib_dyn/README.md | 27
-rw-r--r--  roles/lib_dyn/library/dyn_record.py | 351
-rw-r--r--  roles/lib_dyn/meta/main.yml | 33
-rw-r--r--  roles/lib_dyn/tasks/main.yml | 7
-rw-r--r--  roles/lib_openshift_api/build/ansible/edit.py | 84
-rw-r--r--  roles/lib_openshift_api/build/ansible/obj.py | 139
-rw-r--r--  roles/lib_openshift_api/build/ansible/router.py | 142
-rw-r--r--  roles/lib_openshift_api/build/ansible/secret.py | 121
-rwxr-xr-x  roles/lib_openshift_api/build/generate.py | 64
-rw-r--r--  roles/lib_openshift_api/build/src/base.py | 300
-rw-r--r--  roles/lib_openshift_api/build/src/edit.py | 49
-rw-r--r--  roles/lib_openshift_api/build/src/obj.py | 78
-rw-r--r--  roles/lib_openshift_api/build/src/router.py | 152
-rw-r--r--  roles/lib_openshift_api/build/src/secret.py | 68
-rw-r--r--  roles/lib_openshift_api/build/test/README | 5
-rwxr-xr-x  roles/lib_openshift_api/build/test/deploymentconfig.yml | 120
-rwxr-xr-x  roles/lib_openshift_api/build/test/edit.yml | 53
-rw-r--r--  roles/lib_openshift_api/build/test/files/config.yml | 1
-rw-r--r--  roles/lib_openshift_api/build/test/files/dc-mod.yml | 124
-rw-r--r--  roles/lib_openshift_api/build/test/files/dc.yml | 120
-rw-r--r--  roles/lib_openshift_api/build/test/files/passwords.yml | 4
-rw-r--r--  roles/lib_openshift_api/build/test/files/router-mod.json | 30
-rw-r--r--  roles/lib_openshift_api/build/test/files/router.json | 29
l---------  roles/lib_openshift_api/build/test/roles | 1
-rwxr-xr-x  roles/lib_openshift_api/build/test/router.yml | 79
-rwxr-xr-x  roles/lib_openshift_api/build/test/secrets.yml | 81
-rwxr-xr-x  roles/lib_openshift_api/build/test/services.yml | 133
-rw-r--r--  roles/lib_openshift_api/library/oadm_router.py | 807
-rw-r--r--  roles/lib_openshift_api/library/oc_edit.py | 646
-rw-r--r--  roles/lib_openshift_api/library/oc_obj.py | 730
-rw-r--r--  roles/lib_openshift_api/library/oc_secret.py | 702
-rw-r--r--  roles/lib_timedatectl/library/timedatectl.py | 74
-rw-r--r--  roles/lib_yaml_editor/build/ansible/yedit.py | 69
-rwxr-xr-x  roles/lib_yaml_editor/build/generate.py | 43
-rw-r--r--  roles/lib_yaml_editor/build/src/base.py | 17
-rw-r--r--  roles/lib_yaml_editor/build/src/yedit.py | 209
-rw-r--r--  roles/lib_yaml_editor/build/test/foo.yml | 1
-rwxr-xr-x  roles/lib_yaml_editor/build/test/test.yaml | 15
-rw-r--r--  roles/lib_yaml_editor/library/yedit.py | 300
-rw-r--r--  roles/lib_zabbix/README.md | 38
-rw-r--r--  roles/lib_zabbix/library/__init__.py | 3
-rw-r--r--  roles/lib_zabbix/library/zbx_action.py | 690
-rw-r--r--  roles/lib_zabbix/library/zbx_application.py | 142
-rw-r--r--  roles/lib_zabbix/library/zbx_discoveryrule.py | 205
-rw-r--r--  roles/lib_zabbix/library/zbx_graph.py | 331
-rw-r--r--  roles/lib_zabbix/library/zbx_graphprototype.py | 331
-rw-r--r--  roles/lib_zabbix/library/zbx_host.py | 182
-rw-r--r--  roles/lib_zabbix/library/zbx_hostgroup.py | 116
-rw-r--r--  roles/lib_zabbix/library/zbx_httptest.py | 290
-rw-r--r--  roles/lib_zabbix/library/zbx_item.py | 303
-rw-r--r--  roles/lib_zabbix/library/zbx_itemprototype.py | 327
-rw-r--r--  roles/lib_zabbix/library/zbx_itservice.py | 263
-rw-r--r--  roles/lib_zabbix/library/zbx_mediatype.py | 168
-rw-r--r--  roles/lib_zabbix/library/zbx_template.py | 132
-rw-r--r--  roles/lib_zabbix/library/zbx_trigger.py | 234
-rw-r--r--  roles/lib_zabbix/library/zbx_triggerprototype.py | 177
-rw-r--r--  roles/lib_zabbix/library/zbx_user.py | 192
-rw-r--r--  roles/lib_zabbix/library/zbx_user_media.py | 283
-rw-r--r--  roles/lib_zabbix/library/zbx_usergroup.py | 228
-rw-r--r--  roles/lib_zabbix/tasks/create_template.yml | 146
-rw-r--r--  roles/lib_zabbix/tasks/create_user.yml | 11
-rw-r--r--  roles/nuage_master/files/serviceaccount.sh | 63
-rw-r--r--  roles/nuage_master/handlers/main.yaml | 2
-rw-r--r--  roles/nuage_master/tasks/main.yaml | 14
-rw-r--r--  roles/nuage_master/tasks/serviceaccount.yml | 51
-rw-r--r--  roles/nuage_master/templates/nuage-openshift-monitor.j2 | 6
-rw-r--r--  roles/nuage_master/vars/main.yaml | 17
-rw-r--r--  roles/nuage_node/handlers/main.yaml | 4
-rw-r--r--  roles/nuage_node/tasks/main.yaml | 14
-rw-r--r--  roles/nuage_node/vars/main.yaml | 2
-rw-r--r--  roles/openshift_builddefaults/meta/main.yml | 15
-rw-r--r--  roles/openshift_builddefaults/tasks/main.yml | 24
-rw-r--r--  roles/openshift_builddefaults/vars/main.yml | 15
-rw-r--r--  roles/openshift_cluster_metrics/tasks/main.yml | 1
-rw-r--r--  roles/openshift_common/README.md | 1
-rw-r--r--  roles/openshift_common/tasks/main.yml | 2
-rw-r--r--  roles/openshift_docker/tasks/main.yml | 7
-rw-r--r--  roles/openshift_docker_facts/tasks/main.yml | 3
-rw-r--r--  roles/openshift_etcd/meta/main.yml | 2
-rw-r--r--  roles/openshift_etcd_certificates/meta/main.yml | 16
-rw-r--r--  roles/openshift_etcd_facts/meta/main.yml (renamed from roles/fluentd_master/meta/main.yml) | 10
-rw-r--r--  roles/openshift_etcd_facts/vars/main.yml | 5
-rwxr-xr-x  roles/openshift_examples/examples-sync.sh | 4
-rw-r--r--  roles/openshift_examples/files/examples/v1.2/db-templates/mongodb-persistent-template.json | 4
-rw-r--r--  roles/openshift_examples/files/examples/v1.2/db-templates/mysql-persistent-template.json | 4
-rw-r--r--  roles/openshift_examples/files/examples/v1.2/db-templates/postgresql-persistent-template.json | 4
-rw-r--r--  roles/openshift_examples/files/examples/v1.2/infrastructure-templates/enterprise/logging-deployer.yaml | 13
-rw-r--r--  roles/openshift_examples/files/examples/v1.2/infrastructure-templates/enterprise/metrics-deployer.yaml | 2
-rw-r--r--  roles/openshift_examples/files/examples/v1.2/infrastructure-templates/origin/logging-deployer.yaml | 438
-rw-r--r--  roles/openshift_examples/files/examples/v1.2/infrastructure-templates/origin/metrics-deployer.yaml | 28
-rw-r--r--  roles/openshift_examples/files/examples/v1.2/quickstart-templates/jenkins-persistent-template.json | 4
-rw-r--r--  roles/openshift_examples/files/examples/v1.2/quickstart-templates/nodejs-mongodb.json | 5
-rw-r--r--  roles/openshift_examples/files/examples/v1.2/quickstart-templates/nodejs.json | 5
-rw-r--r--  roles/openshift_expand_partition/README.md | 4
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py | 199
-rw-r--r--  roles/openshift_facts/tasks/main.yml | 15
-rw-r--r--  roles/openshift_hosted/tasks/router.yml | 1
-rw-r--r--  roles/openshift_master/defaults/main.yml | 8
-rw-r--r--  roles/openshift_master/meta/main.yml | 1
-rw-r--r--  roles/openshift_master/tasks/main.yml | 33
-rw-r--r--  roles/openshift_master/templates/atomic-openshift-master.j2 | 16
-rw-r--r--  roles/openshift_master/templates/htpasswd.j2 | 5
-rw-r--r--  roles/openshift_master/templates/master.yaml.v1.j2 | 4
-rw-r--r--  roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2 | 15
-rw-r--r--  roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2 | 15
-rw-r--r--  roles/openshift_master_facts/tasks/main.yml | 9
-rw-r--r--  roles/openshift_master_facts/vars/main.yml | 14
-rw-r--r--  roles/openshift_metrics/README.md | 53
-rw-r--r--  roles/openshift_metrics/meta/main.yaml | 3
-rw-r--r--  roles/openshift_metrics/tasks/main.yaml | 57
-rw-r--r--  roles/openshift_metrics/vars/main.yaml | 19
-rw-r--r--  roles/openshift_node/meta/main.yml | 3
-rw-r--r--  roles/openshift_node/tasks/main.yml | 8
-rw-r--r--  roles/openshift_node/templates/node.yaml.v1.j2 | 4
-rwxr-xr-x  roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh | 55
-rw-r--r--  roles/openshift_node_dnsmasq/handlers/main.yml | 5
-rw-r--r--  roles/openshift_node_dnsmasq/meta/main.yml | 15
-rw-r--r--  roles/openshift_node_dnsmasq/tasks/main.yml | 27
-rw-r--r--  roles/openshift_node_dnsmasq/tasks/network-manager.yml | 9
-rw-r--r--  roles/openshift_node_dnsmasq/tasks/no-network-manager.yml | 2
-rw-r--r--  roles/openshift_node_dnsmasq/templates/origin-dns.conf.j2 | 4
-rw-r--r--  roles/openshift_storage_nfs/defaults/main.yml | 8
-rw-r--r--  roles/openshift_storage_nfs_lvm/README.md | 4
-rw-r--r--  roles/os_ipv6_disable/tasks/main.yaml | 11
-rw-r--r--  roles/os_reboot_server/tasks/main.yaml | 16
-rw-r--r--  roles/os_utils/tasks/main.yaml | 17
-rw-r--r--  roles/os_zabbix/README.md | 40
-rw-r--r--  roles/os_zabbix/defaults/main.yml | 1
-rw-r--r--  roles/os_zabbix/handlers/main.yml | 1
-rw-r--r--  roles/os_zabbix/meta/main.yml | 9
-rw-r--r--  roles/os_zabbix/tasks/main.yml | 166
-rw-r--r--  roles/os_zabbix/vars/main.yml | 1
-rw-r--r--  roles/os_zabbix/vars/template_app_zabbix_agent.yml | 23
-rw-r--r--  roles/os_zabbix/vars/template_app_zabbix_server.yml | 412
-rw-r--r--  roles/os_zabbix/vars/template_aws.yml | 25
-rw-r--r--  roles/os_zabbix/vars/template_config_loop.yml | 14
-rw-r--r--  roles/os_zabbix/vars/template_docker.yml | 116
-rw-r--r--  roles/os_zabbix/vars/template_heartbeat.yml | 18
-rw-r--r--  roles/os_zabbix/vars/template_openshift_master.yml | 458
-rw-r--r--  roles/os_zabbix/vars/template_openshift_node.yml | 70
-rw-r--r--  roles/os_zabbix/vars/template_ops_tools.yml | 54
-rw-r--r--  roles/os_zabbix/vars/template_os_linux.yml | 314
-rw-r--r--  roles/os_zabbix/vars/template_performance_copilot.yml | 14
-rw-r--r--  roles/os_zabbix/vars/template_zagg_server.yml | 46
-rw-r--r--  roles/oso_host_monitoring/README.md | 50
-rw-r--r--  roles/oso_host_monitoring/defaults/main.yml | 1
-rw-r--r--  roles/oso_host_monitoring/handlers/main.yml | 6
-rw-r--r--  roles/oso_host_monitoring/meta/main.yml | 8
-rw-r--r--  roles/oso_host_monitoring/tasks/main.yml | 47
-rw-r--r--  roles/oso_host_monitoring/templates/docker-registry.ops.cfg.j2 | 1
-rw-r--r--  roles/oso_host_monitoring/templates/oso-rhel7-host-monitoring.service.j2 | 78
-rw-r--r--  roles/oso_host_monitoring/vars/main.yml | 1
-rw-r--r--  roles/oso_monitoring_tools/README.md | 54
-rw-r--r--  roles/oso_monitoring_tools/defaults/main.yml | 2
-rw-r--r--  roles/oso_monitoring_tools/handlers/main.yml | 2
-rw-r--r--  roles/oso_monitoring_tools/meta/main.yml | 8
-rw-r--r--  roles/oso_monitoring_tools/tasks/main.yml | 18
-rw-r--r--  roles/oso_monitoring_tools/vars/main.yml | 12
-rw-r--r--  roles/tito/README.md | 38
-rw-r--r--  roles/tito/defaults/main.yml | 2
-rw-r--r--  roles/tito/handlers/main.yml | 2
-rw-r--r--  roles/tito/meta/main.yml | 14
-rw-r--r--  roles/tito/tasks/main.yml | 2
-rw-r--r--  roles/tito/vars/main.yml | 2
-rw-r--r--  roles/yum_repos/README.md | 113
-rw-r--r--  roles/yum_repos/defaults/main.yml | 3
-rw-r--r--  roles/yum_repos/meta/main.yml | 8
-rw-r--r--  roles/yum_repos/tasks/main.yml | 48
-rw-r--r--  roles/yum_repos/templates/yumrepo.j2 | 18
-rw-r--r--  utils/src/ooinstall/cli_installer.py | 14
-rw-r--r--  utils/src/ooinstall/openshift_ansible.py | 4
-rw-r--r--  utils/test/cli_installer_tests.py | 38
282 files changed, 1668 insertions(+), 15244 deletions(-)
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index 4ddfc5f0b..be3a3be19 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.0.80-1 ./
+3.0.85-1 ./
diff --git a/README.md b/README.md
index d05e3992e..cf78d04f2 100644
--- a/README.md
+++ b/README.md
@@ -21,8 +21,8 @@ This repo contains Ansible code for OpenShift and Atomic Enterprise.
- [local VMs](README_libvirt.md)
- Bring your own host deployments:
- - [OpenShift Enterprise](README_OSE.md)
- - [OpenShift Origin](README_origin.md)
+ - [OpenShift Enterprise](https://docs.openshift.com/enterprise/latest/install_config/install/advanced_install.html)
+ - [OpenShift Origin](https://docs.openshift.org/latest/install_config/install/advanced_install.html)
- [Atomic Enterprise](README_AEP.md)
- Build
diff --git a/README_AEP.md b/README_AEP.md
index 739c4baeb..1b926f2ab 100644
--- a/README_AEP.md
+++ b/README_AEP.md
@@ -71,8 +71,8 @@ nodes
# SSH user, this user should allow ssh based auth without requiring a password
ansible_ssh_user=root
-# If ansible_ssh_user is not root, ansible_sudo must be set to true
-#ansible_sudo=true
+# If ansible_ssh_user is not root, ansible_become must be set to true
+#ansible_become=yes
# See DEPLOYMENT_TYPES.md
deployment_type=atomic-enterprise
diff --git a/README_OSE.md b/README_OSE.md
deleted file mode 100644
index f7615ae38..000000000
--- a/README_OSE.md
+++ /dev/null
@@ -1,224 +0,0 @@
-# Installing OSEv3 from dev puddles using ansible
-
-* [Requirements](#requirements)
-* [Caveats](#caveats)
-* [Known Issues](#known-issues)
-* [Configuring the host inventory](#configuring-the-host-inventory)
-* [Creating the default variables for the hosts and host groups](#creating-the-default-variables-for-the-hosts-and-host-groups)
-* [Running the ansible playbooks](#running-the-ansible-playbooks)
-* [Post-ansible steps](#post-ansible-steps)
-* [Overriding detected ip addresses and hostnames](#overriding-detected-ip-addresses-and-hostnames)
-
-## Requirements
-* ansible 1.9.4
- * Available in Fedora channels
- * Available for EL with EPEL and Optional channel
-* One or more RHEL 7.1 VMs
-* Either ssh key based auth for the root user or ssh key based auth for a user
- with sudo access (no password)
-* A checkout of openshift-ansible from https://github.com/openshift/openshift-ansible/
-
- ```sh
- git clone https://github.com/openshift/openshift-ansible.git
- cd openshift-ansible
- ```
-
-## Caveats
-This ansible repo is currently under heavy revision for providing OSE support;
-the following items are highly likely to change before the OSE support is
-merged into the upstream repo:
- * the current git branch for testing
- * how the inventory file should be configured
- * variables that need to be set
- * bootstrapping steps
- * other configuration steps
-
-## Known Issues
-* Host subscriptions are not configurable yet, the hosts need to be
- pre-registered with subscription-manager or have the RHEL base repo
- pre-configured. If using subscription-manager the following commands will
- disable all but the rhel-7-server rhel-7-server-extras and
- rhel-server7-ose-beta repos:
-```sh
-subscription-manager repos --disable="*"
-subscription-manager repos \
---enable="rhel-7-server-rpms" \
---enable="rhel-7-server-extras-rpms" \
---enable="rhel-7-server-ose-3.0-rpms"
-```
-* Configuration of router is not automated yet
-* Configuration of docker-registry is not automated yet
-
-## Configuring the host inventory
-[Ansible docs](http://docs.ansible.com/intro_inventory.html)
-
-Example inventory file for configuring one master and two nodes for the test
-environment. This can be configured in the default inventory file
-(/etc/ansible/hosts), or using a custom file and passing the --inventory
-option to ansible-playbook.
-
-/etc/ansible/hosts:
-```ini
-# This is an example of a bring your own (byo) host inventory
-
-# Create an OSEv3 group that contains the masters and nodes groups
-[OSEv3:children]
-masters
-nodes
-
-# Set variables common for all OSEv3 hosts
-[OSEv3:vars]
-# SSH user, this user should allow ssh based auth without requiring a password
-ansible_ssh_user=root
-
-# If ansible_ssh_user is not root, ansible_sudo must be set to true
-#ansible_sudo=true
-
-# To deploy origin, change deployment_type to origin
-deployment_type=enterprise
-
-# Pre-release additional repo
-openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel',
-'baseurl':
-'http://buildvm/puddle/build/OpenShiftEnterprise/3.0/latest/RH7-RHOSE-3.0/$basearch/os',
-'enabled': 1, 'gpgcheck': 0}]
-
-# Origin copr repo
-#openshift_additional_repos=[{'id': 'openshift-origin-copr', 'name':
-'OpenShift Origin COPR', 'baseurl':
-'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/',
-'enabled': 1, 'gpgcheck': 1, gpgkey:
-'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg'}]
-
-# host group for masters
-[masters]
-ose3-master.example.com
-
-# host group for nodes
-[nodes]
-ose3-master.example.com
-ose3-node[1:2].example.com
-```
-
-The hostnames above should resolve both from the hosts themselves and
-the host where ansible is running (if different).
-
-A more complete example inventory file ([hosts.ose.example](https://github.com/openshift/openshift-ansible/blob/master/inventory/byo/hosts.ose.example)) is available under the [`/inventory/byo`](https://github.com/openshift/openshift-ansible/tree/master/inventory/byo) directory.
-
-## Running the ansible playbooks
-From the openshift-ansible checkout run:
-```sh
-ansible-playbook playbooks/byo/config.yml
-```
-**Note:** this assumes that the host inventory is /etc/ansible/hosts, if using a different
-inventory file use the -i option for ansible-playbook.
-
-## Post-ansible steps
-
-You should now be ready to follow the [What's Next?](https://docs.openshift.com/enterprise/3.0/install_config/install/advanced_install.html#what-s-next) section of the advanced installation guide to deploy your router, registry, and other components.
-
-## Overriding detected ip addresses and hostnames
-Some deployments will require that the user override the detected hostnames
-and ip addresses for the hosts. To see what the default values will be you can
-run the openshift_facts playbook:
-```sh
-ansible-playbook playbooks/byo/openshift_facts.yml
-```
-The output will be similar to:
-```
-ok: [10.3.9.45] => {
- "result": {
- "ansible_facts": {
- "openshift": {
- "common": {
- "hostname": "jdetiber-osev3-ansible-005dcfa6-27c6-463d-9b95-ef059579befd.os1.phx2.redhat.com",
- "ip": "172.16.4.79",
- "public_hostname": "jdetiber-osev3-ansible-005dcfa6-27c6-463d-9b95-ef059579befd.os1.phx2.redhat.com",
- "public_ip": "10.3.9.45",
- "use_openshift_sdn": true
- },
- "provider": {
- ... <snip> ...
- }
- }
- },
- "changed": false,
- "invocation": {
- "module_args": "",
- "module_name": "openshift_facts"
- }
- }
-}
-ok: [10.3.9.42] => {
- "result": {
- "ansible_facts": {
- "openshift": {
- "common": {
- "hostname": "jdetiber-osev3-ansible-c6ae8cdc-ba0b-4a81-bb37-14549893f9d3.os1.phx2.redhat.com",
- "ip": "172.16.4.75",
- "public_hostname": "jdetiber-osev3-ansible-c6ae8cdc-ba0b-4a81-bb37-14549893f9d3.os1.phx2.redhat.com",
- "public_ip": "10.3.9.42",
- "use_openshift_sdn": true
- },
- "provider": {
- ...<snip>...
- }
- }
- },
- "changed": false,
- "invocation": {
- "module_args": "",
- "module_name": "openshift_facts"
- }
- }
-}
-ok: [10.3.9.36] => {
- "result": {
- "ansible_facts": {
- "openshift": {
- "common": {
- "hostname": "jdetiber-osev3-ansible-bc39a3d3-cdd7-42fe-9c12-9fac9b0ec320.os1.phx2.redhat.com",
- "ip": "172.16.4.73",
- "public_hostname": "jdetiber-osev3-ansible-bc39a3d3-cdd7-42fe-9c12-9fac9b0ec320.os1.phx2.redhat.com",
- "public_ip": "10.3.9.36",
- "use_openshift_sdn": true
- },
- "provider": {
- ...<snip>...
- }
- }
- },
- "changed": false,
- "invocation": {
- "module_args": "",
- "module_name": "openshift_facts"
- }
- }
-}
-```
-Now we want to verify that the detected common settings are what we expect
-them to be (if not, we can override them).
-
-* hostname
- * Should resolve to the internal ip from the instances themselves.
- * openshift_hostname will override.
-* ip
- * Should be the internal ip of the instance.
- * openshift_ip will override.
-* public hostname
- * Should resolve to the external ip from hosts outside of the cloud provider.
- * openshift_public_hostname will override.
-* public_ip
- * Should be the externally accessible ip associated with the instance
- * openshift_public_ip will override
-* use_openshift_sdn
- * Should be true unless the cloud is GCE.
- * openshift_use_openshift_sdn overrides
-
-To override the defaults, you can set the variables in your inventory:
-```
-...snip...
-[masters]
-ose3-master.example.com openshift_ip=1.1.1.1 openshift_hostname=ose3-master.example.com openshift_public_ip=2.2.2.2 openshift_public_hostname=ose3-master.public.example.com
-...snip...
-```
diff --git a/README_origin.md b/README_origin.md
deleted file mode 100644
index b0169946b..000000000
--- a/README_origin.md
+++ /dev/null
@@ -1,216 +0,0 @@
-# Installing OpenShift Origin against existing hosts
-
-* [Requirements](#requirements)
-* [Caveats](#caveats)
-* [Known Issues](#known-issues)
-* [Configuring the host inventory](#configuring-the-host-inventory)
-* [Creating the default variables for the hosts and host groups](#creating-the-default-variables-for-the-hosts-and-host-groups)
-* [Running the ansible playbooks](#running-the-ansible-playbooks)
-* [Post-ansible steps](#post-ansible-steps)
-* [Overriding detected ip addresses and hostnames](#overriding-detected-ip-addresses-and-hostnames)
-
-## Requirements
-* ansible 1.9.4
- * Available in Fedora channels
- * Available for EL with EPEL and Optional channel
-* One or more RHEL 7.1+, CentOS 7.1+, or Fedora 23+ VMs
-* Either ssh key based auth for the root user or ssh key based auth for a user
- with sudo access (no password)
-* A checkout of openshift-ansible from https://github.com/openshift/openshift-ansible/
-
- ```sh
- git clone https://github.com/openshift/openshift-ansible.git
- cd openshift-ansible
- ```
-## Known Issues
-* RHEL - Host subscriptions are not configurable yet, the hosts need to be
- pre-registered with subscription-manager or have the RHEL base repo
- pre-configured. If using subscription-manager the following commands will
- disable all but the rhel-7-server rhel-7-server-extras and
- rhel-server7-ose-beta repos:
-```sh
-subscription-manager repos --disable="*"
-subscription-manager repos \
---enable="rhel-7-server-rpms" \
---enable="rhel-7-server-extras-rpms" \
---enable="rhel-7-server-ose-3.0-rpms"
-```
-* Configuration of router is not automated yet
-* Configuration of docker-registry is not automated yet
-* Fedora 23+ doesn't come with python2 and will need a quick bootstrap. Setup
- your inventory as described below and run the following (substituting the
- `$PATH_TO_INVENTORY_FILE` with the actual path to your inventory file):
-```sh
-ansible-playbook ./playbooks/adhoc/bootstrap-fedora.yml -i $PATH_TO_INVENTORY_FILE
-```
-
-## Configuring the host inventory
-[Ansible docs](http://docs.ansible.com/intro_inventory.html)
-
-Example inventory file for configuring one master and two nodes for the test
-environment. This can be configured in the default inventory file
-(/etc/ansible/hosts), or using a custom file and passing the --inventory
-option to ansible-playbook.
-
-/etc/ansible/hosts:
-```ini
-# This is an example of a bring your own (byo) host inventory
-
-# Create an OSEv3 group that contains the masters and nodes groups
-[OSEv3:children]
-masters
-nodes
-etcd
-lb
-
-# Set variables common for all OSEv3 hosts
-[OSEv3:vars]
-
-# SSH user, this user should allow ssh based auth without requiring a password
-ansible_ssh_user=root
-
-# If ansible_ssh_user is not root, ansible_sudo must be set to true
-#ansible_sudo=true
-
-deployment_type=origin
-
-# host group for masters
-[masters]
-osv3-master.example.com
-
-# host group for nodes
-[nodes]
-osv3-master.example.com
-osv3-node[1:2].example.com
-
-# host group for etcd
-[etcd]
-osv3-etcd[1:3].example.com
-
-[lb]
-osv3-lb.example.com
-
-```
-
-The hostnames above should resolve both from the hosts themselves and
-the host where ansible is running (if different).
-
-A more complete example inventory file ([hosts.origin.example](https://github.com/openshift/openshift-ansible/blob/master/inventory/byo/hosts.origin.example)) is available under the [`/inventory/byo`](https://github.com/openshift/openshift-ansible/tree/master/inventory/byo) directory.
-
-## Running the ansible playbooks
-From the openshift-ansible checkout run:
-```sh
-ansible-playbook playbooks/byo/config.yml
-```
-**Note:** this assumes that the host inventory is /etc/ansible/hosts, if using a different
-inventory file use the -i option for ansible-playbook.
-
-## Post-ansible steps
-
-You should now be ready to follow the [What's Next?](https://docs.openshift.org/latest/install_config/install/advanced_install.html#what-s-next) section of the advanced installation guide to deploy your router, registry, and other components.
-
-## Overriding detected ip addresses and hostnames
-Some deployments will require that the user override the detected hostnames
-and ip addresses for the hosts. To see what the default values will be you can
-run the openshift_facts playbook:
-```sh
-ansible-playbook playbooks/byo/openshift_facts.yml
-```
-The output will be similar to:
-```
-ok: [10.3.9.45] => {
- "result": {
- "ansible_facts": {
- "openshift": {
- "common": {
- "hostname": "jdetiber-osev3-ansible-005dcfa6-27c6-463d-9b95-ef059579befd.os1.phx2.redhat.com",
- "ip": "172.16.4.79",
- "public_hostname": "jdetiber-osev3-ansible-005dcfa6-27c6-463d-9b95-ef059579befd.os1.phx2.redhat.com",
- "public_ip": "10.3.9.45",
- "use_openshift_sdn": true
- },
- "provider": {
- ... <snip> ...
- }
- }
- },
- "changed": false,
- "invocation": {
- "module_args": "",
- "module_name": "openshift_facts"
- }
- }
-}
-ok: [10.3.9.42] => {
- "result": {
- "ansible_facts": {
- "openshift": {
- "common": {
- "hostname": "jdetiber-osev3-ansible-c6ae8cdc-ba0b-4a81-bb37-14549893f9d3.os1.phx2.redhat.com",
- "ip": "172.16.4.75",
- "public_hostname": "jdetiber-osev3-ansible-c6ae8cdc-ba0b-4a81-bb37-14549893f9d3.os1.phx2.redhat.com",
- "public_ip": "10.3.9.42",
- "use_openshift_sdn": true
- },
- "provider": {
- ...<snip>...
- }
- }
- },
- "changed": false,
- "invocation": {
- "module_args": "",
- "module_name": "openshift_facts"
- }
- }
-}
-ok: [10.3.9.36] => {
- "result": {
- "ansible_facts": {
- "openshift": {
- "common": {
- "hostname": "jdetiber-osev3-ansible-bc39a3d3-cdd7-42fe-9c12-9fac9b0ec320.os1.phx2.redhat.com",
- "ip": "172.16.4.73",
- "public_hostname": "jdetiber-osev3-ansible-bc39a3d3-cdd7-42fe-9c12-9fac9b0ec320.os1.phx2.redhat.com",
- "public_ip": "10.3.9.36",
- "use_openshift_sdn": true
- },
- "provider": {
- ...<snip>...
- }
- }
- },
- "changed": false,
- "invocation": {
- "module_args": "",
- "module_name": "openshift_facts"
- }
- }
-}
-```
-Now we want to verify that the detected common settings are what we expect
-them to be (if not, we can override them).
-
-* hostname
- * Should resolve to the internal ip from the instances themselves.
- * openshift_hostname will override.
-* ip
- * Should be the internal ip of the instance.
- * openshift_ip will override.
-* public hostname
- * Should resolve to the external ip from hosts outside of the cloud provider.
- * openshift_public_hostname will override.
-* public_ip
- * Should be the externally accessible ip associated with the instance
- * openshift_public_ip will override
-* use_openshift_sdn
- * Should be true unless the cloud is GCE.
- * openshift_use_openshift_sdn overrides
-
-To override the defaults, you can set the variables in your inventory:
-```
-...snip...
-[masters]
-osv3-master.example.com openshift_ip=1.1.1.1 openshift_hostname=osv3-master.example.com openshift_public_ip=2.2.2.2 openshift_public_hostname=osv3-master.public.example.com
-...snip...
-```
diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py
index f6cc2edde..b08670678 100644
--- a/filter_plugins/oo_filters.py
+++ b/filter_plugins/oo_filters.py
@@ -6,6 +6,7 @@ Custom filters for use in openshift-ansible
"""
from ansible import errors
+from collections import Mapping
from operator import itemgetter
import OpenSSL.crypto
import os
@@ -128,14 +129,14 @@ class FilterModule(object):
returns [1, 3]
"""
- if not isinstance(data, dict):
- raise errors.AnsibleFilterError("|failed expects to filter on a dict")
+ if not isinstance(data, Mapping):
+ raise errors.AnsibleFilterError("|failed expects to filter on a dict or object")
if not isinstance(keys, list):
raise errors.AnsibleFilterError("|failed expects first param is a list")
# Gather up the values for the list of keys passed in
- retval = [data[key] for key in keys if data.has_key(key)]
+ retval = [data[key] for key in keys if key in data]
return retval
@@ -259,8 +260,11 @@ class FilterModule(object):
@staticmethod
def oo_split(string, separator=','):
- """ This splits the input string into a list
+ """ This splits the input string into a list. If the input string is
+ already a list we will return it as is.
"""
+ if isinstance(string, list):
+ return string
return string.split(separator)
@staticmethod
@@ -296,7 +300,7 @@ class FilterModule(object):
raise errors.AnsibleFilterError("|failed expects filter_attr is a str or unicode")
# Gather up the values for the list of keys passed in
- return [x for x in data if x.has_key(filter_attr) and x[filter_attr]]
+ return [x for x in data if filter_attr in x and x[filter_attr]]
@staticmethod
def oo_oc_nodes_matching_selector(nodes, selector):
@@ -311,6 +315,16 @@ class FilterModule(object):
"color": "red"}}}]
selector = 'color=green'
returns = ['node1.example.com']
+
+ nodes = [{"kind": "Node", "metadata": {"name": "node1.example.com",
+ "labels": {"kubernetes.io/hostname": "node1.example.com",
+ "color": "green"}}},
+ {"kind": "Node", "metadata": {"name": "node2.example.com",
+ "labels": {"kubernetes.io/hostname": "node2.example.com",
+ "color": "red"}}}]
+ selector = 'color=green,color=red'
+ returns = ['node1.example.com','node2.example.com']
+
Args:
nodes (list[dict]): list of node definitions
selector (str): "label=value" node selector to filter `nodes` by
@@ -323,9 +337,15 @@ class FilterModule(object):
raise errors.AnsibleFilterError("failed expects selector to be a string")
if not re.match('.*=.*', selector):
raise errors.AnsibleFilterError("failed selector does not match \"label=value\" format")
- label = selector.split('=')[0]
- value = selector.split('=')[1]
- return FilterModule.oo_oc_nodes_with_label(nodes, label, value)
+ node_lists = []
+ for node_selector in ''.join(selector.split()).split(','):
+ label = node_selector.split('=')[0]
+ value = node_selector.split('=')[1]
+ node_lists.append(FilterModule.oo_oc_nodes_with_label(nodes, label, value))
+ nodes = set(node_lists[0])
+ for node_list in node_lists[1:]:
+ nodes.intersection_update(node_list)
+ return list(nodes)
@staticmethod
def oo_oc_nodes_with_label(nodes, label, value):
@@ -634,7 +654,9 @@ class FilterModule(object):
@staticmethod
def oo_openshift_env(hostvars):
- ''' Return facts which begin with "openshift_"
+ ''' Return facts which begin with "openshift_" and translate
+ legacy facts to their openshift_env counterparts.
+
Ex: hostvars = {'openshift_fact': 42,
'theyre_taking_the_hobbits_to': 'isengard'}
returns = {'openshift_fact': 42}
@@ -647,6 +669,11 @@ class FilterModule(object):
for key in hostvars:
if regex.match(key):
facts[key] = hostvars[key]
+
+ migrations = {'openshift_router_selector': 'openshift_hosted_router_selector'}
+ for old_fact, new_fact in migrations.iteritems():
+ if old_fact in facts and new_fact not in facts:
+ facts[new_fact] = facts[old_fact]
return facts
@staticmethod
@@ -794,15 +821,18 @@ class FilterModule(object):
def oo_image_tag_to_rpm_version(version, include_dash=False):
""" Convert an image tag string to an RPM version if necessary
Empty strings and strings that are already in rpm version format
- are ignored.
+ are ignored. Also remove non semantic version components.
Ex. v3.2.0.10 -> -3.2.0.10
+ v1.2.0-rc1 -> -1.2.0
"""
if not isinstance(version, basestring):
raise errors.AnsibleFilterError("|failed expects a string or unicode")
-
+ # TODO: Do we need to make this actually convert v1.2.0-rc1 into 1.2.0-0.rc1
+ # We'd need to be really strict about how we build the RPM Version+Release
if version.startswith("v"):
version = version.replace("v", "")
+ version = version.split('-')[0]
if include_dash:
version = "-" + version
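
The conversion rules in the hunk above are easy to sanity-check in isolation. Below is a minimal standalone sketch of the same string handling (an illustration, not the filter module itself):

```python
# Standalone sketch of the conversion in oo_image_tag_to_rpm_version above;
# mirrors the string handling in the hunk, minus the Ansible error types.
def image_tag_to_rpm_version(version, include_dash=False):
    if version.startswith("v"):
        version = version.replace("v", "")  # v3.2.0.10 -> 3.2.0.10
    version = version.split('-')[0]         # 1.2.0-rc1 -> 1.2.0 (drop pre-release)
    if include_dash:
        version = "-" + version
    return version

assert image_tag_to_rpm_version("v3.2.0.10", include_dash=True) == "-3.2.0.10"
assert image_tag_to_rpm_version("v1.2.0-rc1") == "1.2.0"
```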
diff --git a/filter_plugins/openshift_master.py b/filter_plugins/openshift_master.py
index 34d9aef75..c21709fe3 100644
--- a/filter_plugins/openshift_master.py
+++ b/filter_plugins/openshift_master.py
@@ -9,8 +9,12 @@ import sys
import yaml
from ansible import errors
-from ansible.runner.filter_plugins.core import bool as ansible_bool
+# pylint: disable=no-name-in-module,import-error
+try:
+ from ansible.runner.filter_plugins.core import bool as ansible_bool
+except ImportError:
+ from ansible.plugins.filter.core import bool as ansible_bool
class IdentityProviderBase(object):
""" IdentityProviderBase
@@ -527,9 +531,30 @@ class FilterModule(object):
'openshift-master.kubeconfig']
return certs
+ @staticmethod
+ def oo_htpasswd_users_from_file(file_contents):
+ ''' return a dictionary of htpasswd users from htpasswd file contents '''
+ htpasswd_entries = {}
+ if not isinstance(file_contents, basestring):
+ raise errors.AnsibleFilterError("failed, expects to filter on a string")
+ for line in file_contents.splitlines():
+ user = None
+ passwd = None
+ if len(line) == 0:
+ continue
+ if ':' in line:
+ user, passwd = line.split(':', 1)
+
+ if user is None or len(user) == 0 or passwd is None or len(passwd) == 0:
+ error_msg = "failed, expects each line to be a colon separated string representing the user and passwd"
+ raise errors.AnsibleFilterError(error_msg)
+ htpasswd_entries[user] = passwd
+ return htpasswd_entries
+
def filters(self):
''' returns a mapping of filters to methods '''
return {"translate_idps": self.translate_idps,
"validate_pcs_cluster": self.validate_pcs_cluster,
- "certificates_to_synchronize": self.certificates_to_synchronize}
+ "certificates_to_synchronize": self.certificates_to_synchronize,
+ "oo_htpasswd_users_from_file": self.oo_htpasswd_users_from_file}
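
For reference, the parsing the new `oo_htpasswd_users_from_file` filter performs reduces to the following standalone sketch; the hashes are made-up placeholders and the filter's malformed-line error handling is elided:

```python
# Sketch of the parsing in oo_htpasswd_users_from_file above; hashes are
# made-up placeholders, and the AnsibleFilterError paths are elided.
file_contents = (
    "user1:$apr1$abc123$placeholderhash1\n"
    "user2:$apr1$def456$placeholderhash2\n"
)

htpasswd_entries = {}
for line in file_contents.splitlines():
    if not line:
        continue
    user, passwd = line.split(':', 1)  # split on the first colon only
    htpasswd_entries[user] = passwd

print(htpasswd_entries)
# {'user1': '$apr1$abc123$placeholderhash1', 'user2': '$apr1$def456$placeholderhash2'}
```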
diff --git a/filter_plugins/openshift_node.py b/filter_plugins/openshift_node.py
new file mode 100644
index 000000000..4ef92ba03
--- /dev/null
+++ b/filter_plugins/openshift_node.py
@@ -0,0 +1,43 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# vim: expandtab:tabstop=4:shiftwidth=4
+'''
+Custom filters for use in openshift-node
+'''
+from ansible import errors
+
+class FilterModule(object):
+ ''' Custom ansible filters for use by openshift_node role'''
+
+ @staticmethod
+ def get_dns_ip(openshift_dns_ip, hostvars):
+ ''' Navigates the complicated logic of when to set dnsIP
+
+ In all situations if they've set openshift_dns_ip use that
+ For 1.0/3.0 installs we use openshift_master_cluster_vip, then openshift_node_first_master_ip, else None
+ For 1.1/3.1 installs we use openshift_master_cluster_vip, else None (product will use kube svc ip)
+ For 1.2/3.2+ installs we set to the node's default interface ip
+ '''
+
+ if not issubclass(type(hostvars), dict):
+ raise errors.AnsibleFilterError("|failed expects hostvars is a dict")
+
+ # We always use what they've specified if they've specified a value
+ if openshift_dns_ip != None:
+ return openshift_dns_ip
+
+ if bool(hostvars['openshift']['common']['version_gte_3_2_or_1_2']):
+ return hostvars['ansible_default_ipv4']['address']
+ elif bool(hostvars['openshift']['common']['version_gte_3_1_or_1_1']):
+ if 'openshift_master_cluster_vip' in hostvars:
+ return hostvars['openshift_master_cluster_vip']
+ else:
+ if 'openshift_master_cluster_vip' in hostvars:
+ return hostvars['openshift_master_cluster_vip']
+ elif 'openshift_node_first_master_ip' in hostvars:
+ return hostvars['openshift_node_first_master_ip']
+ return None
+
+ def filters(self):
+ ''' returns a mapping of filters to methods '''
+ return {'get_dns_ip': self.get_dns_ip}
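
The branch order in `get_dns_ip` can be exercised with hand-built hostvars. The sketch below is a condensed rendering of the same decision logic, written against the docstring above rather than a live cluster:

```python
# Condensed rendering of get_dns_ip's decision logic; the hostvars
# fixture is hand-built for illustration, not real gathered facts.
def get_dns_ip(openshift_dns_ip, hostvars):
    if openshift_dns_ip is not None:        # explicit value always wins
        return openshift_dns_ip
    common = hostvars['openshift']['common']
    if common['version_gte_3_2_or_1_2']:    # 1.2/3.2+: node's default interface
        return hostvars['ansible_default_ipv4']['address']
    if common['version_gte_3_1_or_1_1']:    # 1.1/3.1: cluster VIP, else None
        return hostvars.get('openshift_master_cluster_vip')
    return (hostvars.get('openshift_master_cluster_vip') or   # 1.0/3.0 fallbacks
            hostvars.get('openshift_node_first_master_ip'))

hostvars_32 = {
    'openshift': {'common': {'version_gte_3_2_or_1_2': True,
                             'version_gte_3_1_or_1_1': True}},
    'ansible_default_ipv4': {'address': '192.0.2.10'},
}
print(get_dns_ip(None, hostvars_32))        # -> 192.0.2.10
print(get_dns_ip('10.0.0.2', hostvars_32))  # -> 10.0.0.2
```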
diff --git a/inventory/aws/hosts/hosts b/inventory/aws/hosts/hosts
index bf4e0845a..3996e577e 100644
--- a/inventory/aws/hosts/hosts
+++ b/inventory/aws/hosts/hosts
@@ -1 +1 @@
-localhost ansible_connection=local ansible_sudo=no ansible_python_interpreter='/usr/bin/env python2'
+localhost ansible_connection=local ansible_become=no ansible_python_interpreter='/usr/bin/env python2'
diff --git a/inventory/byo/hosts.aep.example b/inventory/byo/hosts.aep.example
index 62891e6c3..71bab31f4 100644
--- a/inventory/byo/hosts.aep.example
+++ b/inventory/byo/hosts.aep.example
@@ -14,9 +14,9 @@ lb
# ssh agent.
ansible_ssh_user=root
-# If ansible_ssh_user is not root, ansible_sudo must be set to true and the
+# If ansible_ssh_user is not root, ansible_become must be set to true and the
# user must be configured for passwordless sudo
-#ansible_sudo=true
+#ansible_become=yes
# Debug level for all Atomic Enterprise components (Defaults to 2)
debug_level=2
@@ -75,7 +75,7 @@ deployment_type=atomic-enterprise
#openshift_docker_options="-l warn --ipv6=false"
# Deprecated methods to set --log-driver and --log-opts flags, use openshift_docker_options instead
#openshift_docker_log_driver=json
-#openshift_docker_log_options="tag=mailer"
+#openshift_docker_log_options=["tag=mailer"]
# Alternate image format string. If you're not modifying the format string and
# only need to inject your own registry you may want to consider
@@ -87,12 +87,26 @@ deployment_type=atomic-enterprise
# htpasswd auth
openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/origin/master/htpasswd'}]
+# Defining htpasswd users
+#openshift_master_htpasswd_users={'user1': '<pre-hashed password>', 'user2': '<pre-hashed password>'}
+# or
+#openshift_master_htpasswd_file=<path to local pre-generated htpasswd file>
# Allow all auth
#openshift_master_identity_providers=[{'name': 'allow_all', 'login': 'true', 'challenge': 'true', 'kind': 'AllowAllPasswordIdentityProvider'}]
# LDAP auth
#openshift_master_identity_providers=[{'name': 'my_ldap_provider', 'challenge': 'true', 'login': 'true', 'kind': 'LDAPPasswordIdentityProvider', 'attributes': {'id': ['dn'], 'email': ['mail'], 'name': ['cn'], 'preferredUsername': ['uid']}, 'bindDN': '', 'bindPassword': '', 'ca': '', 'insecure': 'false', 'url': 'ldap://ldap.example.com:389/ou=users,dc=example,dc=com?uid'}]
+# Configuring the ldap ca certificate
+#openshift_master_ldap_ca=<ca text>
+# or
+#openshift_master_ldap_ca_file=<path to local ca file to use>
+
+# Available variables for configuring certificates for other identity providers:
+#openshift_master_openid_ca
+#openshift_master_openid_ca_file
+#openshift_master_request_header_ca
+#openshift_master_request_header_ca_file
# Cloud Provider Configuration
#
@@ -336,6 +350,39 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Setting this variable to true will override that check.
#openshift_override_hostname_check=true
+# Configure dnsmasq for cluster dns, switch the host's local resolver to use dnsmasq
+# and configure node's dnsIP to point at the node's local dnsmasq instance. Defaults
+# to True for Origin 1.2 and OSE 3.2. False for 1.1 / 3.1 installs, this cannot
+# be used with 1.0 and 3.0.
+# openshift_use_dnsmasq=False
+
+# Global Proxy Configuration
+# These options configure HTTP_PROXY, HTTPS_PROXY, and NOPROXY environment
+# variables for docker and master services.
+#openshift_http_proxy=http://USER:PASSWORD@IPADDR:PORT
+#openshift_https_proxy=https://USER:PASSWORD@IPADDR:PORT
+#openshift_no_proxy='.hosts.example.com,some-host.com'
+#
+# Most environments don't require a proxy between openshift masters, nodes, and
+# etcd hosts. So automatically add those hostnames to the openshift_no_proxy list.
+# If all of your hosts share a common domain you may wish to disable this and
+# specify that domain above.
+#openshift_generate_no_proxy_hosts=True
+#
+# These options configure the BuildDefaults admission controller which injects
+# environment variables into Builds. These values will default to their
+# corresponding values above but you may set them independently. See BuildDefaults
+# documentation at https://docs.openshift.org/latest/admin_guide/build_defaults_overrides.html
+#openshift_builddefaults_http_proxy=openshift_http_proxy
+#openshift_builddefaults_https_proxy=openshift_https_proxy
+#openshift_builddefaults_no_proxy=openshift_no_proxy
+#openshift_builddefaults_git_http_proxy=openshift_builddefaults_http_proxy
+#openshift_builddefaults_git_https_proxy=openshift_builddefaults_https_proxy
+# Or you may optionally define your own serialized as json
+#openshift_builddefaults_json='{"BuildDefaults":{"configuration":{"apiVersion":"v1","env":[{"name":"HTTP_PROXY","value":"http://proxy.example.com.redhat.com:3128"},{"name":"NO_PROXY","value":"ose3-master.example.com"}],"gitHTTPProxy":"http://proxy.example.com:3128","kind":"BuildDefaultsConfig"}}}'
+
+
+
# host group for masters
[masters]
aep3-master[1:3]-ansible.test.example.com
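
The new `openshift_master_htpasswd_users` variable shown in the hunk above expects pre-hashed values. One way to generate them is sketched below, assuming the third-party passlib package (>= 1.7 for the `.hash()` API); output from `htpasswd -nb <user> <password>` works equally well:

```python
# Sketch assuming third-party passlib >= 1.7: produce htpasswd-compatible
# apr1 (MD5) hashes for openshift_master_htpasswd_users. Example passwords only.
from passlib.hash import apr_md5_crypt

users = {'user1': 's3cret', 'user2': 'changeme'}
hashed = {name: apr_md5_crypt.hash(password) for name, password in users.items()}
print(hashed)
# Paste the result into the inventory, e.g.:
# openshift_master_htpasswd_users={'user1': '$apr1$...', 'user2': '$apr1$...'}
```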
diff --git a/inventory/byo/hosts.openstack b/inventory/byo/hosts.openstack
index 05df75c2f..ea7e905cb 100644
--- a/inventory/byo/hosts.openstack
+++ b/inventory/byo/hosts.openstack
@@ -10,7 +10,7 @@ lb
# Set variables common for all OSEv3 hosts
[OSEv3:vars]
ansible_ssh_user=cloud-user
-ansible_sudo=true
+ansible_become=yes
# Debug level for all OpenShift components (Defaults to 2)
debug_level=2
diff --git a/inventory/byo/hosts.origin.example b/inventory/byo/hosts.origin.example
index 1f13aade6..4eb28bdfb 100644
--- a/inventory/byo/hosts.origin.example
+++ b/inventory/byo/hosts.origin.example
@@ -15,9 +15,9 @@ nfs
# ssh agent.
ansible_ssh_user=root
-# If ansible_ssh_user is not root, ansible_sudo must be set to true and the
+# If ansible_ssh_user is not root, ansible_become must be set to true and the
# user must be configured for passwordless sudo
-#ansible_sudo=true
+#ansible_become=yes
# Debug level for all OpenShift components (Defaults to 2)
debug_level=2
@@ -76,7 +76,7 @@ deployment_type=origin
#openshift_docker_options="-l warn --ipv6=false"
# Deprecated methods to set --log-driver and --log-opts flags, use openshift_docker_options instead
#openshift_docker_log_driver=json
-#openshift_docker_log_options="tag=mailer"
+#openshift_docker_log_options=["tag=mailer"]
# Alternate image format string. If you're not modifying the format string and
# only need to inject your own registry you may want to consider
@@ -92,12 +92,26 @@ deployment_type=origin
# htpasswd auth
openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/origin/master/htpasswd'}]
+# Defining htpasswd users
+#openshift_master_htpasswd_users={'user1': '<pre-hashed password>', 'user2': '<pre-hashed password>'}
+# or
+#openshift_master_htpasswd_file=<path to local pre-generated htpasswd file>
# Allow all auth
#openshift_master_identity_providers=[{'name': 'allow_all', 'login': 'true', 'challenge': 'true', 'kind': 'AllowAllPasswordIdentityProvider'}]
# LDAP auth
#openshift_master_identity_providers=[{'name': 'my_ldap_provider', 'challenge': 'true', 'login': 'true', 'kind': 'LDAPPasswordIdentityProvider', 'attributes': {'id': ['dn'], 'email': ['mail'], 'name': ['cn'], 'preferredUsername': ['uid']}, 'bindDN': '', 'bindPassword': '', 'ca': '', 'insecure': 'false', 'url': 'ldap://ldap.example.com:389/ou=users,dc=example,dc=com?uid'}]
+# Configuring the ldap ca certificate
+#openshift_master_ldap_ca=<ca text>
+# or
+#openshift_master_ldap_ca_file=<path to local ca file to use>
+
+# Available variables for configuring certificates for other identity providers:
+#openshift_master_openid_ca
+#openshift_master_openid_ca_file
+#openshift_master_request_header_ca
+#openshift_master_request_header_ca_file
# Cloud Provider Configuration
#
@@ -341,6 +355,38 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Setting this variable to true will override that check.
#openshift_override_hostname_check=true
+# Configure dnsmasq for cluster dns, switch the host's local resolver to use dnsmasq
+# and configure node's dnsIP to point at the node's local dnsmasq instance. Defaults
+# to True for Origin 1.2 and OSE 3.2. False for 1.1 / 3.1 installs, this cannot
+# be used with 1.0 and 3.0.
+# openshift_use_dnsmasq=False
+
+# Global Proxy Configuration
+# These options configure HTTP_PROXY, HTTPS_PROXY, and NOPROXY environment
+# variables for docker and master services.
+#openshift_http_proxy=http://USER:PASSWORD@IPADDR:PORT
+#openshift_https_proxy=https://USER:PASSWORD@IPADDR:PORT
+#openshift_no_proxy='.hosts.example.com,some-host.com'
+#
+# Most environments don't require a proxy between openshift masters, nodes, and
+# etcd hosts. So automatically add those hostnames to the openshift_no_proxy list.
+# If all of your hosts share a common domain you may wish to disable this and
+# specify that domain above.
+#openshift_generate_no_proxy_hosts=True
+#
+# These options configure the BuildDefaults admission controller which injects
+# environment variables into Builds. These values will default to their
+# corresponding values above but you may set them independently. See BuildDefaults
+# documentation at https://docs.openshift.org/latest/admin_guide/build_defaults_overrides.html
+#openshift_builddefaults_http_proxy=openshift_http_proxy
+#openshift_builddefaults_https_proxy=openshift_https_proxy
+#openshift_builddefaults_no_proxy=openshift_no_proxy
+#openshift_builddefaults_git_http_proxy=openshift_builddefaults_http_proxy
+#openshift_builddefaults_git_https_proxy=openshift_builddefaults_https_proxy
+# Or you may optionally define your own serialized as json
+#openshift_builddefaults_json='{"BuildDefaults":{"configuration":{"apiVersion":"v1","env":[{"name":"HTTP_PROXY","value":"http://proxy.example.com.redhat.com:3128"},{"name":"NO_PROXY","value":"ose3-master.example.com"}],"gitHTTPProxy":"http://proxy.example.com:3128","kind":"BuildDefaultsConfig"}}}'
+
+
# host group for masters
[masters]
ose3-master[1:3]-ansible.test.example.com
diff --git a/inventory/byo/hosts.ose.example b/inventory/byo/hosts.ose.example
index 2386eb236..98d484ecb 100644
--- a/inventory/byo/hosts.ose.example
+++ b/inventory/byo/hosts.ose.example
@@ -14,9 +14,9 @@ lb
# ssh agent.
ansible_ssh_user=root
-# If ansible_ssh_user is not root, ansible_sudo must be set to true and the
+# If ansible_ssh_user is not root, ansible_become must be set to true and the
# user must be configured for passwordless sudo
-#ansible_sudo=true
+#ansible_become=yes
# Debug level for all OpenShift components (Defaults to 2)
debug_level=2
@@ -75,7 +75,7 @@ deployment_type=openshift-enterprise
#openshift_docker_options="-l warn --ipv6=false"
# Deprecated methods to set --log-driver and --log-opts flags, use openshift_docker_options instead
#openshift_docker_log_driver=json
-#openshift_docker_log_options="tag=mailer"
+#openshift_docker_log_options=["tag=mailer"]
# Alternate image format string. If you're not modifying the format string and
@@ -88,12 +88,26 @@ deployment_type=openshift-enterprise
# htpasswd auth
openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/origin/master/htpasswd'}]
+# Defining htpasswd users
+#openshift_master_htpasswd_users={'user1': '<pre-hashed password>', 'user2': '<pre-hashed password>'}
+# or
+#openshift_master_htpasswd_file=<path to local pre-generated htpasswd file>
# Allow all auth
#openshift_master_identity_providers=[{'name': 'allow_all', 'login': 'true', 'challenge': 'true', 'kind': 'AllowAllPasswordIdentityProvider'}]
# LDAP auth
#openshift_master_identity_providers=[{'name': 'my_ldap_provider', 'challenge': 'true', 'login': 'true', 'kind': 'LDAPPasswordIdentityProvider', 'attributes': {'id': ['dn'], 'email': ['mail'], 'name': ['cn'], 'preferredUsername': ['uid']}, 'bindDN': '', 'bindPassword': '', 'ca': '', 'insecure': 'false', 'url': 'ldap://ldap.example.com:389/ou=users,dc=example,dc=com?uid'}]
+# Configuring the ldap ca certificate
+#openshift_master_ldap_ca=<ca text>
+# or
+#openshift_master_ldap_ca_file=<path to local ca file to use>
+
+# Available variables for configuring certificates for other identity providers:
+#openshift_master_openid_ca
+#openshift_master_openid_ca_file
+#openshift_master_request_header_ca
+#openshift_master_request_header_ca_file
# Cloud Provider Configuration
#
@@ -337,6 +351,37 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Setting this variable to true will override that check.
#openshift_override_hostname_check=true
+# Configure dnsmasq for cluster dns, switch the host's local resolver to use dnsmasq
+# and configure node's dnsIP to point at the node's local dnsmasq instance. Defaults
+# to True for Origin 1.2 and OSE 3.2. False for 1.1 / 3.1 installs, this cannot
+# be used with 1.0 and 3.0.
+# openshift_use_dnsmasq=False
+
+# Global Proxy Configuration
+# These options configure HTTP_PROXY, HTTPS_PROXY, and NOPROXY environment
+# variables for docker and master services.
+#openshift_http_proxy=http://USER:PASSWORD@IPADDR:PORT
+#openshift_https_proxy=https://USER:PASSWORD@IPADDR:PORT
+#openshift_no_proxy='.hosts.example.com,some-host.com'
+#
+# Most environments don't require a proxy between openshift masters, nodes, and
+# etcd hosts. So automatically add those hostnames to the openshift_no_proxy list.
+# If all of your hosts share a common domain you may wish to disable this and
+# specify that domain above.
+#openshift_generate_no_proxy_hosts=True
+#
+# These options configure the BuildDefaults admission controller which injects
+# environment variables into Builds. These values will default to their
+# corresponding values above but you may set them independently. See BuildDefaults
+# documentation at https://docs.openshift.org/latest/admin_guide/build_defaults_overrides.html
+#openshift_builddefaults_http_proxy=openshift_http_proxy
+#openshift_builddefaults_https_proxy=openshift_https_proxy
+#openshift_builddefaults_no_proxy=openshift_no_proxy
+#openshift_builddefaults_git_http_proxy=openshift_builddefaults_http_proxy
+#openshift_builddefaults_git_https_proxy=openshift_builddefaults_https_proxy
+# Or you may optionally define your own serialized as json
+#openshift_builddefaults_json='{"BuildDefaults":{"configuration":{"apiVersion":"v1","env":[{"name":"HTTP_PROXY","value":"http://proxy.example.com.redhat.com:3128"},{"name":"NO_PROXY","value":"ose3-master.example.com"}],"gitHTTPProxy":"http://proxy.example.com:3128","kind":"BuildDefaultsConfig"}}}'
+
# host group for masters
[masters]
ose3-master[1:3]-ansible.test.example.com
diff --git a/inventory/gce/hosts/hosts b/inventory/gce/hosts/hosts
index bf4e0845a..3996e577e 100644
--- a/inventory/gce/hosts/hosts
+++ b/inventory/gce/hosts/hosts
@@ -1 +1 @@
-localhost ansible_connection=local ansible_sudo=no ansible_python_interpreter='/usr/bin/env python2'
+localhost ansible_connection=local ansible_become=no ansible_python_interpreter='/usr/bin/env python2'
diff --git a/inventory/libvirt/hosts/hosts b/inventory/libvirt/hosts/hosts
index bf4e0845a..3996e577e 100644
--- a/inventory/libvirt/hosts/hosts
+++ b/inventory/libvirt/hosts/hosts
@@ -1 +1 @@
-localhost ansible_connection=local ansible_sudo=no ansible_python_interpreter='/usr/bin/env python2'
+localhost ansible_connection=local ansible_become=no ansible_python_interpreter='/usr/bin/env python2'
diff --git a/inventory/multi_inventory.py b/inventory/multi_inventory.py
deleted file mode 100755
index be597267e..000000000
--- a/inventory/multi_inventory.py
+++ /dev/null
@@ -1,462 +0,0 @@
-#!/usr/bin/env python2
-'''
- Fetch and combine multiple inventory account settings into a single
- json hash.
-'''
-# vim: expandtab:tabstop=4:shiftwidth=4
-
-from time import time
-import argparse
-import yaml
-import os
-import subprocess
-import json
-import errno
-import fcntl
-import tempfile
-import copy
-from string import Template
-import shutil
-
-CONFIG_FILE_NAME = 'multi_inventory.yaml'
-DEFAULT_CACHE_PATH = os.path.expanduser('~/.ansible/tmp/multi_inventory.cache')
-
-class MultiInventoryException(Exception):
- '''Exceptions for MultiInventory class'''
- pass
-
-# pylint: disable=too-many-public-methods
-# After a refactor of too-many-branches and placing those branches into
-# their own corresponding function, we have passed the allowed amount of functions(20).
-class MultiInventory(object):
- '''
- MultiInventory class:
- Opens a yaml config file and reads aws credentials.
- Stores a json hash of resources in result.
- '''
-
- def __init__(self, args=None):
- # Allow args to be passed when called as a library
- if not args:
- self.args = {}
- else:
- self.args = args
-
- self.cache_path = DEFAULT_CACHE_PATH
- self.config = None
- self.all_inventory_results = {}
- self.result = {}
- self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
-
- same_dir_config_file = os.path.join(self.file_path, CONFIG_FILE_NAME)
- etc_dir_config_file = os.path.join(os.path.sep, 'etc', 'ansible', CONFIG_FILE_NAME)
-
- # Prefer a file in the same directory, fall back to a file in etc
- if os.path.isfile(same_dir_config_file):
- self.config_file = same_dir_config_file
- elif os.path.isfile(etc_dir_config_file):
- self.config_file = etc_dir_config_file
- else:
- self.config_file = None # expect env vars
-
- # load yaml
- if self.config_file and os.path.isfile(self.config_file):
- self.config = self.load_yaml_config()
- elif os.environ.has_key("AWS_ACCESS_KEY_ID") and \
- os.environ.has_key("AWS_SECRET_ACCESS_KEY"):
- # Build a default config
- self.config = {}
- self.config['accounts'] = [
- {
- 'name': 'default',
- 'cache_location': DEFAULT_CACHE_PATH,
- 'provider': 'aws/hosts/ec2.py',
- 'env_vars': {
- 'AWS_ACCESS_KEY_ID': os.environ["AWS_ACCESS_KEY_ID"],
- 'AWS_SECRET_ACCESS_KEY': os.environ["AWS_SECRET_ACCESS_KEY"],
- }
- },
- ]
-
- self.config['cache_max_age'] = 300
- else:
- raise RuntimeError("Could not find valid ec2 credentials in the environment.")
-
- if self.config.has_key('cache_location'):
- self.cache_path = self.config['cache_location']
-
- def run(self):
- '''This method checks to see if the local
- cache is valid for the inventory.
-
- if the cache is valid; return cache
- else the credentials are loaded from multi_inventory.yaml or from the env
- and we attempt to get the inventory from the provider specified.
- '''
-
- if self.args.get('refresh_cache', None):
- self.get_inventory()
- self.write_to_cache()
- # if it's a host query, fetch and do not cache
- elif self.args.get('host', None):
- self.get_inventory()
- elif not self.is_cache_valid():
- # go fetch the inventories and cache them if cache is expired
- self.get_inventory()
- self.write_to_cache()
- else:
- # get data from disk
- self.get_inventory_from_cache()
-
- def load_yaml_config(self, conf_file=None):
- """Load a yaml config file with credentials to query the
- respective cloud for inventory.
- """
- config = None
-
- if not conf_file:
- conf_file = self.config_file
-
- with open(conf_file) as conf:
- config = yaml.safe_load(conf)
-
- # Provide a check for unique account names
- if len(set([acc['name'] for acc in config['accounts']])) != len(config['accounts']):
- raise MultiInventoryException('Duplicate account names in config file')
-
- return config
-
- def get_provider_tags(self, provider, env=None):
- """Call <provider> and query all of the tags that are usuable
- by ansible. If environment is empty use the default env.
- """
- if not env:
- env = os.environ
-
- # Allow provider paths in the config file to be relative to this script's directory
- if os.path.isfile(os.path.join(self.file_path, provider)):
- provider = os.path.join(self.file_path, provider)
-
- # check to see if provider exists
- if not os.path.isfile(provider) or not os.access(provider, os.X_OK):
- raise RuntimeError("Problem with the provider. Please check path " \
- "and that it is executable. (%s)" % provider)
-
- cmds = [provider]
- if self.args.get('host', None):
- cmds.append("--host")
- cmds.append(self.args.get('host', None))
- else:
- cmds.append('--list')
-
- if 'aws' in provider.lower():
- cmds.append('--refresh-cache')
-
- return subprocess.Popen(cmds, stderr=subprocess.PIPE, \
- stdout=subprocess.PIPE, env=env)
-
- @staticmethod
- def generate_config(provider_files):
- """Generate the provider_files in a temporary directory.
- """
- prefix = 'multi_inventory.'
- tmp_dir_path = tempfile.mkdtemp(prefix=prefix)
- for provider_file in provider_files:
- filedes = open(os.path.join(tmp_dir_path, provider_file['name']), 'w+')
- content = Template(provider_file['contents']).substitute(tmpdir=tmp_dir_path)
- filedes.write(content)
- filedes.close()
-
- return tmp_dir_path
-
- def run_provider(self):
- '''Setup the provider call with proper variables
- and call self.get_provider_tags.
- '''
- try:
- all_results = []
- tmp_dir_paths = []
- processes = {}
- for account in self.config['accounts']:
- tmp_dir = None
- if account.has_key('provider_files'):
- tmp_dir = MultiInventory.generate_config(account['provider_files'])
- tmp_dir_paths.append(tmp_dir)
-
- # Update env vars after creating provider_config_files
- # so that we can grab the tmp_dir if it exists
- env = account.get('env_vars', {})
- if env and tmp_dir:
- for key, value in env.items():
- env[key] = Template(value).substitute(tmpdir=tmp_dir)
-
- name = account['name']
- provider = account['provider']
- processes[name] = self.get_provider_tags(provider, env)
-
- # for each process collect stdout when its available
- for name, process in processes.items():
- out, err = process.communicate()
- all_results.append({
- "name": name,
- "out": out.strip(),
- "err": err.strip(),
- "code": process.returncode
- })
-
- finally:
- # Clean up the mkdtemp dirs
- for tmp_dir in tmp_dir_paths:
- shutil.rmtree(tmp_dir)
-
- return all_results
-
- def get_inventory(self):
- """Create the subprocess to fetch tags from a provider.
- Host query:
- Query to return a specific host. If more than one account
- returns a result, fail.
-
- List query:
- Query all of the different accounts for their tags. Once completed
- store all of their results into one merged updated hash.
- """
- provider_results = self.run_provider()
-
- # process --host results
- # For any 0 result, return it
- if self.args.get('host', None):
- count = 0
- for results in provider_results:
- if results['code'] == 0 and results['err'] == '' and results['out'] != '{}':
- self.result = json.loads(results['out'])
- count += 1
- if count > 1:
- raise RuntimeError("Found > 1 results for --host %s. \
- This is an invalid state." % self.args.get('host', None))
- # process --list results
- else:
- # For any non-zero, raise an error on it
- for result in provider_results:
- if result['code'] != 0:
- err_msg = ['\nProblem fetching account: {name}',
- 'Error Code: {code}',
- 'StdErr: {err}',
- 'Stdout: {out}',
- ]
- raise RuntimeError('\n'.join(err_msg).format(**result))
- else:
- self.all_inventory_results[result['name']] = json.loads(result['out'])
-
- # Check if user wants extra vars in yaml by
- # having hostvars and all_group defined
- for acc_config in self.config['accounts']:
- self.apply_account_config(acc_config)
-
- # Build results by merging all dictionaries
- values = self.all_inventory_results.values()
- values.insert(0, self.result)
- for result in values:
- MultiInventory.merge_destructively(self.result, result)
-
- def add_entry(self, data, keys, item):
- ''' Add an item to a dictionary with key notation a.b.c
- d = {'a': {'b': 'c'}}
- keys = a.b
- item = c
- '''
- if "." in keys:
- key, rest = keys.split(".", 1)
- if key not in data:
- data[key] = {}
- self.add_entry(data[key], rest, item)
- else:
- data[keys] = item
-
- def get_entry(self, data, keys):
- ''' Get an item from a dictionary with key notation a.b.c
- d = {'a': {'b': 'c'}}
- keys = a.b
- return c
- '''
- if keys and "." in keys:
- key, rest = keys.split(".", 1)
- return self.get_entry(data[key], rest)
- else:
- return data.get(keys, None)
-
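
The two helpers above implement dotted-key access into nested dictionaries. A
standalone sketch of the same recursion, with hypothetical keys, for readers who
want to see the behavior in isolation:

    def add_entry(data, keys, item):
        # Set data['a']['b'] = item for keys 'a.b', creating levels as needed.
        if "." in keys:
            key, rest = keys.split(".", 1)
            data.setdefault(key, {})
            add_entry(data[key], rest, item)
        else:
            data[keys] = item

    d = {}
    add_entry(d, "oo.clusterid", "prod")
    assert d == {"oo": {"clusterid": "prod"}}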
- def apply_extra_vars(self, inventory, extra_vars):
- ''' Apply the account config extra vars '''
- # Extra vars go here
- for new_var, value in extra_vars.items():
- for data in inventory.values():
- self.add_entry(data, new_var, value)
-
- def apply_clone_vars(self, inventory, clone_vars):
- ''' Apply the account config clone vars '''
- # Clone vars go here
- for to_name, from_name in clone_vars.items():
- for data in inventory.values():
- self.add_entry(data, to_name, self.get_entry(data, from_name))
-
- def apply_extra_groups(self, inventory, extra_groups):
- ''' Apply the account config for extra groups '''
- _ = self # Here for pylint; we wanted an instance method instead of a static method
- for new_var, value in extra_groups.items():
- for _ in inventory['_meta']['hostvars'].values():
- inventory["%s_%s" % (new_var, value)] = copy.copy(inventory['all_hosts'])
-
- def apply_clone_groups(self, inventory, clone_groups):
- ''' Apply the account config for clone groups '''
- for to_name, from_name in clone_groups.items():
- for name, data in inventory['_meta']['hostvars'].items():
- key = '%s_%s' % (to_name, self.get_entry(data, from_name))
- if not inventory.has_key(key):
- inventory[key] = []
- inventory[key].append(name)
-
- def apply_group_selectors(self, inventory, group_selectors):
- ''' Apply the account config for group selectors '''
- _ = self # Here for pylint; we wanted an instance method instead of a static method
- # There could be multiple clusters per account. We need to process these selectors
- # based upon the oo_clusterid_ variable.
- clusterids = [group for group in inventory if "oo_clusterid_" in group]
-
- for clusterid in clusterids:
- for selector in group_selectors:
- if inventory.has_key(selector['from_group']):
- hosts = list(set(inventory[clusterid]) & set(inventory[selector['from_group']]))
- hosts.sort()
-
- # Multiple clusters in an account
- if inventory.has_key(selector['name']):
- inventory[selector['name']].extend(hosts[0:selector['count']])
- else:
- inventory[selector['name']] = hosts[0:selector['count']]
-
- for host in hosts:
- if host in inventory[selector['name']]:
- inventory['_meta']['hostvars'][host][selector['name']] = True
- else:
- inventory['_meta']['hostvars'][host][selector['name']] = False
-
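
A group selector picks the first `count` hosts (sorted) from the intersection of
a cluster group and `from_group`, then records membership as a boolean hostvar.
A condensed restatement of that logic with hypothetical group and host names:

    inventory = {
        "oo_clusterid_prod": ["node1", "node2", "node3"],
        "masters": ["node1", "node2"],
        "_meta": {"hostvars": {"node1": {}, "node2": {}, "node3": {}}},
    }
    selector = {"name": "primary_master", "from_group": "masters", "count": 1}

    hosts = sorted(set(inventory["oo_clusterid_prod"]) &
                   set(inventory[selector["from_group"]]))
    inventory[selector["name"]] = hosts[0:selector["count"]]
    for host in hosts:
        flag = host in inventory[selector["name"]]
        inventory["_meta"]["hostvars"][host][selector["name"]] = flag
    # inventory["primary_master"] == ["node1"]; node1 gets primary_master=True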
- def apply_account_config(self, acc_config):
- ''' Apply account config settings '''
- results = self.all_inventory_results[acc_config['name']]
- results['all_hosts'] = results['_meta']['hostvars'].keys()
-
- self.apply_extra_vars(results['_meta']['hostvars'], acc_config.get('extra_vars', {}))
-
- self.apply_clone_vars(results['_meta']['hostvars'], acc_config.get('clone_vars', {}))
-
- self.apply_extra_groups(results, acc_config.get('extra_groups', {}))
-
- self.apply_clone_groups(results, acc_config.get('clone_groups', {}))
-
- self.apply_group_selectors(results, acc_config.get('group_selectors', {}))
-
- # store the results back into all_inventory_results
- self.all_inventory_results[acc_config['name']] = results
-
- @staticmethod
- def merge_destructively(input_a, input_b):
- "merges b into input_a"
- for key in input_b:
- if key in input_a:
- if isinstance(input_a[key], dict) and isinstance(input_b[key], dict):
- MultiInventory.merge_destructively(input_a[key], input_b[key])
- elif input_a[key] == input_b[key]:
- pass # same leaf value
- # both lists, so add each element of b to a if it does not already exist
- elif isinstance(input_a[key], list) and isinstance(input_b[key], list):
- for result in input_b[key]:
- if result not in input_a[key]:
- input_a[key].append(result)
- # a is a list and not b
- elif isinstance(input_a[key], list):
- if input_b[key] not in input_a[key]:
- input_a[key].append(input_b[key])
- elif isinstance(input_b[key], list):
- input_a[key] = [input_a[key]] + [k for k in input_b[key] if k != input_a[key]]
- else:
- input_a[key] = [input_a[key], input_b[key]]
- else:
- input_a[key] = input_b[key]
- return input_a
-
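
Because merge_destructively is a staticmethod it can be exercised without
constructing a MultiInventory. A usage sketch, assuming the script is importable
as a module named multi_inventory:

    from multi_inventory import MultiInventory

    a = {"hosts": ["h1"], "vars": {"cloud": "aws"}}
    b = {"hosts": ["h2"], "vars": {"account": "aws1"}}
    MultiInventory.merge_destructively(a, b)
    # Lists are unioned and dicts merged recursively:
    # a == {"hosts": ["h1", "h2"], "vars": {"cloud": "aws", "account": "aws1"}}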
- def is_cache_valid(self):
- ''' Determine whether the cache file has expired or is still valid '''
-
- if os.path.isfile(self.cache_path):
- mod_time = os.path.getmtime(self.cache_path)
- current_time = time()
- if (mod_time + self.config['cache_max_age']) > current_time:
- return True
-
- return False
-
- def parse_cli_args(self):
- ''' Command line argument processing '''
-
- parser = argparse.ArgumentParser(
- description='Produce an Ansible Inventory file based on a provider')
- parser.add_argument('--refresh-cache', action='store_true', default=False,
- help='Force a refresh of the cached inventory (default: False)')
- parser.add_argument('--list', action='store_true', default=True,
- help='List instances (default: True)')
- parser.add_argument('--host', action='store', default=False,
- help='Get all the variables about a specific instance')
- self.args = parser.parse_args().__dict__
-
- def write_to_cache(self):
- ''' Writes data in JSON format to a file '''
-
- # if it does not exist, try to create it.
- if not os.path.isfile(self.cache_path):
- path = os.path.dirname(self.cache_path)
- try:
- os.makedirs(path)
- except OSError as exc:
- if exc.errno != errno.EEXIST or not os.path.isdir(path):
- raise
-
- json_data = MultiInventory.json_format_dict(self.result, True)
- with open(self.cache_path, 'w') as cache:
- try:
- fcntl.flock(cache, fcntl.LOCK_EX)
- cache.write(json_data)
- finally:
- fcntl.flock(cache, fcntl.LOCK_UN)
-
- def get_inventory_from_cache(self):
- ''' Reads the inventory from the cache file and returns it as a JSON
- object '''
-
- if not os.path.isfile(self.cache_path):
- return None
-
- with open(self.cache_path, 'r') as cache:
- self.result = json.loads(cache.read())
-
- return True
-
- @classmethod
- def json_format_dict(cls, data, pretty=False):
- ''' Converts a dict to a JSON object and dumps it as a formatted
- string '''
-
- if pretty:
- return json.dumps(data, sort_keys=True, indent=2)
- else:
- return json.dumps(data)
-
- def result_str(self):
- '''Return cache string stored in self.result'''
- return self.json_format_dict(self.result, True)
-
-
-if __name__ == "__main__":
- MI2 = MultiInventory()
- MI2.parse_cli_args()
- MI2.run()
- print MI2.result_str()
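
Before its removal the script could also be driven as a library; per __init__
above, the constructor accepts the same options the CLI would parse. A usage
sketch (requires a multi_inventory.yaml config, or AWS credentials in the
environment):

    from multi_inventory import MultiInventory

    inv = MultiInventory({'refresh_cache': True})  # skip the cache, re-query providers
    inv.run()
    print(inv.result_str())  # merged inventory as pretty-printed JSON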
diff --git a/inventory/multi_inventory.yaml.example b/inventory/multi_inventory.yaml.example
deleted file mode 100644
index 0f0788d18..000000000
--- a/inventory/multi_inventory.yaml.example
+++ /dev/null
@@ -1,51 +0,0 @@
-# multi ec2 inventory configs
-#
-cache_location: ~/.ansible/tmp/multi_inventory.cache
-
-accounts:
- - name: aws1
- provider: aws/ec2.py
- provider_files:
- - name: ec2.ini
- contents: |-
- [ec2]
- regions = all
- regions_exclude = us-gov-west-1,cn-north-1
- destination_variable = public_dns_name
- route53 = False
- cache_path = ~/.ansible/tmp
- cache_max_age = 300
- vpc_destination_variable = ip_address
- env_vars:
- AWS_ACCESS_KEY_ID: XXXXXXXXXXXXXXXXXXXX
- AWS_SECRET_ACCESS_KEY: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
- EC2_INI_PATH: ${tmpdir}/ec2.ini # we replace ${tmpdir} with the temporary directory that we've created for the provider.
- extra_vars:
- cloud: aws
- account: aws1
-
- - name: mygce
- extra_vars:
- cloud: gce
- account: gce1
- env_vars:
- GCE_INI_PATH: ${tmpdir}/gce.ini # we replace ${tmpdir} with the temporary directory that we've created for the provider.
- provider: gce/gce.py
- provider_files:
- - name: priv_key.pem
- contents: |-
- -----BEGIN PRIVATE KEY-----
- yourprivatekeydatahere
- -----END PRIVATE KEY-----
- - name: gce.ini
- contents: |-
- [gce]
- gce_service_account_email_address = <uuid>@developer.gserviceaccount.com
- gce_service_account_pem_file_path = ${tmpdir}/priv_key.pem # we replace ${tmpdir} with the temporary directory that we've created for the provider.
- gce_project_id = gce-project
- zone = us-central1-a
- network = default
- gce_machine_type = n1-standard-2
- gce_machine_image = rhel7
-
-cache_max_age: 600
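
The ${tmpdir} placeholders above are expanded by generate_config in the (now
removed) multi_inventory.py via string.Template. A minimal sketch of that
substitution, with hypothetical file contents:

    import os
    import tempfile
    from string import Template

    tmp_dir = tempfile.mkdtemp(prefix='multi_inventory.')
    contents = "[ec2]\ncache_path = ${tmpdir}/cache"
    with open(os.path.join(tmp_dir, 'ec2.ini'), 'w') as ini:
        ini.write(Template(contents).substitute(tmpdir=tmp_dir))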
diff --git a/inventory/openstack/hosts/hosts b/inventory/openstack/hosts/hosts
index 2d2194a4d..9b63e98f4 100644
--- a/inventory/openstack/hosts/hosts
+++ b/inventory/openstack/hosts/hosts
@@ -1 +1 @@
-localhost ansible_sudo=no ansible_python_interpreter='/usr/bin/env python2' connection=local
+localhost ansible_become=no ansible_python_interpreter='/usr/bin/env python2' connection=local
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index 00f8f58bf..0cefca87b 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -5,7 +5,7 @@
}
Name: openshift-ansible
-Version: 3.0.80
+Version: 3.0.85
Release: 1%{?dist}
Summary: Openshift and Atomic Enterprise Ansible
License: ASL 2.0
@@ -15,6 +15,7 @@ BuildArch: noarch
Requires: ansible >= 1.9.4
Requires: python2
+Requires: openshift-ansible-docs = %{version}-%{release}
%description
Openshift and Atomic Enterprise Ansible
@@ -43,30 +44,14 @@ mkdir -p %{buildroot}%{_bindir}
mkdir -p %{buildroot}%{python_sitelib}/openshift_ansible
mkdir -p %{buildroot}/etc/bash_completion.d
mkdir -p %{buildroot}/etc/openshift_ansible
-cp -p bin/{ossh,oscp,opssh,opscp,ohi} %{buildroot}%{_bindir}
-cp -pP bin/openshift_ansible/* %{buildroot}%{python_sitelib}/openshift_ansible
-cp -p bin/ossh_bash_completion %{buildroot}/etc/bash_completion.d
-cp -p bin/openshift_ansible.conf.example %{buildroot}/etc/openshift_ansible/openshift_ansible.conf
# Fix links
-rm -f %{buildroot}%{python_sitelib}/openshift_ansible/multi_inventory.py
rm -f %{buildroot}%{python_sitelib}/openshift_ansible/aws
rm -f %{buildroot}%{python_sitelib}/openshift_ansible/gce
-ln -sf %{_datadir}/ansible/inventory/multi_inventory.py %{buildroot}%{python_sitelib}/openshift_ansible/multi_inventory.py
-ln -sf %{_datadir}/ansible/inventory/aws %{buildroot}%{python_sitelib}/openshift_ansible/aws
-ln -sf %{_datadir}/ansible/inventory/gce %{buildroot}%{python_sitelib}/openshift_ansible/gce
# openshift-ansible-docs install
-# -docs are currently just %doc, no install needed
-
-# openshift-ansible-inventory install
-mkdir -p %{buildroot}/etc/ansible
-mkdir -p %{buildroot}%{_datadir}/ansible/inventory
-mkdir -p %{buildroot}%{_datadir}/ansible/inventory/aws
-mkdir -p %{buildroot}%{_datadir}/ansible/inventory/gce
-cp -p inventory/multi_inventory.py %{buildroot}%{_datadir}/ansible/inventory
-cp -p inventory/multi_inventory.yaml.example %{buildroot}/etc/ansible/multi_inventory.yaml
-cp -p inventory/aws/hosts/ec2.py %{buildroot}%{_datadir}/ansible/inventory/aws
-cp -p inventory/gce/hosts/gce.py %{buildroot}%{_datadir}/ansible/inventory/gce
+# Install example inventory into docs/examples
+mkdir -p docs/example-inventories
+cp inventory/byo/* docs/example-inventories/
# openshift-ansible-playbooks install
cp -rp playbooks %{buildroot}%{_datadir}/ansible/%{name}/
@@ -74,10 +59,6 @@ cp -rp playbooks %{buildroot}%{_datadir}/ansible/%{name}/
# openshift-ansible-roles install
cp -rp roles %{buildroot}%{_datadir}/ansible/%{name}/
-# openshift-ansible-zabbix install (standalone lib_zabbix library)
-mkdir -p %{buildroot}%{_datadir}/ansible/zabbix
-cp -rp roles/lib_zabbix/library/* %{buildroot}%{_datadir}/ansible/zabbix/
-
# openshift-ansible-filter-plugins install
cp -rp filter_plugins %{buildroot}%{_datadir}/ansible_plugins/
@@ -99,42 +80,6 @@ popd
%dir %{_datadir}/ansible/%{name}
# ----------------------------------------------------------------------------------
-# openshift-ansible-bin subpackage
-# ----------------------------------------------------------------------------------
-%package bin
-Summary: Openshift and Atomic Enterprise Ansible Scripts for working with metadata hosts
-Requires: %{name} = %{version}
-Requires: %{name}-inventory = %{version}
-Requires: %{name}-playbooks = %{version}
-BuildRequires: python2-devel
-BuildArch: noarch
-
-%description bin
-Scripts to make it nicer when working with hosts that are defined only by metadata.
-
-%files bin
-%{_bindir}/*
-%exclude %{_bindir}/atomic-openshift-installer
-%{python_sitelib}/openshift_ansible/
-/etc/bash_completion.d/*
-%config(noreplace) /etc/openshift_ansible/
-
-
-# ----------------------------------------------------------------------------------
-# openshift-ansible-zabbix subpackage
-# ----------------------------------------------------------------------------------
-%package zabbix
-Summary: Openshift and Atomic Enterprise Ansible Zabbix library
-Requires: python-openshift-tools-zbxapi
-BuildArch: noarch
-
-%description zabbix
-Python library for interacting with Zabbix with Ansible.
-
-%files zabbix
-%{_datadir}/ansible/zabbix
-
-# ----------------------------------------------------------------------------------
# openshift-ansible-docs subpackage
# ----------------------------------------------------------------------------------
%package docs
@@ -149,47 +94,6 @@ BuildArch: noarch
%doc docs
# ----------------------------------------------------------------------------------
-# openshift-ansible-inventory subpackage
-# ----------------------------------------------------------------------------------
-%package inventory
-Summary: Openshift and Atomic Enterprise Ansible Inventories
-Requires: %{name} = %{version}
-BuildArch: noarch
-
-%description inventory
-Ansible Inventories used with the openshift-ansible scripts and playbooks.
-
-%files inventory
-%config(noreplace) /etc/ansible/*
-%dir %{_datadir}/ansible/inventory
-%{_datadir}/ansible/inventory/multi_inventory.py*
-
-%package inventory-aws
-Summary: Openshift and Atomic Enterprise Ansible Inventories for AWS
-Requires: %{name}-inventory = %{version}
-Requires: python-boto
-BuildArch: noarch
-
-%description inventory-aws
-Ansible Inventories for AWS used with the openshift-ansible scripts and playbooks.
-
-%files inventory-aws
-%{_datadir}/ansible/inventory/aws/ec2.py*
-
-%package inventory-gce
-Summary: Openshift and Atomic Enterprise Ansible Inventories for GCE
-Requires: %{name}-inventory = %{version}
-Requires: python-libcloud >= 0.13
-BuildArch: noarch
-
-%description inventory-gce
-Ansible Inventories for GCE used with the openshift-ansible scripts and playbooks.
-
-%files inventory-gce
-%{_datadir}/ansible/inventory/gce/gce.py*
-
-
-# ----------------------------------------------------------------------------------
# openshift-ansible-playbooks subpackage
# ----------------------------------------------------------------------------------
%package playbooks
@@ -279,6 +183,67 @@ Atomic OpenShift Utilities includes
%changelog
+* Mon Apr 25 2016 Troy Dawson <tdawson@redhat.com> 3.0.85-1
+- Fix backward compat for osm_default_subdomain (jdetiber@redhat.com)
+- Replace deprecated sudo with become. (abutcher@redhat.com)
+- Fix image version handling for v1.2.0-rc1 (sdodson@redhat.com)
+- Pod must be recreated for the upgrade (bleanhar@redhat.com)
+- openshift_etcd_facts should rely on openshift_facts not openshift_common
+ (jdetiber@redhat.com)
+- Sort and de-dupe no_proxy list (sdodson@redhat.com)
+- openshift-metrics: adding duration and resolution options
+ (efreiber@redhat.com)
+- Changed service account creation to ansible (vishal.patil@nuagenetworks.net)
+- As per https://github.com/openshift/openshift-
+ ansible/issues/1795#issuecomment-213873564, renamed openshift_node_dnsmasq to
+ openshift_use_dnsmasq where applicable. Fixes 1795 (donovan@switchbit.io)
+- Add global proxy configuration (sdodson@redhat.com)
+- remove duplicate register: (tob@butter.sh)
+
+* Fri Apr 22 2016 Troy Dawson <tdawson@redhat.com> 3.0.84-1
+- Fix for docker not present (jdetiber@redhat.com)
+- Reconcile roles in additive-only mode on upgrade (jliggitt@redhat.com)
+- Set etcd_hostname and etcd_ip for masters w/ external etcd.
+ (abutcher@redhat.com)
+
+* Thu Apr 21 2016 Troy Dawson <tdawson@redhat.com> 3.0.83-1
+- a-o-i: Correct bug with default storage host (smunilla@redhat.com)
+- Only add new sccs (bleanhar@redhat.com)
+- Fix bug after portal_net move from master to common role.
+ (dgoodwin@redhat.com)
+- Sync latest content (sdodson@redhat.com)
+- Use xpaas 1.3.0-1, use enterprise content for metrics (sdodson@redhat.com)
+- Support configurable admin user and password for the enterprise Prefix
+ changes for admin and password with nuage_master (abhat@nuagenetworks.net)
+
+* Wed Apr 20 2016 Troy Dawson <tdawson@redhat.com> 3.0.82-1
+- Use a JSON list for docker log options. (dgoodwin@redhat.com)
+- Fix legacy cli_docker_* vars not migrating. (dgoodwin@redhat.com)
+- Fix use of older image tag version during upgrade. (dgoodwin@redhat.com)
+- Remove etcd_interface variable. Remove openshift_docker dependency from the
+ etcd role. (abutcher@redhat.com)
+- Use openshift_hostname/openshift_ip values for etcd configuration and
+ certificates. (abutcher@redhat.com)
+- added new openshift-metrics service (j.david.nieto@gmail.com)
+- Translate legacy facts within the oo_openshift_env filter.
+ (abutcher@redhat.com)
+- Remove empty facts from nested dictionaries. (abutcher@redhat.com)
+- Fix router selector fact migration and match multiple selectors when counting
+ nodes. (abutcher@redhat.com)
+- Fixing the spec for PR 1734 (bleanhar@redhat.com)
+- Add openshift_use_dnsmasq (sdodson@redhat.com)
+- Promote portal_net to openshift.common, add kube_svc_ip (sdodson@redhat.com)
+- Add example inventories to docs, install docs by default (sdodson@redhat.com)
+- Fix use of JSON inventory vars with raw booleans. (dgoodwin@redhat.com)
+- cleanup roles after roles move to openshift-tools (jdiaz@redhat.com)
+- Reference Setup for Origin and Ose from up-to-date docs.openshift.[com|org]
+ instead of local README_[origin|OSE].md (jchaloup@redhat.com)
+
+* Mon Apr 18 2016 Brenton Leanhardt <bleanhar@redhat.com> 3.0.81-1
+- IMAGE_PREFIX=openshift3/ for enterprise logging/metrics (sdodson@redhat.com)
+- a-o-i: Don't assume storage on 1st master (smunilla@redhat.com)
+- Bug 1320829 - Handle OSE 3.0 installs (bleanhar@redhat.com)
+
* Fri Apr 15 2016 Troy Dawson <tdawson@redhat.com> 3.0.80-1
- Refactor docker failed state cleanup (sdodson@redhat.com)
- Support mixed RPM/container installs (bleanhar@redhat.com)
diff --git a/playbooks/adhoc/atomic_openshift_tutorial_reset.yml b/playbooks/adhoc/atomic_openshift_tutorial_reset.yml
index c14d08e87..5a5a00ea4 100644
--- a/playbooks/adhoc/atomic_openshift_tutorial_reset.yml
+++ b/playbooks/adhoc/atomic_openshift_tutorial_reset.yml
@@ -8,7 +8,7 @@
- hosts:
- OSEv3:children
- sudo: yes
+ become: yes
tasks:
- shell: docker ps -a -q | xargs docker stop
diff --git a/playbooks/adhoc/setupnfs.yml b/playbooks/adhoc/setupnfs.yml
index 5f3631fcf..fd489dc70 100644
--- a/playbooks/adhoc/setupnfs.yml
+++ b/playbooks/adhoc/setupnfs.yml
@@ -1,7 +1,7 @@
---
### This playbook is old and we are currently not using NFS.
- hosts: tag_Name_nfs-v3-stg
- sudo: no
+ become: no
remote_user: root
gather_facts: no
roles:
diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml
index 8fb515982..0755d8bc5 100644
--- a/playbooks/adhoc/uninstall.yml
+++ b/playbooks/adhoc/uninstall.yml
@@ -10,7 +10,7 @@
- hosts:
- OSEv3:children
- sudo: yes
+ become: yes
tasks:
- name: Detecting Operating System
@@ -232,6 +232,9 @@
- /usr/local/bin/oadm
- /usr/local/bin/oc
- /usr/local/bin/kubectl
+ - /etc/NetworkManager/dispatcher.d/99-origin-dns.sh
+ - /etc/dnsmasq.d/origin-dns.conf
+ - /etc/dnsmasq.d/origin-upstream-dns.conf
# Since we are potentially removing the systemd unit files for the separated
# master-api and master-controllers services, we need to reload the
@@ -240,7 +243,9 @@
command: systemctl daemon-reload
- hosts: nodes
- sudo: yes
+ become: yes
tasks:
- name: restart docker
service: name=docker state=restarted
+ - name: restart NetworkManager
+ service: name=NetworkManager state=restarted
diff --git a/playbooks/aws/openshift-cluster/config.yml b/playbooks/aws/openshift-cluster/config.yml
index 9fba856a2..66ff3e5b8 100644
--- a/playbooks/aws/openshift-cluster/config.yml
+++ b/playbooks/aws/openshift-cluster/config.yml
@@ -11,7 +11,7 @@
openshift_deployment_type: "{{ deployment_type }}"
openshift_public_hostname: "{{ ec2_ip_address }}"
openshift_registry_selector: 'type=infra'
- openshift_router_selector: 'type=infra'
+ openshift_hosted_router_selector: 'type=infra'
openshift_infra_nodes: "{{ g_infra_hosts }}"
openshift_node_labels: '{"region": "{{ ec2_region }}", "type": "{{ hostvars[inventory_hostname]["ec2_tag_sub-host-type"] if inventory_hostname in groups["tag_host-type_node"] else hostvars[inventory_hostname]["ec2_tag_host-type"] }}"}'
openshift_master_cluster_method: 'native'
diff --git a/playbooks/aws/openshift-cluster/list.yml b/playbooks/aws/openshift-cluster/list.yml
index 8b41a355e..d591c884d 100644
--- a/playbooks/aws/openshift-cluster/list.yml
+++ b/playbooks/aws/openshift-cluster/list.yml
@@ -15,7 +15,7 @@
name: "{{ item }}"
groups: oo_list_hosts
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ ansible_become: "{{ deployment_vars[deployment_type].sudo }}"
with_items: groups[scratch_group] | default([]) | difference(['localhost'])
- name: List Hosts
diff --git a/playbooks/aws/openshift-cluster/scaleup.yml b/playbooks/aws/openshift-cluster/scaleup.yml
index 7e3a47964..d91f2288e 100644
--- a/playbooks/aws/openshift-cluster/scaleup.yml
+++ b/playbooks/aws/openshift-cluster/scaleup.yml
@@ -12,7 +12,7 @@
name: "{{ item }}"
groups: oo_hosts_to_update
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ ansible_become: "{{ deployment_vars[deployment_type].sudo }}"
with_items: "{{ groups.nodes_to_add }}"
- include: ../../common/openshift-cluster/update_repos_and_packages.yml
diff --git a/playbooks/aws/openshift-cluster/service.yml b/playbooks/aws/openshift-cluster/service.yml
index d5f7d6b19..68c73109f 100644
--- a/playbooks/aws/openshift-cluster/service.yml
+++ b/playbooks/aws/openshift-cluster/service.yml
@@ -16,7 +16,7 @@
name: "{{ item }}"
groups: g_service_masters
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ ansible_become: "{{ deployment_vars[deployment_type].sudo }}"
with_items: "{{ master_hosts | default([]) }}"
- name: Evaluate g_service_nodes
@@ -24,7 +24,7 @@
name: "{{ item }}"
groups: g_service_nodes
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ ansible_become: "{{ deployment_vars[deployment_type].sudo }}"
with_items: "{{ node_hosts | default([]) }}"
- include: ../../common/openshift-node/service.yml
diff --git a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml
index 63be06ecf..cd2146884 100644
--- a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml
+++ b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml
@@ -173,7 +173,7 @@
hostname: "{{ item.0 }}"
ansible_ssh_host: "{{ item.1.dns_name }}"
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ ansible_become: "{{ deployment_vars[deployment_type].sudo }}"
groups: "{{ instance_groups }}"
ec2_private_ip_address: "{{ item.1.private_ip }}"
ec2_ip_address: "{{ item.1.public_ip }}"
@@ -188,7 +188,7 @@
hostname: "{{ item.0 }}"
ansible_ssh_host: "{{ item.1.dns_name }}"
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ ansible_become: "{{ deployment_vars[deployment_type].sudo }}"
groups: nodes_to_add
ec2_private_ip_address: "{{ item.1.private_ip }}"
ec2_ip_address: "{{ item.1.public_ip }}"
diff --git a/playbooks/aws/openshift-cluster/terminate.yml b/playbooks/aws/openshift-cluster/terminate.yml
index 6dd5d8b62..5ef50ffb9 100644
--- a/playbooks/aws/openshift-cluster/terminate.yml
+++ b/playbooks/aws/openshift-cluster/terminate.yml
@@ -11,7 +11,7 @@
name: "{{ item }}"
groups: oo_hosts_to_terminate
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ ansible_become: "{{ deployment_vars[deployment_type].sudo }}"
with_items: (groups['tag_clusterid_' ~ cluster_id] | default([])) | difference(['localhost'])
- name: Unsubscribe VMs
diff --git a/playbooks/aws/openshift-cluster/update.yml b/playbooks/aws/openshift-cluster/update.yml
index 32bab76b5..b3998d4e0 100644
--- a/playbooks/aws/openshift-cluster/update.yml
+++ b/playbooks/aws/openshift-cluster/update.yml
@@ -13,7 +13,7 @@
name: "{{ item }}"
groups: oo_hosts_to_update
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ ansible_become: "{{ deployment_vars[deployment_type].sudo }}"
with_items: "{{ g_all_hosts | default([]) }}"
- include: ../../common/openshift-cluster/update_repos_and_packages.yml
diff --git a/playbooks/aws/openshift-cluster/vars.yml b/playbooks/aws/openshift-cluster/vars.yml
index ae12286bd..f9d539e16 100644
--- a/playbooks/aws/openshift-cluster/vars.yml
+++ b/playbooks/aws/openshift-cluster/vars.yml
@@ -7,7 +7,7 @@ deployment_rhel7_ent_base:
image_name:
region: us-east-1
ssh_user: ec2-user
- sudo: yes
+ become: yes
keypair: libra
type: m4.large
security_groups: [ 'public' ]
@@ -21,7 +21,7 @@ deployment_vars:
image_name:
region: us-east-1
ssh_user: centos
- sudo: yes
+ become: yes
keypair: libra
type: m4.large
security_groups: [ 'public' ]
@@ -33,7 +33,7 @@ deployment_vars:
image_name: openshift-rhel7_*
region: us-east-1
ssh_user: root
- sudo: no
+ become: no
keypair: libra
type: m4.large
security_groups: [ 'public' ]
diff --git a/playbooks/common/openshift-cluster/additional_config.yml b/playbooks/common/openshift-cluster/additional_config.yml
index 44bf962c9..c5a0f123c 100644
--- a/playbooks/common/openshift-cluster/additional_config.yml
+++ b/playbooks/common/openshift-cluster/additional_config.yml
@@ -52,3 +52,5 @@
- role: openshift_registry
registry_volume_claim: "{{ openshift.hosted.registry.storage.volume.name }}-claim"
when: deploy_infra | bool and attach_registry_volume | bool
+ - role: openshift_metrics
+ when: openshift.hosted.metrics.deploy | bool
diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml
index 6f908fa7f..99b36098a 100644
--- a/playbooks/common/openshift-cluster/config.yml
+++ b/playbooks/common/openshift-cluster/config.yml
@@ -4,7 +4,7 @@
- include: validate_hostnames.yml
- name: Set oo_options
- hosts: oo_hosts_to_config
+ hosts: oo_all_hosts
tasks:
- set_fact:
openshift_docker_additional_registries: "{{ lookup('oo_option', 'docker_additional_registries') }}"
diff --git a/playbooks/common/openshift-cluster/evaluate_groups.yml b/playbooks/common/openshift-cluster/evaluate_groups.yml
index ce0134c44..3fb42a7fa 100644
--- a/playbooks/common/openshift-cluster/evaluate_groups.yml
+++ b/playbooks/common/openshift-cluster/evaluate_groups.yml
@@ -34,7 +34,7 @@
name: "{{ item }}"
groups: oo_all_hosts
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_sudo: "{{ g_sudo | default(omit) }}"
+ ansible_become: "{{ g_sudo | default(omit) }}"
with_items: "{{ g_all_hosts | default([]) }}"
- name: Evaluate oo_masters
@@ -42,7 +42,7 @@
name: "{{ item }}"
groups: oo_masters
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_sudo: "{{ g_sudo | default(omit) }}"
+ ansible_become: "{{ g_sudo | default(omit) }}"
with_items: "{{ g_master_hosts | union(g_new_master_hosts) | default([]) }}"
- name: Evaluate oo_etcd_to_config
@@ -50,7 +50,7 @@
name: "{{ item }}"
groups: oo_etcd_to_config
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_sudo: "{{ g_sudo | default(omit) }}"
+ ansible_become: "{{ g_sudo | default(omit) }}"
with_items: "{{ g_etcd_hosts | default([]) }}"
- name: Evaluate oo_masters_to_config
@@ -58,7 +58,7 @@
name: "{{ item }}"
groups: oo_masters_to_config
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_sudo: "{{ g_sudo | default(omit) }}"
+ ansible_become: "{{ g_sudo | default(omit) }}"
with_items: "{{ g_new_master_hosts | default(g_master_hosts | default([], true), true) }}"
- name: Evaluate oo_nodes_to_config
@@ -66,7 +66,7 @@
name: "{{ item }}"
groups: oo_nodes_to_config
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_sudo: "{{ g_sudo | default(omit) }}"
+ ansible_become: "{{ g_sudo | default(omit) }}"
with_items: "{{ g_new_node_hosts | default(g_node_hosts | default([], true), true) }}"
# Skip adding the master to oo_nodes_to_config when g_new_node_hosts is
@@ -75,7 +75,7 @@
name: "{{ item }}"
groups: oo_nodes_to_config
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_sudo: "{{ g_sudo | default(omit) }}"
+ ansible_become: "{{ g_sudo | default(omit) }}"
with_items: "{{ g_master_hosts | default([]) }}"
when: g_nodeonmaster | default(false) == true and g_new_node_hosts is not defined
@@ -91,7 +91,7 @@
name: "{{ g_master_hosts[0] }}"
groups: oo_first_master
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_sudo: "{{ g_sudo | default(omit) }}"
+ ansible_become: "{{ g_sudo | default(omit) }}"
when: g_master_hosts|length > 0
- name: Evaluate oo_lb_to_config
@@ -99,7 +99,7 @@
name: "{{ item }}"
groups: oo_lb_to_config
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_sudo: "{{ g_sudo | default(omit) }}"
+ ansible_become: "{{ g_sudo | default(omit) }}"
with_items: "{{ g_lb_hosts | default([]) }}"
- name: Evaluate oo_nfs_to_config
@@ -107,5 +107,5 @@
name: "{{ item }}"
groups: oo_nfs_to_config
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_sudo: "{{ g_sudo | default(omit) }}"
+ ansible_become: "{{ g_sudo | default(omit) }}"
with_items: "{{ g_nfs_hosts | default([]) }}"
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_minor/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_0_minor/upgrade.yml
index 63c8ef756..51b108f6a 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_0_minor/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_0_minor/upgrade.yml
@@ -42,10 +42,10 @@
- name: Update cluster policy
hosts: oo_first_master
tasks:
- - name: oadm policy reconcile-cluster-roles --confirm
+ - name: oadm policy reconcile-cluster-roles --additive-only=true --confirm
command: >
{{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
- policy reconcile-cluster-roles --confirm
+ policy reconcile-cluster-roles --additive-only=true --confirm
- name: Upgrade default router
hosts: oo_first_master
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
index 31ba8c4a9..a72749a2b 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
@@ -490,7 +490,7 @@
- name: Reconcile Cluster Roles
command: >
{{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
- policy reconcile-cluster-roles --confirm
+ policy reconcile-cluster-roles --additive-only=true --confirm
run_once: true
- name: Reconcile Cluster Role Bindings
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/upgrade.yml
index 54bb251f7..5e62b43a3 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/upgrade.yml
@@ -103,7 +103,7 @@
- name: Reconcile Cluster Roles
command: >
{{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
- policy reconcile-cluster-roles --confirm
+ policy reconcile-cluster-roles --additive-only=true --confirm
run_once: true
- name: Reconcile Cluster Role Bindings
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/containerized_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/containerized_upgrade.yml
index 2bd625257..319758a06 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/containerized_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/containerized_upgrade.yml
@@ -1,7 +1,7 @@
- include_vars: ../../../../../roles/openshift_node/vars/main.yml
- name: Update systemd units
- include: ../../../../../roles/openshift_node/tasks/systemd_units.yml openshift_version=v{{ g_aos_versions.avail_version }}
+ include: ../../../../../roles/openshift_node/tasks/systemd_units.yml openshift_version=v{{ g_new_version }}
- name: Verifying the correct version was configured
shell: grep {{ verify_upgrade_version }} {{ item }}
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/node_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/node_upgrade.yml
new file mode 100644
index 000000000..a911f12be
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/node_upgrade.yml
@@ -0,0 +1,24 @@
+- name: Prepare for Node evacuation
+ command: >
+ {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --schedulable=false
+ delegate_to: "{{ groups.oo_first_master.0 }}"
+
+- name: Evacuate Node for Kubelet upgrade
+ command: >
+ {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --evacuate --force
+ delegate_to: "{{ groups.oo_first_master.0 }}"
+
+- include: rpm_upgrade.yml
+ vars:
+ component: "node"
+ openshift_version: "{{ openshift_pkg_version | default('') }}"
+ when: not openshift.common.is_containerized | bool
+
+- include: containerized_upgrade.yml
+ when: openshift.common.is_containerized | bool
+
+- name: Set node schedulability
+ command: >
+ {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --schedulable=true
+ delegate_to: "{{ groups.oo_first_master.0 }}"
+ when: openshift.node.schedulable | bool
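
The new node_upgrade.yml encodes a cordon/evacuate/upgrade/restore cycle. For
operators reproducing those steps by hand, the oadm calls look roughly like the
sketch below (the node name is a placeholder; the playbook delegates these
commands to the first master):

    import subprocess

    node = "node1.example.com"  # hypothetical; the playbook lowercases openshift.common.hostname
    oadm = "oadm"               # stands in for openshift.common.admin_binary

    subprocess.check_call([oadm, "manage-node", node, "--schedulable=false"])
    subprocess.check_call([oadm, "manage-node", node, "--evacuate", "--force"])
    # ... upgrade node packages or containers here ...
    subprocess.check_call([oadm, "manage-node", node, "--schedulable=true"])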
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml
index 21480ba55..db1d420ac 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml
@@ -161,14 +161,14 @@
when: inventory_hostname in groups.oo_masters_to_config
- name: Update systemd units
- include: ../../../../../roles/openshift_master/tasks/systemd_units.yml openshift_version=v{{ g_aos_versions.curr_version }}
+ include: ../../../../../roles/openshift_master/tasks/systemd_units.yml openshift_version=v{{ g_new_version }}
when: inventory_hostname in groups.oo_masters_to_config
- include_vars: ../../../../../roles/openshift_node/vars/main.yml
when: inventory_hostname in groups.oo_nodes_to_config
- name: Update systemd units
- include: ../../../../../roles/openshift_node/tasks/systemd_units.yml openshift_version=v{{g_aos_versions.curr_version}}
+ include: ../../../../../roles/openshift_node/tasks/systemd_units.yml openshift_version=v{{ g_new_version }}
when: inventory_hostname in groups.oo_nodes_to_config
# Note: the version number is hardcoded here in hopes of catching potential
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/rpm_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/rpm_upgrade.yml
index 7a2718e1b..5c96ad094 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/rpm_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/rpm_upgrade.yml
@@ -4,3 +4,6 @@
- name: Ensure python-yaml present for config upgrade
action: "{{ ansible_pkg_mgr }} name=PyYAML state=present"
when: not openshift.common.is_atomic | bool
+
+- name: Restart node service
+ service: name="{{ openshift.common.service_type }}-node" state=restarted
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml
index 8852bb8de..a28f7e9c1 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml
@@ -50,7 +50,7 @@
- include_vars: ../../../../../roles/openshift_master/vars/main.yml
- name: Update systemd units
- include: ../../../../../roles/openshift_master/tasks/systemd_units.yml openshift_version=v{{ g_aos_versions.avail_version }}
+ include: ../../../../../roles/openshift_master/tasks/systemd_units.yml openshift_version=v{{ g_new_version }}
# - name: Upgrade master configuration
# openshift_upgrade_config:
@@ -88,23 +88,13 @@
###############################################################################
- name: Upgrade nodes
hosts: oo_nodes_to_config
+ serial: 1
roles:
- openshift_facts
handlers:
- include: ../../../../../roles/openshift_node/handlers/main.yml
tasks:
- - include: rpm_upgrade.yml
- vars:
- component: "node"
- openshift_version: "{{ openshift_pkg_version | default('') }}"
- when: not openshift.common.is_containerized | bool
-
- - include: containerized_upgrade.yml
- when: openshift.common.is_containerized | bool
-
- # This will restart the node
- - name: Restart openvswitch service
- service: name="{{ openshift.common.service_type }}-node" state=restarted
+ - include: node_upgrade.yml
- set_fact:
node_update_complete: True
@@ -130,6 +120,7 @@
###############################################################################
# Reconcile Cluster Roles, Cluster Role Bindings and Security Context Constraints
###############################################################################
+
- name: Reconcile Cluster Roles and Cluster Role Bindings and Security Context Constraints
hosts: oo_masters_to_config
roles:
@@ -147,7 +138,7 @@
- name: Reconcile Cluster Roles
command: >
{{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
- policy reconcile-cluster-roles --confirm
+ policy reconcile-cluster-roles --additive-only=true --confirm
run_once: true
- name: Reconcile Cluster Role Bindings
@@ -164,7 +155,7 @@
- name: Reconcile Security Context Constraints
command: >
- {{ openshift.common.admin_binary}} policy reconcile-sccs --confirm
+ {{ openshift.common.admin_binary}} policy reconcile-sccs --confirm --additive-only=true
run_once: true
- set_fact:
diff --git a/playbooks/common/openshift-etcd/config.yml b/playbooks/common/openshift-etcd/config.yml
index 2f07b2f51..01c092625 100644
--- a/playbooks/common/openshift-etcd/config.yml
+++ b/playbooks/common/openshift-etcd/config.yml
@@ -22,6 +22,8 @@
etcd_cert_subdir: etcd-{{ openshift.common.hostname }}
etcd_cert_config_dir: /etc/etcd
etcd_cert_prefix:
+ etcd_hostname: "{{ openshift.common.hostname }}"
+ etcd_ip: "{{ openshift.common.ip }}"
- name: Create temp directory for syncing certs
hosts: localhost
@@ -43,7 +45,7 @@
| oo_filter_list(filter_attr='etcd_server_certs_missing') }}"
sync_tmpdir: "{{ hostvars.localhost.g_etcd_mktemp.stdout }}"
roles:
- - etcd_certificates
+ - openshift_etcd_certificates
post_tasks:
- name: Create a tarball of the etcd certs
command: >
@@ -69,7 +71,7 @@
sync_tmpdir: "{{ hostvars.localhost.g_etcd_mktemp.stdout }}"
etcd_url_scheme: https
etcd_peer_url_scheme: https
- etcd_peers_group: oo_etcd_to_config
+ etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}"
pre_tasks:
- name: Ensure certificate directory exists
file:
@@ -91,7 +93,7 @@
sync_tmpdir: "{{ hostvars.localhost.g_etcd_mktemp.stdout }}"
etcd_url_scheme: https
etcd_peer_url_scheme: https
- etcd_peers_group: oo_etcd_to_config
+ etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}"
pre_tasks:
- name: Ensure certificate directory exists
file:
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
index f1eaf8e16..060b5aa0d 100644
--- a/playbooks/common/openshift-master/config.yml
+++ b/playbooks/common/openshift-master/config.yml
@@ -35,6 +35,19 @@
- set_fact:
openshift_master_debug_level: "{{ t_oo_option_master_debug_level }}"
when: openshift_master_debug_level is not defined and t_oo_option_master_debug_level != ""
+
+ - set_fact:
+ openshift_master_default_subdomain: "{{ lookup('oo_option', 'openshift_master_default_subdomain') | default(None, true) }}"
+ when: openshift_master_default_subdomain is not defined
+ - set_fact:
+ openshift_hosted_metrics_deploy: "{{ lookup('oo_option', 'openshift_hosted_metrics_deploy') | default(false, true) }}"
+ when: openshift_hosted_metrics_deploy is not defined
+ - set_fact:
+ openshift_hosted_metrics_duration: "{{ lookup('oo_option', 'openshift_hosted_metrics_duration') | default(7) }}"
+ when: openshift_hosted_metrics_duration is not defined
+ - set_fact:
+ openshift_hosted_metrics_resolution: "{{ lookup('oo_option', 'openshift_hosted_metrics_resolution') | default(10) }}"
+ when: openshift_hosted_metrics_resolution is not defined
roles:
- openshift_facts
post_tasks:
@@ -53,7 +66,6 @@
console_url: "{{ openshift_master_console_url | default(None) }}"
console_use_ssl: "{{ openshift_master_console_use_ssl | default(None) }}"
public_console_url: "{{ openshift_master_public_console_url | default(None) }}"
- portal_net: "{{ openshift_master_portal_net | default(None) }}"
ha: "{{ openshift_master_ha | default(groups.oo_masters | length > 1) }}"
master_count: "{{ openshift_master_count | default(groups.oo_masters | length) }}"
- openshift_facts:
@@ -75,6 +87,8 @@
etcd_cert_subdir: openshift-master-{{ openshift.common.hostname }}
etcd_cert_config_dir: "{{ openshift.common.config_base }}/master"
etcd_cert_prefix: master.etcd-
+ etcd_hostname: "{{ openshift.common.hostname }}"
+ etcd_ip: "{{ openshift.common.ip }}"
when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config
- name: Create temp directory for syncing certs
@@ -98,7 +112,7 @@
| oo_filter_list(filter_attr='etcd_client_certs_missing') }}"
sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
roles:
- - etcd_certificates
+ - openshift_etcd_certificates
post_tasks:
- name: Create a tarball of the etcd certs
command: >
diff --git a/playbooks/common/openshift-master/restart.yml b/playbooks/common/openshift-master/restart.yml
index 02449e40d..57a63cfee 100644
--- a/playbooks/common/openshift-master/restart.yml
+++ b/playbooks/common/openshift-master/restart.yml
@@ -97,7 +97,7 @@
name: "{{ item }}"
groups: oo_active_masters
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_sudo: "{{ g_sudo | default(omit) }}"
+ ansible_become: "{{ g_sudo | default(omit) }}"
with_items: "{{ groups.oo_masters_to_config | default([]) }}"
when: (hostvars[item]['is_active'] | default(false)) | bool
- name: Evaluate oo_current_masters
@@ -105,7 +105,7 @@
name: "{{ item }}"
groups: oo_current_masters
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_sudo: "{{ g_sudo | default(omit) }}"
+ ansible_become: "{{ g_sudo | default(omit) }}"
with_items: "{{ groups.oo_masters_to_config | default([]) }}"
when: (hostvars[item]['current_host'] | default(false)) | bool
diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml
index 7ca941732..264935a63 100644
--- a/playbooks/common/openshift-node/config.yml
+++ b/playbooks/common/openshift-node/config.yml
@@ -105,7 +105,7 @@
name: "{{ item }}"
groups: oo_containerized_master_nodes
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_sudo: "{{ g_sudo | default(omit) }}"
+ ansible_become: "{{ g_sudo | default(omit) }}"
with_items: "{{ groups.oo_nodes_to_config | default([]) }}"
when: hostvars[item].openshift.common.is_containerized | bool and (item in groups.oo_nodes_to_config and item in groups.oo_masters_to_config)
@@ -119,7 +119,7 @@
# hostvars[groups.oo_first_master.0].openshift.hosted.registry instead of
# hardcoding
openshift_docker_hosted_registry_insecure: True
- openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.master.portal_net }}"
+ openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
roles:
- openshift_node
@@ -132,7 +132,7 @@
# hostvars[groups.oo_first_master.0].openshift.hosted.registry instead of
# hardcoding
openshift_docker_hosted_registry_insecure: True
- openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.master.portal_net }}"
+ openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
roles:
- openshift_node
@@ -154,6 +154,8 @@
etcd_cert_subdir: openshift-node-{{ openshift.common.hostname }}
etcd_cert_config_dir: "{{ openshift.common.config_base }}/node"
etcd_cert_prefix: node.etcd-
+ etcd_hostname: "{{ openshift.common.hostname }}"
+ etcd_ip: "{{ openshift.common.ip }}"
when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config and (openshift.common.use_flannel | bool)
- name: Configure flannel etcd certificates
@@ -168,7 +170,7 @@
| oo_filter_list(filter_attr='etcd_client_flannel_certs_missing') | default([]) }}"
when: etcd_client_flannel_certs_missing is defined and etcd_client_flannel_certs_missing
roles:
- - role: etcd_certificates
+ - role: openshift_etcd_certificates
when: openshift_use_flannel | default(false) | bool
post_tasks:
- name: Create a tarball of the etcd flannel certs
diff --git a/playbooks/gce/openshift-cluster/config.yml b/playbooks/gce/openshift-cluster/config.yml
index ba37a3a1f..283f460a9 100644
--- a/playbooks/gce/openshift-cluster/config.yml
+++ b/playbooks/gce/openshift-cluster/config.yml
@@ -12,7 +12,7 @@
openshift_deployment_type: "{{ deployment_type }}"
openshift_hostname: "{{ gce_private_ip }}"
openshift_registry_selector: 'type=infra'
- openshift_router_selector: 'type=infra'
+ openshift_hosted_router_selector: 'type=infra'
openshift_infra_nodes: "{{ g_infra_hosts }}"
openshift_master_cluster_method: 'native'
openshift_use_openshift_sdn: "{{ lookup('oo_option', 'use_openshift_sdn') }}"
diff --git a/playbooks/gce/openshift-cluster/list.yml b/playbooks/gce/openshift-cluster/list.yml
index 992033d16..2b1efc3e4 100644
--- a/playbooks/gce/openshift-cluster/list.yml
+++ b/playbooks/gce/openshift-cluster/list.yml
@@ -15,7 +15,7 @@
name: "{{ item }}"
groups: oo_list_hosts
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ ansible_become: "{{ deployment_vars[deployment_type].sudo }}"
with_items: groups[scratch_group] | default([], true) | difference(['localhost']) | difference(groups.status_terminated | default([], true))
- name: List Hosts
diff --git a/playbooks/gce/openshift-cluster/service.yml b/playbooks/gce/openshift-cluster/service.yml
index 914f38c1f..9942a0fd1 100644
--- a/playbooks/gce/openshift-cluster/service.yml
+++ b/playbooks/gce/openshift-cluster/service.yml
@@ -15,14 +15,14 @@
name: "{{ item }}"
groups: g_service_nodes
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ ansible_become: "{{ deployment_vars[deployment_type].sudo }}"
with_items: "{{ node_hosts | default([]) | difference(['localhost']) | difference(groups.status_terminated) }}"
- add_host:
name: "{{ item }}"
groups: g_service_masters
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ ansible_become: "{{ deployment_vars[deployment_type].sudo }}"
with_items: "{{ master_hosts | default([]) | difference(['localhost']) | difference(groups.status_terminated) }}"
- include: ../../common/openshift-node/service.yml
diff --git a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml
index 8ebf71cd4..0cfb1018f 100644
--- a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml
+++ b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml
@@ -39,7 +39,7 @@
hostname: "{{ item.name }}"
ansible_ssh_host: "{{ item.public_ip }}"
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ ansible_become: "{{ deployment_vars[deployment_type].sudo }}"
groups: "{{ item.tags | oo_prepend_strings_in_list('tag_') | join(',') }}"
gce_public_ip: "{{ item.public_ip }}"
gce_private_ip: "{{ item.private_ip }}"
diff --git a/playbooks/gce/openshift-cluster/terminate.yml b/playbooks/gce/openshift-cluster/terminate.yml
index d835c53ba..e64eddee0 100644
--- a/playbooks/gce/openshift-cluster/terminate.yml
+++ b/playbooks/gce/openshift-cluster/terminate.yml
@@ -11,7 +11,7 @@
name: "{{ item }}"
groups: oo_hosts_to_terminate
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ ansible_become: "{{ deployment_vars[deployment_type].sudo }}"
with_items: (groups['tag_clusterid-' ~ cluster_id] | default([])) | difference(['localhost'])
- name: Unsubscribe VMs
diff --git a/playbooks/gce/openshift-cluster/update.yml b/playbooks/gce/openshift-cluster/update.yml
index 2dc540978..95cdd177e 100644
--- a/playbooks/gce/openshift-cluster/update.yml
+++ b/playbooks/gce/openshift-cluster/update.yml
@@ -13,7 +13,7 @@
name: "{{ item }}"
groups: oo_hosts_to_update
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ ansible_become: "{{ deployment_vars[deployment_type].sudo }}"
with_items: "{{ g_all_hosts | default([]) }}"
- include: ../../common/openshift-cluster/update_repos_and_packages.yml
diff --git a/playbooks/gce/openshift-cluster/vars.yml b/playbooks/gce/openshift-cluster/vars.yml
index d173213fc..1497d5520 100644
--- a/playbooks/gce/openshift-cluster/vars.yml
+++ b/playbooks/gce/openshift-cluster/vars.yml
@@ -5,19 +5,19 @@ deployment_rhel7_ent_base:
image: "{{ lookup('oo_option', 'image_name') | default('rhel-7', True) }}"
machine_type: "{{ lookup('oo_option', 'machine_type') | default('n1-standard-1', True) }}"
ssh_user: "{{ lookup('env', 'gce_ssh_user') | default(ansible_ssh_user, true) }}"
- sudo: yes
+ become: yes
deployment_vars:
origin:
image: "{{ lookup('oo_option', 'image_name') | default('centos-7', True) }}"
machine_type: "{{ lookup('oo_option', 'machine_type') | default('n1-standard-1', True) }}"
ssh_user: "{{ lookup('env', 'gce_ssh_user') | default(ansible_ssh_user, true) }}"
- sudo: yes
+ become: yes
online:
image: libra-rhel7
machine_type: n1-standard-1
ssh_user: root
- sudo: no
+ become: no
enterprise: "{{ deployment_rhel7_ent_base }}"
openshift-enterprise: "{{ deployment_rhel7_ent_base }}"
atomic-enterprise: "{{ deployment_rhel7_ent_base }}"
diff --git a/playbooks/libvirt/openshift-cluster/config.yml b/playbooks/libvirt/openshift-cluster/config.yml
index 0e003ef67..5bfe61657 100644
--- a/playbooks/libvirt/openshift-cluster/config.yml
+++ b/playbooks/libvirt/openshift-cluster/config.yml
@@ -14,7 +14,7 @@
openshift_debug_level: "{{ debug_level }}"
openshift_deployment_type: "{{ deployment_type }}"
openshift_registry_selector: 'type=infra'
- openshift_router_selector: 'type=infra'
+ openshift_hosted_router_selector: 'type=infra'
openshift_infra_nodes: "{{ g_infra_hosts }}"
openshift_master_cluster_method: 'native'
openshift_use_openshift_sdn: "{{ lookup('oo_option', 'use_openshift_sdn') }}"
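`openshift_router_selector` is renamed to `openshift_hosted_router_selector` to match the `openshift_hosted_*` variable namespace. A hypothetical inventory-side fragment using the renamed variable (the group_vars layout is an assumption, not part of this diff):

```yaml
# group_vars/OSEv3.yml (illustrative)
openshift_hosted_router_selector: 'type=infra'   # formerly openshift_router_selector
openshift_registry_selector: 'type=infra'        # registry selector keeps its old name here
```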
diff --git a/playbooks/libvirt/openshift-cluster/list.yml b/playbooks/libvirt/openshift-cluster/list.yml
index 6cb81ee79..314be1fab 100644
--- a/playbooks/libvirt/openshift-cluster/list.yml
+++ b/playbooks/libvirt/openshift-cluster/list.yml
@@ -15,7 +15,7 @@
name: "{{ item }}"
groups: oo_list_hosts
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ ansible_become: "{{ deployment_vars[deployment_type].sudo }}"
with_items: groups[scratch_group] | default([]) | difference(['localhost'])
- name: List Hosts
diff --git a/playbooks/libvirt/openshift-cluster/service.yml b/playbooks/libvirt/openshift-cluster/service.yml
index cd07c8701..6bd0516e3 100644
--- a/playbooks/libvirt/openshift-cluster/service.yml
+++ b/playbooks/libvirt/openshift-cluster/service.yml
@@ -18,7 +18,7 @@
add_host:
name: "{{ item }}"
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ ansible_become: "{{ deployment_vars[deployment_type].sudo }}"
groups: g_service_masters
with_items: "{{ g_master_hosts | default([]) }}"
@@ -26,7 +26,7 @@
add_host:
name: "{{ item }}"
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ ansible_become: "{{ deployment_vars[deployment_type].sudo }}"
groups: g_service_nodes
with_items: "{{ g_node_hosts | default([]) }}"
diff --git a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
index 937a765fa..4330179f4 100644
--- a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
+++ b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
@@ -113,7 +113,7 @@
hostname: '{{ item.0 }}'
ansible_ssh_host: '{{ item.1 }}'
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ ansible_become: "{{ deployment_vars[deployment_type].sudo }}"
groups: "tag_environment-{{ cluster_env }}, tag_host-type-{{ type }}, tag_sub-host-type-{{ g_sub_host_type }}, tag_clusterid-{{ cluster_id }}"
openshift_node_labels: "{{ node_label }}"
with_together:
diff --git a/playbooks/libvirt/openshift-cluster/terminate.yml b/playbooks/libvirt/openshift-cluster/terminate.yml
index d6251ac83..cc95ec680 100644
--- a/playbooks/libvirt/openshift-cluster/terminate.yml
+++ b/playbooks/libvirt/openshift-cluster/terminate.yml
@@ -14,7 +14,7 @@
name: "{{ item }}"
groups: oo_hosts_to_terminate
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ ansible_become: "{{ deployment_vars[deployment_type].sudo }}"
with_items: groups[cluster_group] | default([])
- name: Unsubscribe VMs
diff --git a/playbooks/libvirt/openshift-cluster/update.yml b/playbooks/libvirt/openshift-cluster/update.yml
index 2dc540978..95cdd177e 100644
--- a/playbooks/libvirt/openshift-cluster/update.yml
+++ b/playbooks/libvirt/openshift-cluster/update.yml
@@ -13,7 +13,7 @@
name: "{{ item }}"
groups: oo_hosts_to_update
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ ansible_become: "{{ deployment_vars[deployment_type].sudo }}"
with_items: "{{ g_all_hosts | default([]) }}"
- include: ../../common/openshift-cluster/update_repos_and_packages.yml
diff --git a/playbooks/libvirt/openshift-cluster/vars.yml b/playbooks/libvirt/openshift-cluster/vars.yml
index aa0c69e08..ca0c903ac 100644
--- a/playbooks/libvirt/openshift-cluster/vars.yml
+++ b/playbooks/libvirt/openshift-cluster/vars.yml
@@ -20,7 +20,7 @@ deployment_rhel7_ent_base:
default('25f880767ec6bf71beb532e17f1c45231640bbfdfbbb1dffb79d2c1b328388e0', True) }}"
compression: ""
ssh_user: openshift
- sudo: yes
+ become: yes
deployment_vars:
origin:
@@ -34,14 +34,14 @@ deployment_vars:
sha256: "{{ lookup('oo_option', 'image_sha256') |
default('dd0f5e610e7c5ffacaca35ed7a78a19142a588f4543da77b61c1fb0d74400471', True) }}"
ssh_user: openshift
- sudo: yes
+ become: yes
online:
image:
url:
name:
sha256:
ssh_user: root
- sudo: no
+ become: no
enterprise: "{{ deployment_rhel7_ent_base }}"
openshift-enterprise: "{{ deployment_rhel7_ent_base }}"
atomic-enterprise: "{{ deployment_rhel7_ent_base }}"
diff --git a/playbooks/openstack/openshift-cluster/config.yml b/playbooks/openstack/openshift-cluster/config.yml
index 093beaf03..319202982 100644
--- a/playbooks/openstack/openshift-cluster/config.yml
+++ b/playbooks/openstack/openshift-cluster/config.yml
@@ -11,7 +11,7 @@
openshift_debug_level: "{{ debug_level }}"
openshift_deployment_type: "{{ deployment_type }}"
openshift_registry_selector: 'type=infra'
- openshift_router_selector: 'type=infra'
+ openshift_hosted_router_selector: 'type=infra'
openshift_infra_nodes: "{{ g_infra_hosts }}"
openshift_master_cluster_method: 'native'
openshift_use_openshift_sdn: "{{ lookup('oo_option', 'use_openshift_sdn') }}"
diff --git a/playbooks/openstack/openshift-cluster/dns.yml b/playbooks/openstack/openshift-cluster/dns.yml
index 5e7671a48..02bcb0953 100644
--- a/playbooks/openstack/openshift-cluster/dns.yml
+++ b/playbooks/openstack/openshift-cluster/dns.yml
@@ -12,7 +12,7 @@
name: "{{ item }}"
groups: oo_dns_hosts_to_update
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ ansible_become: "{{ deployment_vars[deployment_type].sudo }}"
with_items: "{{ groups[cluster_id ~ '-dns'] }}"
- name: Evaluate oo_hosts_to_add_in_dns
@@ -20,7 +20,7 @@
name: "{{ item }}"
groups: oo_hosts_to_add_in_dns
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ ansible_become: "{{ deployment_vars[deployment_type].sudo }}"
with_items: "{{ groups['tag_clusterid_' ~ cluster_id] }}"
- name: Gather facts
diff --git a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml b/playbooks/openstack/openshift-cluster/files/heat_stack.yaml
index dd6a22cbe..2f05c3adc 100644
--- a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml
+++ b/playbooks/openstack/openshift-cluster/files/heat_stack.yaml
@@ -350,7 +350,6 @@ resources:
port_range_min: 10250
port_range_max: 10250
remote_mode: remote_group_id
- remote_group_id: { get_resource: master-secgrp }
- direction: ingress
protocol: udp
port_range_min: 4789
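Dropping `remote_group_id` here leans on assumed Heat semantics: with `remote_mode: remote_group_id` and no explicit `remote_group_id`, the rule is understood to default to the security group being defined, so kubelet port 10250 is opened to peers in the node group itself rather than only to `master-secgrp`. A sketch of the resulting rule under that assumption:

```yaml
- direction: ingress
  protocol: tcp
  port_range_min: 10250
  port_range_max: 10250
  remote_mode: remote_group_id   # no id given, so it defaults to this group
```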
diff --git a/playbooks/openstack/openshift-cluster/launch.yml b/playbooks/openstack/openshift-cluster/launch.yml
index 0afcad72e..a5b6dc8d9 100644
--- a/playbooks/openstack/openshift-cluster/launch.yml
+++ b/playbooks/openstack/openshift-cluster/launch.yml
@@ -106,7 +106,7 @@
hostname: '{{ item[0] }}'
ansible_ssh_host: '{{ item[2] }}'
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ ansible_become: "{{ deployment_vars[deployment_type].sudo }}"
groups: 'tag_environment_{{ cluster_env }}, tag_host-type_etcd, tag_sub-host-type_default, tag_clusterid_{{ cluster_id }}'
openshift_node_labels:
type: "etcd"
@@ -120,7 +120,7 @@
hostname: '{{ item[0] }}'
ansible_ssh_host: '{{ item[2] }}'
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ ansible_become: "{{ deployment_vars[deployment_type].sudo }}"
groups: 'tag_environment_{{ cluster_env }}, tag_host-type_master, tag_sub-host-type_default, tag_clusterid_{{ cluster_id }}'
openshift_node_labels:
type: "master"
@@ -134,7 +134,7 @@
hostname: '{{ item[0] }}'
ansible_ssh_host: '{{ item[2] }}'
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ ansible_become: "{{ deployment_vars[deployment_type].sudo }}"
groups: 'tag_environment_{{ cluster_env }}, tag_host-type_node, tag_sub-host-type_compute, tag_clusterid_{{ cluster_id }}'
openshift_node_labels:
type: "compute"
@@ -148,7 +148,7 @@
hostname: '{{ item[0] }}'
ansible_ssh_host: '{{ item[2] }}'
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ ansible_become: "{{ deployment_vars[deployment_type].sudo }}"
groups: 'tag_environment_{{ cluster_env }}, tag_host-type_node, tag_sub-host-type_infra, tag_clusterid_{{ cluster_id }}'
openshift_node_labels:
type: "infra"
@@ -162,7 +162,7 @@
hostname: '{{ parsed_outputs.dns_name }}'
ansible_ssh_host: '{{ parsed_outputs.dns_floating_ip }}'
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ ansible_become: "{{ deployment_vars[deployment_type].sudo }}"
groups: '{{ cluster_id }}-dns'
- name: Wait for ssh
diff --git a/playbooks/openstack/openshift-cluster/list.yml b/playbooks/openstack/openshift-cluster/list.yml
index 123ebd323..78ee3328b 100644
--- a/playbooks/openstack/openshift-cluster/list.yml
+++ b/playbooks/openstack/openshift-cluster/list.yml
@@ -16,7 +16,7 @@
groups: oo_list_hosts
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
ansible_ssh_host: "{{ hostvars[item].ansible_ssh_host | default(item) }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ ansible_become: "{{ deployment_vars[deployment_type].sudo }}"
with_items: groups[scratch_group] | default([]) | difference(['localhost'])
- name: List Hosts
diff --git a/playbooks/openstack/openshift-cluster/terminate.yml b/playbooks/openstack/openshift-cluster/terminate.yml
index a1fb41b53..063d775e1 100644
--- a/playbooks/openstack/openshift-cluster/terminate.yml
+++ b/playbooks/openstack/openshift-cluster/terminate.yml
@@ -10,7 +10,7 @@
name: "{{ item }}"
groups: oo_hosts_to_terminate
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ ansible_become: "{{ deployment_vars[deployment_type].sudo }}"
with_items: (groups['tag_environment_' ~ cluster_env]|default([])) | intersect(groups['tag_clusterid_' ~ cluster_id ]|default([]))
- name: Unsubscribe VMs
diff --git a/playbooks/openstack/openshift-cluster/update.yml b/playbooks/openstack/openshift-cluster/update.yml
index 16027b15c..78ba7fbec 100644
--- a/playbooks/openstack/openshift-cluster/update.yml
+++ b/playbooks/openstack/openshift-cluster/update.yml
@@ -15,7 +15,7 @@
name: "{{ item }}"
groups: oo_hosts_to_update
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ ansible_become: "{{ deployment_vars[deployment_type].sudo }}"
with_items: "{{ g_all_hosts | default([]) }}"
- include: ../../common/openshift-cluster/update_repos_and_packages.yml
diff --git a/playbooks/openstack/openshift-cluster/vars.yml b/playbooks/openstack/openshift-cluster/vars.yml
index ee26d223e..d45ab6b9e 100644
--- a/playbooks/openstack/openshift-cluster/vars.yml
+++ b/playbooks/openstack/openshift-cluster/vars.yml
@@ -21,17 +21,17 @@ openstack_flavor:
deployment_rhel7_ent_base:
image: "{{ lookup('oo_option', 'image_name') | default('rhel-guest-image-7.2-20151102.0.x86_64', True) }}"
ssh_user: openshift
- sudo: yes
+ become: yes
deployment_vars:
origin:
image: "{{ lookup('oo_option', 'image_name') | default('centos-70-raw', True) }}"
ssh_user: openshift
- sudo: yes
+ become: yes
online:
image:
ssh_user: root
- sudo: no
+ become: no
enterprise: "{{ deployment_rhel7_ent_base }}"
openshift-enterprise: "{{ deployment_rhel7_ent_base }}"
atomic-enterprise: "{{ deployment_rhel7_ent_base }}"
diff --git a/roles/ansible/tasks/config.yml b/roles/ansible/tasks/config.yml
deleted file mode 100644
index 5e361429b..000000000
--- a/roles/ansible/tasks/config.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-- name: modify ansible.cfg
- lineinfile:
- dest: /etc/ansible/ansible.cfg
- backrefs: yes
- regexp: "^#?({{ item.option }})( *)="
- line: '\1\2= {{ item.value }}'
- with_items: cfg_options
diff --git a/roles/ansible/tasks/main.yml b/roles/ansible/tasks/main.yml
deleted file mode 100644
index ea14fb39a..000000000
--- a/roles/ansible/tasks/main.yml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-# Install ansible client
-
-- name: Install Ansible
- action: "{{ ansible_pkg_mgr }} name=ansible state=present"
- when: not openshift.common.is_containerized | bool
-
-- include: config.yml
- vars:
- cfg_options: "{{ ans_config }}"
diff --git a/roles/ansible_tower/tasks/main.yaml b/roles/ansible_tower/tasks/main.yaml
deleted file mode 100644
index 36fc9b282..000000000
--- a/roles/ansible_tower/tasks/main.yaml
+++ /dev/null
@@ -1,41 +0,0 @@
----
-- name: install some useful packages
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items:
- - git
- - python-pip
- - unzip
- - python-psphere
- - ansible
- - telnet
- - ack
- - pylint
-
-- name: download Tower setup
- get_url: url=http://releases.ansible.com/ansible-tower/setup/ansible-tower-setup-2.1.1.tar.gz dest=/opt/ force=no
-
-- name: extract Tower
- unarchive: src=/opt/ansible-tower-setup-2.1.1.tar.gz dest=/opt copy=no creates=ansible-tower-setup-2.1.1
-
-- name: Open firewalld port for http
- firewalld: port=80/tcp permanent=true state=enabled
-
-- name: Open firewalld port for https
- firewalld: port=443/tcp permanent=true state=enabled
-
-- name: Open firewalld port for 8080
- firewalld: port=8080/tcp permanent=true state=enabled
-
-- name: Set (httpd_can_network_connect) flag on and keep it persistent across reboots
- seboolean: name=httpd_can_network_connect state=yes persistent=yes
-
-- name: Set (httpd_can_network_connect_db) flag on and keep it persistent across reboots
- seboolean: name=httpd_can_network_connect_db state=yes persistent=yes
-
-- name: Setup proot to allow access to /etc/tower/
- lineinfile:
- dest: /etc/tower/settings.py
- backrefs: yes
- regexp: "^({{ item.option }})( *)="
- line: '\1\2= {{ item.value }}'
- with_items: config_changes | default([], true)
diff --git a/roles/ansible_tower_cli/README.md b/roles/ansible_tower_cli/README.md
deleted file mode 100644
index d2d68146f..000000000
--- a/roles/ansible_tower_cli/README.md
+++ /dev/null
@@ -1,50 +0,0 @@
-ansible_tower_cli
-==============
-
-Install ansible-tower-cli rpm.
-
-Requirements
-------------
-
-None
-
-Role Variables
---------------
-
-None
-
-Dependencies
-------------
-
-None
-
-Example Playbook
-----------------
-
-Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
-
- - hosts: servers
- roles:
- - roles/ansible_tower_cli
-
-License
--------
-
-Copyright 2012-2014 Red Hat, Inc., All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Author Information
-------------------
-
-openshift operations
diff --git a/roles/ansible_tower_cli/defaults/main.yml b/roles/ansible_tower_cli/defaults/main.yml
deleted file mode 100644
index bef66bbfd..000000000
--- a/roles/ansible_tower_cli/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# defaults file for ansible_tower_cli
diff --git a/roles/ansible_tower_cli/handlers/main.yml b/roles/ansible_tower_cli/handlers/main.yml
deleted file mode 100644
index 0ce873648..000000000
--- a/roles/ansible_tower_cli/handlers/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# handlers file for ansible_tower_cli
diff --git a/roles/ansible_tower_cli/meta/main.yml b/roles/ansible_tower_cli/meta/main.yml
deleted file mode 100644
index 41dd23720..000000000
--- a/roles/ansible_tower_cli/meta/main.yml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-galaxy_info:
- author: openshift operations
- description: install ansible-tower-cli
- company: Red Hat, inc
- license: ASL 2.0
- min_ansible_version: 1.2
-dependencies: []
-
diff --git a/roles/ansible_tower_cli/tasks/main.yml b/roles/ansible_tower_cli/tasks/main.yml
deleted file mode 100644
index 0c5163b50..000000000
--- a/roles/ansible_tower_cli/tasks/main.yml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-- name: Install python-ansible-tower-cli
- action: "{{ ansible_pkg_mgr }} name=python-ansible-tower-cli state=present"
-
-- template:
- src: tower_cli.cfg.j2
- dest: /etc/tower/tower_cli.cfg
- owner: awx
- group: awx
- mode: 0640
-
-- file:
- state: link
- src: /etc/tower
- dest: /etc/awx
- owner: awx
- group: awx
-
diff --git a/roles/ansible_tower_cli/templates/tower_cli.cfg.j2 b/roles/ansible_tower_cli/templates/tower_cli.cfg.j2
deleted file mode 100644
index 5a0a275b0..000000000
--- a/roles/ansible_tower_cli/templates/tower_cli.cfg.j2
+++ /dev/null
@@ -1,5 +0,0 @@
-[general]
-host = {{ oo_towercli_host }}
-username = {{ oo_towercli_username }}
-password = {{ oo_towercli_password }}
-verify_ssl = true
diff --git a/roles/ansible_tower_cli/vars/main.yml b/roles/ansible_tower_cli/vars/main.yml
deleted file mode 100644
index a4c9abfb4..000000000
--- a/roles/ansible_tower_cli/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# vars file for ansible_tower_cli
diff --git a/roles/chrony/README.md b/roles/chrony/README.md
deleted file mode 100644
index bf15d9669..000000000
--- a/roles/chrony/README.md
+++ /dev/null
@@ -1,31 +0,0 @@
-Role Name
-=========
-
-A role to configure chrony as the ntp client
-
-Requirements
-------------
-
-
-Role Variables
---------------
-
-chrony_ntp_servers: a list of ntp servers to use in the chrony.conf file
-
-Dependencies
-------------
-
-roles/lib_timedatectl
-
-Example Playbook
-----------------
-
-License
--------
-
-Apache 2.0
-
-Author Information
-------------------
-
-Openshift Operations
diff --git a/roles/chrony/defaults/main.yml b/roles/chrony/defaults/main.yml
deleted file mode 100644
index 95576e666..000000000
--- a/roles/chrony/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# defaults file for chrony
diff --git a/roles/chrony/handlers/main.yml b/roles/chrony/handlers/main.yml
deleted file mode 100644
index 1973c79e2..000000000
--- a/roles/chrony/handlers/main.yml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- name: Restart chronyd
- service:
- name: chronyd
- state: restarted
diff --git a/roles/chrony/meta/main.yml b/roles/chrony/meta/main.yml
deleted file mode 100644
index 85595d7c3..000000000
--- a/roles/chrony/meta/main.yml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-galaxy_info:
- author: Openshift Operations
- description: Configure chrony as an ntp client
- company: Red Hat
- license: Apache 2.0
- min_ansible_version: 1.9.2
- platforms:
- - name: EL
- versions:
- - 7
- - name: Fedora
- versions:
- - all
- categories:
- - system
-dependencies:
-- roles/lib_timedatectl
diff --git a/roles/chrony/tasks/main.yml b/roles/chrony/tasks/main.yml
deleted file mode 100644
index fae6d8e4c..000000000
--- a/roles/chrony/tasks/main.yml
+++ /dev/null
@@ -1,30 +0,0 @@
----
-- name: remove ntp package
- yum:
- name: ntp
- state: absent
-
-- name: ensure chrony package is installed
- yum:
- name: chrony
- state: installed
-
-- name: Install /etc/chrony.conf
- template:
- src: chrony.conf.j2
- dest: /etc/chrony.conf
- owner: root
- group: root
- mode: 0644
- notify:
- - Restart chronyd
-
-- name: Enable ntp via timedatectl set-ntp
- timedatectl:
- ntp: True
-
-- name: Start and enable chronyd
- service:
- name: chronyd
- state: started
- enabled: yes
diff --git a/roles/chrony/templates/chrony.conf.j2 b/roles/chrony/templates/chrony.conf.j2
deleted file mode 100644
index de43b6364..000000000
--- a/roles/chrony/templates/chrony.conf.j2
+++ /dev/null
@@ -1,45 +0,0 @@
-# Use public servers from the pool.ntp.org project.
-# Please consider joining the pool (http://www.pool.ntp.org/join.html).
-{% for server in chrony_ntp_servers %}
-server {{ server }} iburst
-{% endfor %}
-
-# Ignore stratum in source selection.
-stratumweight 0
-
-# Record the rate at which the system clock gains/loses time.
-driftfile /var/lib/chrony/drift
-
-# Enable kernel RTC synchronization.
-rtcsync
-
-# In the first three updates, step the system clock instead of slewing
-# if the adjustment is larger than 10 seconds.
-makestep 10 3
-
-# Allow NTP client access from local network.
-#allow 192.168/16
-
-# Listen for commands only on localhost.
-bindcmdaddress 127.0.0.1
-bindcmdaddress ::1
-
-# Serve time even if not synchronized to any NTP server.
-#local stratum 10
-
-keyfile /etc/chrony.keys
-
-# Specify the key used as password for chronyc.
-commandkey 1
-
-# Generate command key if missing.
-generatecommandkey
-
-# Disable logging of client accesses.
-noclientlog
-
-# Send a message to syslog if a clock adjustment is larger than 0.5 seconds.
-logchange 0.5
-
-logdir /var/log/chrony
-#log measurements statistics tracking
diff --git a/roles/chrony/vars/main.yml b/roles/chrony/vars/main.yml
deleted file mode 100644
index 061a21547..000000000
--- a/roles/chrony/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# vars file for chrony
diff --git a/roles/copr_cli/README.md b/roles/copr_cli/README.md
deleted file mode 100644
index edc68454e..000000000
--- a/roles/copr_cli/README.md
+++ /dev/null
@@ -1,38 +0,0 @@
-Role Name
-=========
-
-This role manages Copr CLI.
-
-https://apps.fedoraproject.org/packages/copr-cli/
-
-Requirements
-------------
-
-None
-
-Role Variables
---------------
-
-None
-
-Dependencies
-------------
-
-None
-
-Example Playbook
-----------------
-
- - hosts: servers
- roles:
- - role: copr_cli
-
-License
--------
-
-Apache License, Version 2.0
-
-Author Information
-------------------
-
-Thomas Wiest
diff --git a/roles/copr_cli/defaults/main.yml b/roles/copr_cli/defaults/main.yml
deleted file mode 100644
index 3b8adf910..000000000
--- a/roles/copr_cli/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# defaults file for copr_cli
diff --git a/roles/copr_cli/handlers/main.yml b/roles/copr_cli/handlers/main.yml
deleted file mode 100644
index c3dec5a4c..000000000
--- a/roles/copr_cli/handlers/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# handlers file for copr_cli
diff --git a/roles/copr_cli/meta/main.yml b/roles/copr_cli/meta/main.yml
deleted file mode 100644
index f050281fd..000000000
--- a/roles/copr_cli/meta/main.yml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-galaxy_info:
- author: Thomas Wiest
- description: Manages Copr CLI
- company: Red Hat
- license: Apache License, Version 2.0
- min_ansible_version: 1.2
- platforms:
- - name: EL
- versions:
- - 7
- categories:
- - packaging
-dependencies: []
diff --git a/roles/copr_cli/tasks/main.yml b/roles/copr_cli/tasks/main.yml
deleted file mode 100644
index b732fb7a4..000000000
--- a/roles/copr_cli/tasks/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-- action: "{{ ansible_pkg_mgr }} name=copr-cli state=present"
- when: not openshift.common.is_containerized | bool
diff --git a/roles/copr_cli/vars/main.yml b/roles/copr_cli/vars/main.yml
deleted file mode 100644
index 1522c94d9..000000000
--- a/roles/copr_cli/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# vars file for copr_cli
diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml
index 878d5fea8..0414ff21e 100644
--- a/roles/docker/tasks/main.yml
+++ b/roles/docker/tasks/main.yml
@@ -22,7 +22,7 @@
- name: Install docker
action: "{{ ansible_pkg_mgr }} name=docker{{ '-' + docker_version if docker_version is defined and docker_version != '' else '' }} state=present"
- when: not openshift.common.is_atomic | bool and not docker_version_result | skipped and docker_version_result.stdout | default('0.0', True) | version_compare(docker_version, 'lt')
+ when: not openshift.common.is_atomic | bool and docker_downgrade_result | skipped
# If docker were enabled and started before we downgraded it may have entered a
# failed state. Check for that and clear it if necessary.
@@ -69,6 +69,21 @@
reg_flag: --insecure-registry
notify:
- restart docker
+
+- name: Set Proxy Settings
+ lineinfile:
+ dest: /etc/sysconfig/docker
+ regexp: '^{{ item.reg_conf_var }}=.*$'
+ line: "{{ item.reg_conf_var }}='{{ item.reg_fact_val }}'"
+ with_items:
+ - reg_conf_var: HTTP_PROXY
+ reg_fact_val: "{{ docker_http_proxy | default('') }}"
+ - reg_conf_var: HTTPS_PROXY
+ reg_fact_val: "{{ docker_https_proxy | default('') }}"
+ - reg_conf_var: NO_PROXY
+ reg_fact_val: "{{ docker_no_proxy | default('') | join(',') }}"
+ notify:
+ - restart docker
- name: Set various docker options
lineinfile:
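The new "Set Proxy Settings" task writes HTTP_PROXY, HTTPS_PROXY and NO_PROXY into /etc/sysconfig/docker from three role variables. A hypothetical group_vars fragment feeding it; note that docker_no_proxy is expected to be a list, since the task joins it with commas before writing NO_PROXY:

```yaml
docker_http_proxy: http://proxy.example.com:3128    # illustrative values
docker_https_proxy: http://proxy.example.com:3128
docker_no_proxy:
- .example.com
- 172.30.0.0/16
```

With these values the rendered line would be NO_PROXY='.example.com,172.30.0.0/16'.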
diff --git a/roles/docker_storage_setup/README.md b/roles/docker_storage_setup/README.md
deleted file mode 100644
index 6039b349a..000000000
--- a/roles/docker_storage_setup/README.md
+++ /dev/null
@@ -1,42 +0,0 @@
-docker_storage_setup
-=========
-This role converts docker from loopback to direct-lvm (the Red Hat recommended way to run docker).
-
-It requires the block device to be already provisioned and attached to the host.
-
- Notes:
- * This is NOT idempotent; it only behaves idempotently once the conversion has already been done
- * This will remove /var/lib/docker!
- * You will need to re-deploy docker images
-
-Configure docker_storage_setup
-------------
-
-None
-
-Role Variables
---------------
-
-dss_docker_device: defaults to /dev/xvdb
-
-Dependencies
-------------
-
-None
-
-Example Playbook
-----------------
-
- - hosts: servers
- roles:
- - { role: docker_storage_setup, dss_docker_device: '/dev/xvdb' }
-
-License
--------
-
-ASL 2.0
-
-Author Information
-------------------
-
-OpenShift operations, Red Hat, Inc
diff --git a/roles/docker_storage_setup/defaults/main.yml b/roles/docker_storage_setup/defaults/main.yml
deleted file mode 100644
index 5013aba97..000000000
--- a/roles/docker_storage_setup/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-dss_docker_device: /dev/xvdb
diff --git a/roles/docker_storage_setup/tasks/main.yml b/roles/docker_storage_setup/tasks/main.yml
deleted file mode 100755
index 21c80e36e..000000000
--- a/roles/docker_storage_setup/tasks/main.yml
+++ /dev/null
@@ -1,95 +0,0 @@
----
-- name: Ensure docker is installed
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items:
- - docker-1.8.2
-
-# Docker doesn't seem to start cleanly the first time it is run
-# when loopback (not directlvm) is configured. Putting in an
-# ignore errors, and then sleeping till it can come up cleanly
-- name: Try to start docker. This might fail (loopback startup issue)
- service:
- name: docker
- state: started
- ignore_errors: yes
-
-- name: Pause for 30 seconds
- pause:
- seconds: 30
-
-- name: Ensure docker is started
- service:
- name: docker
- state: started
-
-- name: Determine if loopback
- shell: docker info | grep 'Data file:.*loop'
- register: loop_device_check
- ignore_errors: yes
-
-- debug:
- var: loop_device_check
-
-- name: fail if we don't detect loopback
- fail:
- msg: loopback not detected! Please investigate manually.
- when: loop_device_check.rc == 1
-
-- name: "check to see if {{ dss_docker_device }} exists"
- command: "test -e {{ dss_docker_device }}"
- register: docker_dev_check
- ignore_errors: yes
-
-- debug: var=docker_dev_check
-
-- name: "fail if {{ dss_docker_device }} doesn't exist"
- fail:
- msg: "{{ dss_docker_device }} doesn't exist. Please investigate"
- when: docker_dev_check.rc != 0
-
-- name: stop docker
- service:
- name: docker
- state: stopped
-
-- name: remove /var/lib/docker
- command: rm -rf /var/lib/docker
-
-- name: copy the docker-storage-setup config file
- copy:
- content: |
- DEVS={{ dss_docker_device }}
- VG=docker_vg
- dest: /etc/sysconfig/docker-storage-setup
- owner: root
- group: root
- mode: 0664
-
-- name: docker storage setup
- command: docker-storage-setup
- register: docker_storage_setup_output
-
-- debug:
- msg: "{{ docker_storage_setup_output }}"
-
-- name: extend the vg
- command: lvextend -l 90%VG /dev/docker_vg/docker-pool
- register: lvextend_output
-
-- debug:
- msg: "{{ lvextend_output }}"
-
-- name: start docker
- service:
- name: docker
- state: restarted
-
-- name: docker info
- command: docker info
- register: dockerinfo
-
-- debug:
- msg: "{{ dockerinfo }}"
diff --git a/roles/etcd/defaults/main.yaml b/roles/etcd/defaults/main.yaml
index e6b10cab7..a2212bacd 100644
--- a/roles/etcd/defaults/main.yaml
+++ b/roles/etcd/defaults/main.yaml
@@ -1,6 +1,5 @@
---
-etcd_service: "{{ 'etcd' if not openshift.common.is_containerized | bool else 'etcd_container' }}"
-etcd_interface: "{{ ansible_default_ipv4.interface }}"
+etcd_service: "{{ 'etcd' if not etcd_is_containerized | bool else 'etcd_container' }}"
etcd_client_port: 2379
etcd_peer_port: 2380
etcd_url_scheme: http
@@ -9,10 +8,10 @@ etcd_peer_url_scheme: http
etcd_initial_cluster_state: new
etcd_initial_cluster_token: etcd-cluster-1
-etcd_initial_advertise_peer_urls: "{{ etcd_peer_url_scheme }}://{{ hostvars[inventory_hostname]['ansible_' + etcd_interface]['ipv4']['address'] }}:{{ etcd_peer_port }}"
-etcd_listen_peer_urls: "{{ etcd_peer_url_scheme }}://{{ hostvars[inventory_hostname]['ansible_' + etcd_interface]['ipv4']['address'] }}:{{ etcd_peer_port }}"
-etcd_advertise_client_urls: "{{ etcd_url_scheme }}://{{ hostvars[inventory_hostname]['ansible_' + etcd_interface]['ipv4']['address'] }}:{{ etcd_client_port }}"
-etcd_listen_client_urls: "{{ etcd_url_scheme }}://{{ hostvars[inventory_hostname]['ansible_' + etcd_interface]['ipv4']['address'] }}:{{ etcd_client_port }}"
+etcd_initial_advertise_peer_urls: "{{ etcd_peer_url_scheme }}://{{ etcd_ip }}:{{ etcd_peer_port }}"
+etcd_listen_peer_urls: "{{ etcd_peer_url_scheme }}://{{ etcd_ip }}:{{ etcd_peer_port }}"
+etcd_advertise_client_urls: "{{ etcd_url_scheme }}://{{ etcd_ip }}:{{ etcd_client_port }}"
+etcd_listen_client_urls: "{{ etcd_url_scheme }}://{{ etcd_ip }}:{{ etcd_client_port }}"
etcd_data_dir: /var/lib/etcd/
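The per-interface fact lookup (`ansible_<etcd_interface>.ipv4.address`) is gone; all four URL defaults are now built from `etcd_ip`, which etcd_common defaults to `ansible_default_ipv4.address`. A minimal sketch for pinning a member to a specific address (file path, hostname and IP are hypothetical):

```yaml
# host_vars/etcd1.example.com.yml
etcd_ip: 192.168.10.11
etcd_hostname: etcd1.example.com
```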
diff --git a/roles/etcd/meta/main.yml b/roles/etcd/meta/main.yml
index 36906b347..a71b36237 100644
--- a/roles/etcd/meta/main.yml
+++ b/roles/etcd/meta/main.yml
@@ -16,6 +16,5 @@ galaxy_info:
- cloud
- system
dependencies:
-- { role: openshift_docker }
- { role: os_firewall }
- { role: etcd_common }
diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml
index afec6b30b..a798dc973 100644
--- a/roles/etcd/tasks/main.yml
+++ b/roles/etcd/tasks/main.yml
@@ -1,36 +1,35 @@
---
-- fail:
- msg: Interface {{ etcd_interface }} not found
- when: "'ansible_' ~ etcd_interface not in hostvars[inventory_hostname]"
-
-- fail:
- msg: IPv4 address not found for {{ etcd_interface }}
- when: "'ipv4' not in hostvars[inventory_hostname]['ansible_' ~ etcd_interface] or 'address' not in hostvars[inventory_hostname]['ansible_' ~ etcd_interface].ipv4"
+- name: Set hostname and ip facts
+ set_fact:
+ # Store etcd_hostname and etcd_ip such that they will be available
+ # in hostvars. Defaults for these variables are set in etcd_common.
+ etcd_hostname: "{{ etcd_hostname }}"
+ etcd_ip: "{{ etcd_ip }}"
- name: Install etcd
action: "{{ ansible_pkg_mgr }} name=etcd state=present"
- when: not openshift.common.is_containerized | bool
+ when: not etcd_is_containerized | bool
- name: Pull etcd container
command: docker pull {{ openshift.etcd.etcd_image }}
- when: openshift.common.is_containerized | bool
+ when: etcd_is_containerized | bool
- name: Install etcd container service file
template:
dest: "/etc/systemd/system/etcd_container.service"
src: etcd.docker.service
register: install_etcd_result
- when: openshift.common.is_containerized | bool
+ when: etcd_is_containerized | bool
- name: Ensure etcd datadir exists
- when: openshift.common.is_containerized | bool
+ when: etcd_is_containerized | bool
file:
path: "{{ etcd_data_dir }}"
state: directory
mode: 0700
- name: Disable system etcd when containerized
- when: openshift.common.is_containerized | bool
+ when: etcd_is_containerized | bool
service:
name: etcd
state: stopped
@@ -42,27 +41,27 @@
changed_when: false
- name: Mask system etcd when containerized
- when: openshift.common.is_containerized | bool and 'LoadState=not-found' not in etcd_show.stdout
+ when: etcd_is_containerized | bool and 'LoadState=not-found' not in etcd_show.stdout
command: systemctl mask etcd
- name: Reload systemd units
command: systemctl daemon-reload
- when: openshift.common.is_containerized | bool and ( install_etcd_result | changed )
+ when: etcd_is_containerized | bool and ( install_etcd_result | changed )
- name: Validate permissions on the config dir
file:
path: "{{ etcd_conf_dir }}"
state: directory
- owner: "{{ 'etcd' if not openshift.common.is_containerized | bool else omit }}"
- group: "{{ 'etcd' if not openshift.common.is_containerized | bool else omit }}"
+ owner: "{{ 'etcd' if not etcd_is_containerized | bool else omit }}"
+ group: "{{ 'etcd' if not etcd_is_containerized | bool else omit }}"
mode: 0700
- name: Validate permissions on certificate files
file:
path: "{{ item }}"
mode: 0600
- owner: "{{ 'etcd' if not openshift.common.is_containerized | bool else omit }}"
- group: "{{ 'etcd' if not openshift.common.is_containerized | bool else omit }}"
+ owner: "{{ 'etcd' if not etcd_is_containerized | bool else omit }}"
+ group: "{{ 'etcd' if not etcd_is_containerized | bool else omit }}"
when: etcd_url_scheme == 'https'
with_items:
- "{{ etcd_ca_file }}"
@@ -73,8 +72,8 @@
file:
path: "{{ item }}"
mode: 0600
- owner: "{{ 'etcd' if not openshift.common.is_containerized | bool else omit }}"
- group: "{{ 'etcd' if not openshift.common.is_containerized | bool else omit }}"
+ owner: "{{ 'etcd' if not etcd_is_containerized | bool else omit }}"
+ group: "{{ 'etcd' if not etcd_is_containerized | bool else omit }}"
when: etcd_peer_url_scheme == 'https'
with_items:
- "{{ etcd_peer_ca_file }}"
diff --git a/roles/etcd/templates/etcd.conf.j2 b/roles/etcd/templates/etcd.conf.j2
index 28816fd87..cd048ec60 100644
--- a/roles/etcd/templates/etcd.conf.j2
+++ b/roles/etcd/templates/etcd.conf.j2
@@ -1,15 +1,15 @@
{% macro initial_cluster() -%}
-{% for host in groups[etcd_peers_group] -%}
+{% for host in etcd_peers -%}
{% if loop.last -%}
-{{ host }}={{ etcd_peer_url_scheme }}://{{ etcd_host_int_map[host].interface.ipv4.address }}:{{ etcd_peer_port }}
+{{ hostvars[host].etcd_hostname }}={{ etcd_peer_url_scheme }}://{{ hostvars[host].etcd_ip }}:{{ etcd_peer_port }}
{%- else -%}
-{{ host }}={{ etcd_peer_url_scheme }}://{{ etcd_host_int_map[host].interface.ipv4.address }}:{{ etcd_peer_port }},
+{{ hostvars[host].etcd_hostname }}={{ etcd_peer_url_scheme }}://{{ hostvars[host].etcd_ip }}:{{ etcd_peer_port }},
{%- endif -%}
{% endfor -%}
{% endmacro -%}
-{% if groups[etcd_peers_group] and groups[etcd_peers_group] | length > 1 %}
-ETCD_NAME={{ inventory_hostname }}
+{% if etcd_peers | default([]) | length > 1 %}
+ETCD_NAME={{ etcd_hostname }}
ETCD_LISTEN_PEER_URLS={{ etcd_listen_peer_urls }}
{% else %}
ETCD_NAME=default
@@ -23,7 +23,7 @@ ETCD_LISTEN_CLIENT_URLS={{ etcd_listen_client_urls }}
#ETCD_MAX_WALS=5
#ETCD_CORS=
-{% if groups[etcd_peers_group] and groups[etcd_peers_group] | length > 1 %}
+{% if etcd_peers | default([]) | length > 1 %}
#[cluster]
ETCD_INITIAL_ADVERTISE_PEER_URLS={{ etcd_initial_advertise_peer_urls }}
ETCD_INITIAL_CLUSTER={{ initial_cluster() }}
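The initial_cluster() macro now reads each peer's `etcd_hostname` and `etcd_ip` from hostvars (published by the role's set_fact task) instead of the old `etcd_host_int_map`. Assuming a hypothetical two-member cluster:

```yaml
etcd_peers:
- etcd1.example.com   # hostvars[...].etcd_ip == 192.168.10.11
- etcd2.example.com   # hostvars[...].etcd_ip == 192.168.10.12
# Rendered result (illustrative):
# ETCD_INITIAL_CLUSTER=etcd1.example.com=http://192.168.10.11:2380,etcd2.example.com=http://192.168.10.12:2380
```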
diff --git a/roles/etcd_ca/tasks/main.yml b/roles/etcd_ca/tasks/main.yml
index cf7bc00a3..e1bb9baed 100644
--- a/roles/etcd_ca/tasks/main.yml
+++ b/roles/etcd_ca/tasks/main.yml
@@ -1,7 +1,7 @@
---
- name: Install openssl
action: "{{ ansible_pkg_mgr }} name=openssl state=present"
- when: not openshift.common.is_atomic | bool
+ when: not etcd_is_atomic | bool
- file:
path: "{{ item }}"
diff --git a/roles/etcd_certificates/tasks/client.yml b/roles/etcd_certificates/tasks/client.yml
index 6aa4883e0..7bf95809f 100644
--- a/roles/etcd_certificates/tasks/client.yml
+++ b/roles/etcd_certificates/tasks/client.yml
@@ -12,13 +12,13 @@
-config {{ etcd_openssl_conf }}
-out {{ item.etcd_cert_prefix }}client.csr
-reqexts {{ etcd_req_ext }} -batch -nodes
- -subj /CN={{ item.openshift.common.hostname }}
+ -subj /CN={{ item.etcd_hostname }}
args:
chdir: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}"
creates: "{{ etcd_generated_certs_dir ~ '/' ~ item.etcd_cert_subdir ~ '/'
~ item.etcd_cert_prefix ~ 'client.csr' }}"
environment:
- SAN: "IP:{{ item.openshift.common.ip }}"
+ SAN: "IP:{{ item.etcd_ip }}"
with_items: etcd_needing_client_certs
- name: Sign and create the client crt
@@ -32,7 +32,7 @@
creates: "{{ etcd_generated_certs_dir ~ '/' ~ item.etcd_cert_subdir ~ '/'
~ item.etcd_cert_prefix ~ 'client.crt' }}"
environment:
- SAN: "IP:{{ item.openshift.common.ip }}"
+ SAN: "IP:{{ item.etcd_ip }}"
with_items: etcd_needing_client_certs
- file:
diff --git a/roles/etcd_certificates/tasks/server.yml b/roles/etcd_certificates/tasks/server.yml
index 3499dcbef..2589c5192 100644
--- a/roles/etcd_certificates/tasks/server.yml
+++ b/roles/etcd_certificates/tasks/server.yml
@@ -12,13 +12,13 @@
-config {{ etcd_openssl_conf }}
-out {{ item.etcd_cert_prefix }}server.csr
-reqexts {{ etcd_req_ext }} -batch -nodes
- -subj /CN={{ item.openshift.common.hostname }}
+ -subj /CN={{ item.etcd_hostname }}
args:
chdir: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}"
creates: "{{ etcd_generated_certs_dir ~ '/' ~ item.etcd_cert_subdir ~ '/'
~ item.etcd_cert_prefix ~ 'server.csr' }}"
environment:
- SAN: "IP:{{ etcd_host_int_map[item.inventory_hostname].interface.ipv4.address }}"
+ SAN: "IP:{{ item.etcd_ip }}"
with_items: etcd_needing_server_certs
- name: Sign and create the server crt
@@ -32,7 +32,7 @@
creates: "{{ etcd_generated_certs_dir ~ '/' ~ item.etcd_cert_subdir ~ '/'
~ item.etcd_cert_prefix ~ 'server.crt' }}"
environment:
- SAN: "IP:{{ etcd_host_int_map[item.inventory_hostname].interface.ipv4.address }}"
+ SAN: "IP:{{ item.etcd_ip }}"
with_items: etcd_needing_server_certs
- name: Create the peer csr
@@ -41,13 +41,13 @@
-config {{ etcd_openssl_conf }}
-out {{ item.etcd_cert_prefix }}peer.csr
-reqexts {{ etcd_req_ext }} -batch -nodes
- -subj /CN={{ item.openshift.common.hostname }}
+ -subj /CN={{ item.etcd_hostname }}
args:
chdir: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}"
creates: "{{ etcd_generated_certs_dir ~ '/' ~ item.etcd_cert_subdir ~ '/'
~ item.etcd_cert_prefix ~ 'peer.csr' }}"
environment:
- SAN: "IP:{{ etcd_host_int_map[item.inventory_hostname].interface.ipv4.address }}"
+ SAN: "IP:{{ item.etcd_ip }}"
with_items: etcd_needing_server_certs
- name: Sign and create the peer crt
@@ -61,7 +61,7 @@
creates: "{{ etcd_generated_certs_dir ~ '/' ~ item.etcd_cert_subdir ~ '/'
~ item.etcd_cert_prefix ~ 'peer.crt' }}"
environment:
- SAN: "IP:{{ etcd_host_int_map[item.inventory_hostname].interface.ipv4.address }}"
+ SAN: "IP:{{ item.etcd_ip }}"
with_items: etcd_needing_server_certs
- file:
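All of the CSR and signing loops above now take the certificate CN and the IP SAN from flat `etcd_hostname` / `etcd_ip` keys on each item instead of nested `openshift.common` facts or the interface map. A hypothetical shape for one entry of `etcd_needing_server_certs` under that convention:

```yaml
etcd_needing_server_certs:
- etcd_cert_subdir: etcd-etcd1.example.com   # illustrative values
  etcd_cert_prefix: ''
  etcd_hostname: etcd1.example.com
  etcd_ip: 192.168.10.11
```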
diff --git a/roles/etcd_common/defaults/main.yml b/roles/etcd_common/defaults/main.yml
index 3af509448..1ff1d6ef8 100644
--- a/roles/etcd_common/defaults/main.yml
+++ b/roles/etcd_common/defaults/main.yml
@@ -1,6 +1,4 @@
---
-etcd_peers_group: oo_etcd_to_config
-
# etcd server vars
etcd_conf_dir: /etc/etcd
etcd_ca_file: "{{ etcd_conf_dir }}/ca.crt"
@@ -28,3 +26,9 @@ etcd_ca_db: "{{ etcd_ca_dir }}/index.txt"
etcd_ca_serial: "{{ etcd_ca_dir }}/serial"
etcd_ca_crl_number: "{{ etcd_ca_dir }}/crlnumber"
etcd_ca_default_days: 365
+
+# etcd server & certificate vars
+etcd_hostname: "{{ inventory_hostname }}"
+etcd_ip: "{{ ansible_default_ipv4.address }}"
+etcd_is_atomic: False
+etcd_is_containerized: False
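With `etcd_is_atomic` and `etcd_is_containerized` defaulting to False and the `openshift.common` lookups removed from the role, a containerized install is expected to pass the flags in explicitly. A minimal sketch of that wiring (the expression shown is an assumption, not taken from this diff):

```yaml
- hosts: etcd
  roles:
  - role: etcd
    etcd_is_containerized: "{{ openshift.common.is_containerized | default(false) }}"
```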
diff --git a/roles/etcd_common/tasks/main.yml b/roles/etcd_common/tasks/main.yml
deleted file mode 100644
index be75fdab2..000000000
--- a/roles/etcd_common/tasks/main.yml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-- set_fact:
- etcd_host_int_map: "{{ lookup('template', '../templates/host_int_map.j2') | from_yaml }}"
-
-- fail:
- msg: "Interface {{ item.value.etcd_interface }} not found on host {{ item.key }}"
- when: "'etcd_interface' in item.value and 'interface' not in item.value"
- with_dict: etcd_host_int_map | default({})
-
-- fail:
- msg: IPv4 address not found for {{ item.value.interface.device }} on host {{ item.key }}
- when: "'ipv4' not in item.value.interface or 'address' not in item.value.interface.ipv4"
- with_dict: etcd_host_int_map | default({})
diff --git a/roles/etcd_common/templates/host_int_map.j2 b/roles/etcd_common/templates/host_int_map.j2
deleted file mode 100644
index 9c9c76413..000000000
--- a/roles/etcd_common/templates/host_int_map.j2
+++ /dev/null
@@ -1,13 +0,0 @@
----
-{% for host in groups[etcd_peers_group] %}
-{% set entry=hostvars[host] %}
-{{ entry.inventory_hostname }}:
-{% if 'etcd_interface' in entry %}
- etcd_interface: {{ entry.etcd_interface }}
-{% if entry.etcd_interface in entry.ansible_interfaces %}
- interface: {{ entry['ansible_' ~ entry.etcd_interface] | to_json }}
-{% endif %}
-{% else %}
- interface: {{ entry['ansible_' ~ entry.ansible_default_ipv4.interface] | to_json }}
-{% endif %}
-{% endfor %}
diff --git a/roles/flannel/handlers/main.yml b/roles/flannel/handlers/main.yml
index f9b9ae7f1..981ea5c7a 100644
--- a/roles/flannel/handlers/main.yml
+++ b/roles/flannel/handlers/main.yml
@@ -1,8 +1,8 @@
---
- name: restart flanneld
- sudo: true
+ become: yes
service: name=flanneld state=restarted
- name: restart docker
- sudo: true
+ become: yes
service: name=docker state=restarted
diff --git a/roles/flannel/tasks/main.yml b/roles/flannel/tasks/main.yml
index aa27b674e..6b6dfb423 100644
--- a/roles/flannel/tasks/main.yml
+++ b/roles/flannel/tasks/main.yml
@@ -1,11 +1,11 @@
---
- name: Install flannel
- sudo: true
+ become: yes
action: "{{ ansible_pkg_mgr }} name=flannel state=present"
when: not openshift.common.is_containerized | bool
- name: Set flannel etcd url
- sudo: true
+ become: yes
lineinfile:
dest: /etc/sysconfig/flanneld
backrefs: yes
@@ -13,7 +13,7 @@
line: '\1{{ etcd_hosts|join(",") }}'
- name: Set flannel etcd key
- sudo: true
+ become: yes
lineinfile:
dest: /etc/sysconfig/flanneld
backrefs: yes
@@ -21,7 +21,7 @@
line: '\1{{ flannel_etcd_key }}'
- name: Set flannel options
- sudo: true
+ become: yes
lineinfile:
dest: /etc/sysconfig/flanneld
backrefs: yes
@@ -29,7 +29,7 @@
line: '\1--iface {{ flannel_interface }} --etcd-cafile={{ etcd_peer_ca_file }} --etcd-keyfile={{ etcd_peer_key_file }} --etcd-certfile={{ etcd_peer_cert_file }}'
- name: Enable flanneld
- sudo: true
+ become: yes
service:
name: flanneld
state: started
@@ -37,7 +37,7 @@
register: start_result
- name: Remove docker bridge ip
- sudo: true
+ become: yes
shell: ip a del `ip a show docker0 | grep "inet[[:space:]]" | awk '{print $2}'` dev docker0
notify:
- restart docker
diff --git a/roles/flannel_register/README.md b/roles/flannel_register/README.md
index ba7541ab1..623c4c7cf 100644
--- a/roles/flannel_register/README.md
+++ b/roles/flannel_register/README.md
@@ -14,7 +14,7 @@ Role Variables
| Name | Default value | Description |
|---------------------|----------------------------------------------------|-------------------------------------------------|
-| flannel_network | {{ openshift.master.portal_net }} or 172.16.1.1/16 | interface to use for inter-host communication |
+| flannel_network | {{ openshift.common.portal_net }} or 172.30.0.0/16 | IP network from which flannel subnets are allocated |
| flannel_min_network | {{ min_network }} or 172.30.5.0 | beginning of IP range for the subnet allocation |
| flannel_subnet_len | 24 | size of the subnet allocated to each host |
| flannel_etcd_key | /openshift.com/network | etcd prefix |
diff --git a/roles/flannel_register/defaults/main.yaml b/roles/flannel_register/defaults/main.yaml
index 269d1a17c..b1279aa88 100644
--- a/roles/flannel_register/defaults/main.yaml
+++ b/roles/flannel_register/defaults/main.yaml
@@ -1,5 +1,5 @@
---
-flannel_network: "{{ openshift.master.portal_net | default('172.30.0.0/16', true) }}"
+flannel_network: "{{ openshift.common.portal_net | default('172.30.0.0/16', true) }}"
flannel_min_network: 172.30.5.0
flannel_subnet_len: 24
flannel_etcd_key: /openshift.com/network
diff --git a/roles/flannel_register/tasks/main.yml b/roles/flannel_register/tasks/main.yml
index 1629157c8..845b7ef40 100644
--- a/roles/flannel_register/tasks/main.yml
+++ b/roles/flannel_register/tasks/main.yml
@@ -1,14 +1,14 @@
---
- name: Assures /etc/flannel dir exists
- sudo: true
+ become: yes
file: path=/etc/flannel state=directory
- name: Generate etcd configuration for etcd
- sudo: true
+ become: yes
template:
src: "flannel-config.json"
dest: "/etc/flannel/config.json"
- name: Insert flannel configuration into etcd
- sudo: true
+ become: yes
command: 'curl -L --cacert "{{ etcd_peer_ca_file }}" --cert "{{ etcd_peer_cert_file }}" --key "{{ etcd_peer_key_file }}" "{{ etcd_hosts[0] }}/v2/keys{{ flannel_etcd_key }}/config" -XPUT --data-urlencode value@/etc/flannel/config.json'
diff --git a/roles/kube_nfs_volumes/README.md b/roles/kube_nfs_volumes/README.md
index 1520f79b2..dd91ad8b1 100644
--- a/roles/kube_nfs_volumes/README.md
+++ b/roles/kube_nfs_volumes/README.md
@@ -94,7 +94,7 @@ partitions.
* Create an ansible playbook, say `setupnfs.yaml`:
```
- hosts: nfsservers
- sudo: yes
+ become: yes
roles:
- role: kube_nfs_volumes
disks: "/dev/sdb,/dev/sdc"
diff --git a/roles/lib_dyn/README.md b/roles/lib_dyn/README.md
deleted file mode 100644
index 1eec9f81c..000000000
--- a/roles/lib_dyn/README.md
+++ /dev/null
@@ -1,27 +0,0 @@
-lib_dyn
-=========
-
-A role containing the dyn_record module for managing DNS records through Dyn's
-API
-
-Requirements
-------------
-
-The module requires the `dyn` python module for interacting with the Dyn API.
-https://github.com/dyninc/dyn-python
-
-Example Playbook
-----------------
-
-To make sure the `dyn_record` module is available for use include the role
-before it is used.
-
- - hosts: servers
- roles:
- - lib_dyn
-
-License
--------
-
-Apache
-
diff --git a/roles/lib_dyn/library/dyn_record.py b/roles/lib_dyn/library/dyn_record.py
deleted file mode 100644
index 42d970060..000000000
--- a/roles/lib_dyn/library/dyn_record.py
+++ /dev/null
@@ -1,351 +0,0 @@
-#!/usr/bin/python
-#
-# (c) 2015, Russell Harrison <rharriso@redhat.com>
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# pylint: disable=too-many-branches
-'''Ansible module to manage records in the Dyn Managed DNS service'''
-DOCUMENTATION = '''
----
-module: dyn_record
-version_added: "1.9"
-short_description: Manage records in the Dyn Managed DNS service.
-description:
- - "Manages DNS records via the REST API of the Dyn Managed DNS service. It
- - "handles records only; there is no manipulation of zones or account support"
- - "yet. See: U(https://help.dyn.com/dns-api-knowledge-base/)"
-options:
- state:
- description:
- -"Whether the record should be c(present) or c(absent). Optionally the"
- - "state c(list) can be used to return the current value of a record."
- required: true
- choices: [ 'present', 'absent', 'list' ]
- default: present
-
- customer_name:
- description:
- - "The Dyn customer name for your account. If not set the value of the"
- - "c(DYNECT_CUSTOMER_NAME) environment variable is used."
- required: false
- default: null
-
- user_name:
- description:
- - "The Dyn user name to log in with. If not set the value of the"
- - "c(DYNECT_USER_NAME) environment variable is used."
- required: false
- default: null
-
- user_password:
- description:
- - "The Dyn user's password to log in with. If not set the value of the"
- - "c(DYNECT_PASSWORD) environment variable is used."
- required: false
- default: null
-
- zone:
- description:
- - "The DNS zone in which your record is located."
- required: true
- default: null
-
- record_fqdn:
- description:
- - "Fully qualified domain name of the record name to get, create, delete,"
- - "or update."
- required: true
- default: null
-
- record_type:
- description:
- - "Record type."
- required: true
- choices: [ 'A', 'AAAA', 'CNAME', 'PTR', 'TXT' ]
- default: null
-
- record_value:
- description:
- - "Record value. If record_value is not specified; no changes will be"
- - "made and the module will fail"
- required: false
- default: null
-
- record_ttl:
- description:
- - "Record's 'Time to live'. Number of seconds the record remains cached"
- - 'in DNS servers or c(0) to use the default TTL for the zone.'
- - 'This option is mutually exclusive with use_zone_ttl'
- required: false
- default: 0
-
- use_zone_ttl:
- description:
- - "Use the Dyn zone's default TTL"
- - 'This option is mutually exclusive with record_ttl'
- required: false
- default: false
-
-notes:
- - The module makes a broad assumption that there will be only one record per "node" (FQDN).
- - This module returns record(s) in the "result" element when 'state' is set to 'present'. This value can be registered and used in your playbooks.
-
-requirements: [ dyn ]
-author: "Russell Harrison"
-'''
-
-EXAMPLES = '''
-# Attempting to cname www.example.com to web1.example.com
-- name: Update CNAME record
- dyn_record:
- state: present
- record_fqdn: www.example.com
- zone: example.com
- record_type: CNAME
- record_value: web1.example.com
- record_ttl: 7200
-
-# Use the zone's default TTL
-- name: Update CNAME record
- dyn_record:
- state: present
- record_fqdn: www.example.com
- zone: example.com
- record_type: CNAME
- record_value: web1.example.com
- use_zone_ttl: true
-
-- name: Update A record
- dyn_record:
- state: present
- record_fqdn: web1.example.com
- zone: example.com
- record_value: 10.0.0.10
- record_type: A
-'''
-
-try:
- IMPORT_ERROR = False
- from dyn.tm.session import DynectSession
- from dyn.tm.zones import Zone
- import dyn.tm.errors
- import os
-
-except ImportError as error:
- IMPORT_ERROR = str(error)
-
-# Each of the record types use a different method for the value.
-RECORD_PARAMS = {
- 'A' : {'value_param': 'address'},
- 'AAAA' : {'value_param': 'address'},
- 'CNAME' : {'value_param': 'cname'},
- 'PTR' : {'value_param': 'ptrdname'},
- 'TXT' : {'value_param': 'txtdata'}
-}
-
-# You'll notice that the value_param doesn't match the key (records_key)
-# in the dict returned from Dyn when doing a dyn_node.get_all_records()
-# This is a frustrating lookup dict to allow mapping to the RECORD_PARAMS
-# dict so we can lookup other values in it efficiently
-
-def get_record_type(record_key):
- '''Get the record type represented by the keys returned from get_any_records.'''
- return record_key.replace('_records', '').upper()
-
-def get_record_key(record_type):
- '''Get the key to look up records in the dictionary returned from get_any_records.
- example:
- 'cname_records'
- '''
- return record_type.lower() + '_records'
-
-def get_any_records(module, node):
- '''Get any records for a given node'''
- # Let's get a list of any records for the node
- try:
- records = node.get_any_records()
- except dyn.tm.errors.DynectGetError as error:
- if 'Not in zone' in str(error):
- # The node isn't in the zone so we'll return an empty dictionary
- return {}
- else:
- # An unknown error happened so we'll need to return it.
- module.fail_json(msg='Unable to get records',
- error=str(error))
-
- # Return a dictionary of the record objects
- return records
-
-def get_record_values(records):
- '''Get the record values for each record returned by get_any_records.'''
- # This simply returns the values from a record
- ret_dict = {}
- for key in records.keys():
- record_type = get_record_type(key)
- params = [RECORD_PARAMS[record_type]['value_param'], 'ttl', 'zone', 'fqdn']
- ret_dict[key] = []
- properties = {}
- for elem in records[key]:
- for param in params:
- properties[param] = getattr(elem, param)
- ret_dict[key].append(properties)
-
- return ret_dict
-
-def compare_record_values(record_type_key, user_record_value, dyn_values):
- ''' Verify the user record_value exists in dyn'''
- rtype = get_record_type(record_type_key)
- for record in dyn_values[record_type_key]:
- if user_record_value in record[RECORD_PARAMS[rtype]['value_param']]:
- return True
-
- return False
-
-def compare_record_ttl(record_type_key, user_record_value, dyn_values, user_param_ttl):
- ''' Verify the ttls match for the record'''
- rtype = get_record_type(record_type_key)
- for record in dyn_values[record_type_key]:
- # find the right record
- if user_record_value in record[RECORD_PARAMS[rtype]['value_param']]:
- # Compare ttls from the records
- if int(record['ttl']) == user_param_ttl:
- return True
-
- return False
-
-def main():
- '''Ansible module for managing Dyn DNS records.'''
- module = AnsibleModule(
- argument_spec=dict(
- state=dict(default='present', choices=['present', 'absent', 'list']),
- customer_name=dict(default=os.environ.get('DYNECT_CUSTOMER_NAME', None), type='str'),
- user_name=dict(default=os.environ.get('DYNECT_USER_NAME', None), type='str', no_log=True),
- user_password=dict(default=os.environ.get('DYNECT_PASSWORD', None), type='str', no_log=True),
- zone=dict(required=True, type='str'),
- record_fqdn=dict(required=False, type='str'),
- record_type=dict(required=False, type='str', choices=[
- 'A', 'AAAA', 'CNAME', 'PTR', 'TXT']),
- record_value=dict(required=False, type='str'),
- record_ttl=dict(required=False, default=None, type='int'),
- use_zone_ttl=dict(required=False, default=False),
- ),
- required_together=(
- ['record_fqdn', 'record_value', 'record_ttl', 'record_type']
- ),
- mutually_exclusive=[('record_ttl', 'use_zone_ttl')]
- )
-
- if IMPORT_ERROR:
- module.fail_json(msg="Unable to import dyn module: https://pypi.python.org/pypi/dyn", error=IMPORT_ERROR)
-
- if module.params['record_ttl'] != None and int(module.params['record_ttl']) <= 0:
- module.fail_json(msg="Invalid Value for record TTL")
-
- # Start the Dyn session
- try:
- _ = DynectSession(module.params['customer_name'],
- module.params['user_name'],
- module.params['user_password'])
- except dyn.tm.errors.DynectAuthError as error:
- module.fail_json(msg='Unable to authenticate with Dyn', error=str(error))
-
- # Retrieve zone object
- try:
- dyn_zone = Zone(module.params['zone'])
- except dyn.tm.errors.DynectGetError as error:
- if 'No such zone' in str(error):
- module.fail_json(msg="Not a valid zone for this account", zone=module.params['zone'])
- else:
- module.fail_json(msg="Unable to retrieve zone", error=str(error))
-
- # To retrieve the node object we need to remove the zone name from the FQDN
- dyn_node_name = module.params['record_fqdn'].replace('.' + module.params['zone'], '')
-
- # Retrieve the zone object from dyn
- dyn_zone = Zone(module.params['zone'])
-
- # Retrieve the node object from dyn
- dyn_node = dyn_zone.get_node(node=dyn_node_name)
-
- # All states will need a list of the existing records for the zone.
- dyn_node_records = get_any_records(module, dyn_node)
-
- dyn_values = get_record_values(dyn_node_records)
-
- if module.params['state'] == 'list':
- module.exit_json(changed=False, dyn_records=dyn_values)
-
- elif module.params['state'] == 'absent':
- # If there are any records present we'll want to delete the node.
- if dyn_node_records:
- dyn_node.delete()
-
- # Publish the zone since we've modified it.
- dyn_zone.publish()
-
- module.exit_json(changed=True, msg="Removed node %s from zone %s" % (dyn_node_name, module.params['zone']))
-
- module.exit_json(changed=False)
-
- elif module.params['state'] == 'present':
-
- # Configure the TTL variable:
- # if use_zone_ttl, use the zone's default TTL.
- # if record_ttl is None, don't check it; pass 0 (the API default).
- # if record_ttl > 0, ensure this exact TTL is set.
- if module.params['use_zone_ttl']:
- user_param_ttl = dyn_zone.ttl
- elif not module.params['record_ttl']:
- user_param_ttl = 0
- else:
- user_param_ttl = module.params['record_ttl']
-
- # First get a list of existing records for the node
- record_type_key = get_record_key(module.params['record_type'])
- user_record_value = module.params['record_value']
-
- # Check to see if the record is already in place before doing anything.
- if dyn_node_records and compare_record_values(record_type_key, user_record_value, dyn_values):
-
- if user_param_ttl == 0 or \
- compare_record_ttl(record_type_key, user_record_value, dyn_values, user_param_ttl):
- module.exit_json(changed=False, dyn_record=dyn_values)
-
- # Working on the assumption that there is only one record per node,
- # first delete the node if any records exist, then create the
- # correct record.
- if dyn_node_records:
- dyn_node.delete()
-
- # Now lets create the correct node entry.
- record = dyn_zone.add_record(dyn_node_name,
- module.params['record_type'],
- module.params['record_value'],
- user_param_ttl
- )
-
- # Now publish the zone since we've updated it.
- dyn_zone.publish()
-
- rmsg = "Created node [%s] " % dyn_node_name
- rmsg += "in zone: [%s]" % module.params['zone']
- module.exit_json(changed=True, msg=rmsg, dyn_record=get_record_values({record_type_key: [record]}))
-
- module.fail_json(msg="Unknown state: [%s]" % module.params['state'])
-
-# Ansible tends to need a wild card import so we'll use it here
-# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
-from ansible.module_utils.basic import *
-if __name__ == '__main__':
- main()
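
The TTL precedence in the `present` branch above is the subtlest part of this module, so here is a minimal runnable sketch of it (illustrative only, not part of the module; `zone_ttl` stands in for `dyn_zone.ttl`):

```python
# Minimal sketch of the TTL precedence used by state=present:
# use_zone_ttl wins, then an explicit record_ttl, else 0 (API default).
def resolve_ttl(use_zone_ttl, record_ttl, zone_ttl):
    if use_zone_ttl:
        return zone_ttl    # the zone's default TTL
    if not record_ttl:
        return 0           # let the Dyn API apply its default
    return record_ttl      # an explicit, pre-validated TTL

assert resolve_ttl(True, None, 3600) == 3600
assert resolve_ttl(False, None, 3600) == 0
assert resolve_ttl(False, 300, 3600) == 300
```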
diff --git a/roles/lib_dyn/meta/main.yml b/roles/lib_dyn/meta/main.yml
deleted file mode 100644
index 5475c6971..000000000
--- a/roles/lib_dyn/meta/main.yml
+++ /dev/null
@@ -1,33 +0,0 @@
----
-galaxy_info:
- author: Russell Harrison
- description: A role to provide the dyn_record module
- company: Red Hat, Inc.
- # If the issue tracker for your role is not on github, uncomment the
- # next line and provide a value
- # issue_tracker_url: http://example.com/issue/tracker
- license: Apache
- min_ansible_version: 1.9
- platforms:
- - name: EL
- versions:
- - 7
- #- name: Fedora
- # versions:
- # - 19
- # - 20
- # - 21
- # - 22
- # Below are all categories currently available. Just as with
- # the platforms above, uncomment those that apply to your role.
- categories:
- - networking
-dependencies: []
- # List your role dependencies here, one per line.
- # Be sure to remove the '[]' above if you add dependencies
- # to this list.
- #
- # No role dependencies at this time. The module contained in this role does
- # require the dyn python module.
- # https://pypi.python.org/pypi/dyn
-
diff --git a/roles/lib_dyn/tasks/main.yml b/roles/lib_dyn/tasks/main.yml
deleted file mode 100644
index 965962928..000000000
--- a/roles/lib_dyn/tasks/main.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-# tasks file for lib_dyn
-
-- name: Make sure python-dyn is installed
- yum: name=python-dyn state=present
- tags:
- - lib_dyn
diff --git a/roles/lib_openshift_api/build/ansible/edit.py b/roles/lib_openshift_api/build/ansible/edit.py
deleted file mode 100644
index 943fa47a6..000000000
--- a/roles/lib_openshift_api/build/ansible/edit.py
+++ /dev/null
@@ -1,84 +0,0 @@
-# pylint: skip-file
-
-def main():
- '''
- ansible oc module for editing objects
- '''
-
- module = AnsibleModule(
- argument_spec=dict(
- kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
- state=dict(default='present', type='str',
- choices=['present']),
- debug=dict(default=False, type='bool'),
- namespace=dict(default='default', type='str'),
- name=dict(default=None, required=True, type='str'),
- kind=dict(required=True,
- type='str',
- choices=['dc', 'deploymentconfig',
- 'svc', 'service',
- 'scc', 'securitycontextconstraints',
- 'ns', 'namespace', 'project', 'projects',
- 'is', 'imagestream',
- 'istag', 'imagestreamtag',
- 'bc', 'buildconfig',
- 'routes',
- 'node',
- 'secret',
- ]),
- file_name=dict(default=None, type='str'),
- file_format=dict(default='yaml', type='str'),
- content=dict(default=None, required=True, type='dict'),
- force=dict(default=False, type='bool'),
- ),
- supports_check_mode=True,
- )
- ocedit = Edit(module.params['kind'],
- module.params['namespace'],
- module.params['name'],
- kubeconfig=module.params['kubeconfig'],
- verbose=module.params['debug'])
-
- state = module.params['state']
-
- api_rval = ocedit.get()
-
- ########
- # Verify the object exists; oc_edit cannot create it
- ########
- if not Utils.exists(api_rval['results'], module.params['name']):
- module.fail_json(msg=api_rval)
-
- ########
- # Update
- ########
- api_rval = ocedit.update(module.params['file_name'],
- module.params['content'],
- module.params['force'],
- module.params['file_format'])
-
-
- if api_rval['returncode'] != 0:
- module.fail_json(msg=api_rval)
-
- if api_rval.has_key('updated') and not api_rval['updated']:
- module.exit_json(changed=False, results=api_rval, state="present")
-
- # return the created object
- api_rval = ocedit.get()
-
- if api_rval['returncode'] != 0:
- module.fail_json(msg=api_rval)
-
- module.exit_json(changed=True, results=api_rval, state="present")
-
- module.exit_json(failed=True,
- changed=False,
- results='Unknown state passed. %s' % state,
- state="unknown")
-
-# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
-# import module snippets. These are required
-from ansible.module_utils.basic import *
-
-main()
diff --git a/roles/lib_openshift_api/build/ansible/obj.py b/roles/lib_openshift_api/build/ansible/obj.py
deleted file mode 100644
index a14ac0e43..000000000
--- a/roles/lib_openshift_api/build/ansible/obj.py
+++ /dev/null
@@ -1,139 +0,0 @@
-# pylint: skip-file
-
-# pylint: disable=too-many-branches
-def main():
- '''
- ansible oc module for generic objects
- '''
-
- module = AnsibleModule(
- argument_spec=dict(
- kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
- state=dict(default='present', type='str',
- choices=['present', 'absent', 'list']),
- debug=dict(default=False, type='bool'),
- namespace=dict(default='default', type='str'),
- name=dict(default=None, type='str'),
- files=dict(default=None, type='list'),
- kind=dict(required=True,
- type='str',
- choices=['dc', 'deploymentconfig',
- 'svc', 'service',
- 'scc', 'securitycontextconstraints',
- 'ns', 'namespace', 'project', 'projects',
- 'is', 'imagestream',
- 'istag', 'imagestreamtag',
- 'bc', 'buildconfig',
- 'routes',
- 'node',
- 'secret',
- ]),
- delete_after=dict(default=False, type='bool'),
- content=dict(default=None, type='dict'),
- force=dict(default=False, type='bool'),
- ),
- mutually_exclusive=[["content", "files"]],
-
- supports_check_mode=True,
- )
- ocobj = OCObject(module.params['kind'],
- module.params['namespace'],
- module.params['name'],
- kubeconfig=module.params['kubeconfig'],
- verbose=module.params['debug'])
-
- state = module.params['state']
-
- api_rval = ocobj.get()
-
- #####
- # Get
- #####
- if state == 'list':
- module.exit_json(changed=False, results=api_rval['results'], state="list")
-
- if not module.params['name']:
- module.fail_json(msg='Please specify a name when state is absent|present.')
- ########
- # Delete
- ########
- if state == 'absent':
- if not Utils.exists(api_rval['results'], module.params['name']):
- module.exit_json(changed=False, state="absent")
-
- if module.check_mode:
- module.exit_json(changed=False, msg='Would have performed a delete.')
-
- api_rval = ocobj.delete()
- module.exit_json(changed=True, results=api_rval, state="absent")
-
- if state == 'present':
- ########
- # Create
- ########
- if not Utils.exists(api_rval['results'], module.params['name']):
-
- if module.check_mode:
- module.exit_json(changed=False, msg='Would have performed a create.')
-
- # Create it here
- api_rval = ocobj.create(module.params['files'], module.params['content'])
- if api_rval['returncode'] != 0:
- module.fail_json(msg=api_rval)
-
- # return the created object
- api_rval = ocobj.get()
-
- if api_rval['returncode'] != 0:
- module.fail_json(msg=api_rval)
-
- # Remove files
- if module.params['files'] and module.params['delete_after']:
- Utils.cleanup(module.params['files'])
-
- module.exit_json(changed=True, results=api_rval, state="present")
-
- ########
- # Update
- ########
- # if a file path is passed, use it.
- update = ocobj.needs_update(module.params['files'], module.params['content'])
- if not isinstance(update, bool):
- module.fail_json(msg=update)
-
- # No changes
- if not update:
- if module.params['files'] and module.params['delete_after']:
- Utils.cleanup(module.params['files'])
-
- module.exit_json(changed=False, results=api_rval['results'][0], state="present")
-
- if module.check_mode:
- module.exit_json(changed=False, msg='Would have performed an update.')
-
- api_rval = ocobj.update(module.params['files'],
- module.params['content'],
- module.params['force'])
-
-
- if api_rval['returncode'] != 0:
- module.fail_json(msg=api_rval)
-
- # return the created object
- api_rval = ocobj.get()
-
- if api_rval['returncode'] != 0:
- module.fail_json(msg=api_rval)
-
- module.exit_json(changed=True, results=api_rval, state="present")
-
- module.exit_json(failed=True,
- changed=False,
- results='Unknown state passed. %s' % state,
- state="unknown")
-
-# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
-# import module snippets. These are required
-from ansible.module_utils.basic import *
-
-main()
diff --git a/roles/lib_openshift_api/build/ansible/router.py b/roles/lib_openshift_api/build/ansible/router.py
deleted file mode 100644
index 3b24c7b5e..000000000
--- a/roles/lib_openshift_api/build/ansible/router.py
+++ /dev/null
@@ -1,142 +0,0 @@
-# pylint: skip-file
-
-def main():
- '''
- ansible oadm module for the router
- '''
-
- module = AnsibleModule(
- argument_spec=dict(
- state=dict(default='present', type='str',
- choices=['present', 'absent']),
- debug=dict(default=False, type='bool'),
- namespace=dict(default='default', type='str'),
- name=dict(default='router', type='str'),
-
- kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
- credentials=dict(default='/etc/origin/master/openshift-router.kubeconfig', type='str'),
- cert_file=dict(default=None, type='str'),
- key_file=dict(default=None, type='str'),
- image=dict(default=None, type='str'), #'openshift3/ose-${component}:${version}'
- latest_image=dict(default=False, type='bool'),
- labels=dict(default=None, type='list'),
- ports=dict(default=['80:80', '443:443'], type='list'),
- replicas=dict(default=1, type='int'),
- selector=dict(default=None, type='str'),
- service_account=dict(default='router', type='str'),
- router_type=dict(default='haproxy-router', type='str'),
- host_network=dict(default=True, type='bool'),
- # external host options
- external_host=dict(default=None, type='str'),
- external_host_vserver=dict(default=None, type='str'),
- external_host_insecure=dict(default=False, type='bool'),
- external_host_partition_path=dict(default=None, type='str'),
- external_host_username=dict(default=None, type='str'),
- external_host_password=dict(default=None, type='str'),
- external_host_private_key=dict(default=None, type='str'),
- # Metrics
- expose_metrics=dict(default=False, type='bool'),
- metrics_image=dict(default=None, type='str'),
- # Stats
- stats_user=dict(default=None, type='str'),
- stats_password=dict(default=None, type='str'),
- stats_port=dict(default=1936, type='int'),
-
- ),
- mutually_exclusive=[["router_type", "image"]],
-
- supports_check_mode=True,
- )
-
- rconfig = RouterConfig(module.params['name'],
- module.params['kubeconfig'],
- {'credentials': {'value': module.params['credentials'], 'include': True},
- 'default_cert': {'value': None, 'include': True},
- 'cert_file': {'value': module.params['cert_file'], 'include': False},
- 'key_file': {'value': module.params['key_file'], 'include': False},
- 'image': {'value': module.params['image'], 'include': True},
- 'latest_image': {'value': module.params['latest_image'], 'include': True},
- 'labels': {'value': module.params['labels'], 'include': True},
- 'ports': {'value': ','.join(module.params['ports']), 'include': True},
- 'replicas': {'value': module.params['replicas'], 'include': True},
- 'selector': {'value': module.params['selector'], 'include': True},
- 'service_account': {'value': module.params['service_account'], 'include': True},
- 'router_type': {'value': module.params['router_type'], 'include': False},
- 'host_network': {'value': module.params['host_network'], 'include': True},
- 'external_host': {'value': module.params['external_host'], 'include': True},
- 'external_host_vserver': {'value': module.params['external_host_vserver'],
- 'include': True},
- 'external_host_insecure': {'value': module.params['external_host_insecure'],
- 'include': True},
- 'external_host_partition_path': {'value': module.params['external_host_partition_path'],
- 'include': True},
- 'external_host_username': {'value': module.params['external_host_username'],
- 'include': True},
- 'external_host_password': {'value': module.params['external_host_password'],
- 'include': True},
- 'external_host_private_key': {'value': module.params['external_host_private_key'],
- 'include': True},
- 'expose_metrics': {'value': module.params['expose_metrics'], 'include': True},
- 'metrics_image': {'value': module.params['metrics_image'], 'include': True},
- 'stats_user': {'value': module.params['stats_user'], 'include': True},
- 'stats_password': {'value': module.params['stats_password'], 'include': True},
- 'stats_port': {'value': module.params['stats_port'], 'include': True},
- })
-
-
- ocrouter = Router(rconfig)
-
- state = module.params['state']
-
- ########
- # Delete
- ########
- if state == 'absent':
- if not ocrouter.exists():
- module.exit_json(changed=False, state="absent")
-
- if module.check_mode:
- module.exit_json(changed=False, msg='Would have performed a delete.')
-
- api_rval = ocrouter.delete()
- module.exit_json(changed=True, results=api_rval, state="absent")
-
-
- if state == 'present':
- ########
- # Create
- ########
- if not ocrouter.exists():
-
- if module.check_mode:
- module.exit_json(changed=False, msg='Would have performed a create.')
-
- api_rval = ocrouter.create()
-
- module.exit_json(changed=True, results=api_rval, state="present")
-
- ########
- # Update
- ########
- if not ocrouter.needs_update():
- module.exit_json(changed=False, state="present")
-
- if module.check_mode:
- module.exit_json(changed=False, msg='Would have performed an update.')
-
- api_rval = ocrouter.update()
-
- if api_rval['returncode'] != 0:
- module.fail_json(msg=api_rval)
-
- module.exit_json(changed=True, results=api_rval, state="present")
-
- module.exit_json(failed=True,
- changed=False,
- results='Unknown state passed. %s' % state,
- state="unknown")
-
-# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
-# import module snippets. These are required
-from ansible.module_utils.basic import *
-main()
diff --git a/roles/lib_openshift_api/build/ansible/secret.py b/roles/lib_openshift_api/build/ansible/secret.py
deleted file mode 100644
index 8df7bbc64..000000000
--- a/roles/lib_openshift_api/build/ansible/secret.py
+++ /dev/null
@@ -1,121 +0,0 @@
-# pylint: skip-file
-
-# pylint: disable=too-many-branches
-def main():
- '''
- ansible oc module for secrets
- '''
-
- module = AnsibleModule(
- argument_spec=dict(
- kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
- state=dict(default='present', type='str',
- choices=['present', 'absent', 'list']),
- debug=dict(default=False, type='bool'),
- namespace=dict(default='default', type='str'),
- name=dict(default=None, type='str'),
- files=dict(default=None, type='list'),
- delete_after=dict(default=False, type='bool'),
- contents=dict(default=None, type='list'),
- force=dict(default=False, type='bool'),
- ),
- mutually_exclusive=[["contents", "files"]],
-
- supports_check_mode=True,
- )
- occmd = Secret(module.params['namespace'],
- module.params['name'],
- kubeconfig=module.params['kubeconfig'],
- verbose=module.params['debug'])
-
- state = module.params['state']
-
- api_rval = occmd.get()
-
- #####
- # Get
- #####
- if state == 'list':
- module.exit_json(changed=False, results=api_rval['results'], state="list")
-
- if not module.params['name']:
- module.fail_json(msg='Please specify a name when state is absent|present.')
- ########
- # Delete
- ########
- if state == 'absent':
- if not Utils.exists(api_rval['results'], module.params['name']):
- module.exit_json(changed=False, state="absent")
-
- if module.check_mode:
- module.exit_json(changed=False, msg='Would have performed a delete.')
-
- api_rval = occmd.delete()
- module.exit_json(changed=True, results=api_rval, state="absent")
-
-
- if state == 'present':
- if module.params['files']:
- files = module.params['files']
- elif module.params['contents']:
- files = Utils.create_files_from_contents(module.params['contents'])
- else:
- module.fail_json(msg='Either specify files or contents.')
-
- ########
- # Create
- ########
- if not Utils.exists(api_rval['results'], module.params['name']):
-
- if module.check_mode:
- module.exit_json(changed=False, msg='Would have performed a create.')
-
- api_rval = occmd.create(module.params['files'], module.params['contents'])
-
- # Remove files
- if files and module.params['delete_after']:
- Utils.cleanup(files)
-
- module.exit_json(changed=True, results=api_rval, state="present")
-
- ########
- # Update
- ########
- secret = occmd.prep_secret(module.params['files'], module.params['contents'])
-
- if secret['returncode'] != 0:
- module.fail_json(msg=secret)
-
- if Utils.check_def_equal(secret['results'], api_rval['results'][0]):
-
- # Remove files
- if files and module.params['delete_after']:
- Utils.cleanup(files)
-
- module.exit_json(changed=False, results=secret['results'], state="present")
-
- if module.check_mode:
- module.exit_json(changed=False, msg='Would have performed an update.')
-
- api_rval = occmd.update(files, force=module.params['force'])
-
- # Remove files
- if files and module.params['delete_after']:
- Utils.cleanup(files)
-
- if api_rval['returncode'] != 0:
- module.fail_json(msg=api_rval)
-
-
- module.exit_json(changed=True, results=api_rval, state="present")
-
- module.exit_json(failed=True,
- changed=False,
- results='Unknown state passed. %s' % state,
- state="unknown")
-
-# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
-# import module snippets. These are required
-from ansible.module_utils.basic import *
-
-main()
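
For reference, the `contents` parameter that this module hands to `Utils.create_files_from_contents` is a list of path/content dicts; a sample of that shape, mirroring the secrets test playbook later in this diff:

```python
# Each entry becomes a temp file, and the resulting paths are then
# passed to 'oc secrets new' as name=path pairs.
contents = [
    {'path': 'config.yml', 'content': 'value: True\n'},
    {'path': 'passwords.yml', 'content': 'test1\ntest2\ntest3\ntest4\n'},
]
```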
diff --git a/roles/lib_openshift_api/build/generate.py b/roles/lib_openshift_api/build/generate.py
deleted file mode 100755
index 9fc1986f1..000000000
--- a/roles/lib_openshift_api/build/generate.py
+++ /dev/null
@@ -1,64 +0,0 @@
-#!/usr/bin/env python
-'''
- Generate the openshift-ansible/roles/lib_openshift_cli/library/ modules.
-'''
-
-import os
-
-# pylint: disable=anomalous-backslash-in-string
-GEN_STR = "#!/usr/bin/env python\n" + \
- "# ___ ___ _ _ ___ ___ _ _____ ___ ___\n" + \
- "# / __| __| \| | __| _ \ /_\_ _| __| \\\n" + \
- "# | (_ | _|| .` | _|| / / _ \| | | _|| |) |\n" + \
- "# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____\n" + \
- "# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|\n" + \
- "# | |) | (_) | | .` | (_) || | | _|| |) | | | |\n" + \
- "# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|\n"
-
-OPENSHIFT_ANSIBLE_PATH = os.path.dirname(os.path.realpath(__file__))
-
-
-FILES = {'oc_obj.py': ['src/base.py',
- '../../lib_yaml_editor/build/src/yedit.py',
- 'src/obj.py',
- 'ansible/obj.py',
- ],
- 'oc_secret.py': ['src/base.py',
- '../../lib_yaml_editor/build/src/yedit.py',
- 'src/secret.py',
- 'ansible/secret.py',
- ],
- 'oc_edit.py': ['src/base.py',
- '../../lib_yaml_editor/build/src/yedit.py',
- 'src/edit.py',
- 'ansible/edit.py',
- ],
- 'oadm_router.py': ['src/base.py',
- '../../lib_yaml_editor/build/src/yedit.py',
- 'src/router.py',
- 'ansible/router.py',
- ],
- }
-
-
-def main():
- ''' combine the necessary files to create the ansible module '''
- library = os.path.join(OPENSHIFT_ANSIBLE_PATH, '..', 'library/')
- for fname, parts in FILES.items():
- with open(os.path.join(library, fname), 'w') as afd:
- afd.seek(0)
- afd.write(GEN_STR)
- for fpart in parts:
- with open(os.path.join(OPENSHIFT_ANSIBLE_PATH, fpart)) as pfd:
- # the first line is the pylint skip-file marker, so skip it
- for idx, line in enumerate(pfd):
- if idx == 0 and 'skip-file' in line:
- continue
-
- afd.write(line)
-
-
-if __name__ == '__main__':
- main()
-
-
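
The whole generator is plain concatenation with the per-part `# pylint: skip-file` header dropped; a self-contained restatement of that filter, assuming string inputs instead of file handles:

```python
def concat_parts(parts):
    '''join source parts, skipping each part's pylint marker line'''
    out = []
    for text in parts:
        for idx, line in enumerate(text.splitlines(True)):
            if idx == 0 and 'skip-file' in line:
                continue
            out.append(line)
    return ''.join(out)

print(concat_parts(['# pylint: skip-file\nBASE = True\n',
                    'def main():\n    pass\n']))
```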
diff --git a/roles/lib_openshift_api/build/src/base.py b/roles/lib_openshift_api/build/src/base.py
deleted file mode 100644
index 257379d92..000000000
--- a/roles/lib_openshift_api/build/src/base.py
+++ /dev/null
@@ -1,300 +0,0 @@
-# pylint: skip-file
-'''
- OpenShiftCLI class that wraps the oc commands in a subprocess
-'''
-
-import atexit
-import json
-import os
-import shutil
-import subprocess
-import re
-
-import yaml
-# This is here because of a bug that causes yaml
-# to incorrectly handle timezone info on timestamps
-def timestamp_constructor(_, node):
- '''return timestamps as strings'''
- return str(node.value)
-yaml.add_constructor(u'tag:yaml.org,2002:timestamp', timestamp_constructor)
-
-# pylint: disable=too-few-public-methods
-class OpenShiftCLI(object):
- ''' Class to wrap the command line tools '''
- def __init__(self,
- namespace,
- kubeconfig='/etc/origin/master/admin.kubeconfig',
- verbose=False):
- ''' Constructor for OpenshiftCLI '''
- self.namespace = namespace
- self.verbose = verbose
- self.kubeconfig = kubeconfig
-
- # Pylint allows only 5 arguments to be passed.
- # pylint: disable=too-many-arguments
- def _replace_content(self, resource, rname, content, force=False):
- ''' replace the current object with the content '''
- res = self._get(resource, rname)
- if not res['results']:
- return res
-
- fname = '/tmp/%s' % rname
- yed = Yedit(fname, res['results'][0])
- changes = []
- for key, value in content.items():
- changes.append(yed.put(key, value))
-
- if any([not change[0] for change in changes]):
- return {'returncode': 0, 'updated': False}
-
- yed.write()
-
- atexit.register(Utils.cleanup, [fname])
-
- return self._replace(fname, force)
-
- def _replace(self, fname, force=False):
- '''replace the resource defined in the given file '''
- cmd = ['-n', self.namespace, 'replace', '-f', fname]
- if force:
- cmd.append('--force')
- return self.openshift_cmd(cmd)
-
- def _create(self, fname):
- '''create a resource from the given file '''
- return self.openshift_cmd(['create', '-f', fname, '-n', self.namespace])
-
- def _delete(self, resource, rname):
- '''delete the named resource '''
- return self.openshift_cmd(['delete', resource, rname, '-n', self.namespace])
-
- def _get(self, resource, rname=None):
- '''return one resource by name, or all resources of a kind '''
- cmd = ['get', resource, '-o', 'json', '-n', self.namespace]
- if rname:
- cmd.append(rname)
-
- rval = self.openshift_cmd(cmd, output=True)
-
- # Ensure results are returned in an array
- if rval.has_key('items'):
- rval['results'] = rval['items']
- elif not isinstance(rval['results'], list):
- rval['results'] = [rval['results']]
-
- return rval
-
- def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json'):
- '''Base command for oc and oadm '''
- #cmds = ['/usr/bin/oc', '--config', self.kubeconfig]
- cmds = []
- if oadm:
- cmds = ['/usr/bin/oadm']
- else:
- cmds = ['/usr/bin/oc']
-
- cmds.extend(cmd)
-
- rval = {}
- results = ''
- err = None
-
- if self.verbose:
- print ' '.join(cmds)
-
- proc = subprocess.Popen(cmds,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- env={'KUBECONFIG': self.kubeconfig})
-
- proc.wait()
- stdout = proc.stdout.read()
- stderr = proc.stderr.read()
- rval = {"returncode": proc.returncode,
- "results": results,
- "cmd": ' '.join(cmds),
- }
-
- if proc.returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as err:
- if "No JSON object could be decoded" in err.message:
- err = err.message
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print stdout
- print stderr
- print
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds
- })
-
- else:
- rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {},
- })
-
- return rval
-
-class Utils(object):
- ''' utilities for openshiftcli modules '''
- @staticmethod
- def create_file(rname, data, ftype=None):
- ''' create a file in tmp with name and contents'''
- path = os.path.join('/tmp', rname)
- with open(path, 'w') as fds:
- if ftype == 'yaml':
- fds.write(yaml.safe_dump(data, default_flow_style=False))
-
- elif ftype == 'json':
- fds.write(json.dumps(data))
- else:
- fds.write(data)
-
- # Register cleanup when module is done
- atexit.register(Utils.cleanup, [path])
- return path
-
- @staticmethod
- def create_files_from_contents(data):
- '''Turn an array of dict: filename, content into a files array'''
- files = []
-
- for sfile in data:
- path = Utils.create_file(sfile['path'], sfile['content'])
- files.append(path)
-
- return files
-
- @staticmethod
- def cleanup(files):
- '''Clean up on exit '''
- for sfile in files:
- if os.path.exists(sfile):
- if os.path.isdir(sfile):
- shutil.rmtree(sfile)
- elif os.path.isfile(sfile):
- os.remove(sfile)
-
-
- @staticmethod
- def exists(results, _name):
- ''' Check to see if the results include the name '''
- if not results:
- return False
-
-
- if Utils.find_result(results, _name):
- return True
-
- return False
-
- @staticmethod
- def find_result(results, _name):
- ''' Find the specified result by name'''
- rval = None
- for result in results:
- if result.has_key('metadata') and result['metadata']['name'] == _name:
- rval = result
- break
-
- return rval
-
- @staticmethod
- def get_resource_file(sfile, sfile_type='yaml'):
- ''' return the parsed contents of a resource file '''
- contents = None
- with open(sfile) as sfd:
- contents = sfd.read()
-
- if sfile_type == 'yaml':
- contents = yaml.safe_load(contents)
- elif sfile_type == 'json':
- contents = json.loads(contents)
-
- return contents
-
- # Disabling too-many-branches. This is a yaml dictionary comparison function
- # pylint: disable=too-many-branches,too-many-return-statements
- @staticmethod
- def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
- ''' Given a user defined definition, compare it with the results given back by our query. '''
-
- # Currently these values are autogenerated and we do not need to check them
- skip = ['metadata', 'status']
- if skip_keys:
- skip.extend(skip_keys)
-
- for key, value in result_def.items():
- if key in skip:
- continue
-
- # Both are lists
- if isinstance(value, list):
- if not isinstance(user_def[key], list):
- if debug:
- print 'user_def[key] is not a list'
- return False
-
- for values in zip(user_def[key], value):
- if isinstance(values[0], dict) and isinstance(values[1], dict):
- if debug:
- print 'sending list - list'
- print type(values[0])
- print type(values[1])
- result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
- if not result:
- print 'list compare returned false'
- return False
-
- elif value != user_def[key]:
- if debug:
- print 'value should be identical'
- print value
- print user_def[key]
- return False
-
- # recurse on a dictionary
- elif isinstance(value, dict):
- if not isinstance(user_def[key], dict):
- if debug:
- print "dict returned false not instance of dict"
- return False
-
- # before passing ensure keys match
- api_values = set(value.keys()) - set(skip)
- user_values = set(user_def[key].keys()) - set(skip)
- if api_values != user_values:
- if debug:
- print api_values
- print user_values
- print "keys are not equal in dict"
- return False
-
- result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
- if not result:
- if debug:
- print "dict returned false"
- print result
- return False
-
- # Verify each key, value pair is the same
- else:
- if not user_def.has_key(key) or value != user_def[key]:
- if debug:
- print "value not equal; user_def does not have key"
- print value
- print user_def[key]
- return False
-
- return True
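
Because `check_def_equal` drives every `needs_update` decision in these modules, a small illustration of its contract may help; the dicts below are hypothetical:

```python
user_def = {'spec': {'replicas': 2}}
live_def = {'spec': {'replicas': 2},
            'metadata': {'name': 'router', 'uid': 'abc123'},
            'status': {'latestVersion': 3}}
# Utils.check_def_equal(user_def, live_def) would return True:
# 'metadata' and 'status' are skipped, and the remaining keys match.
# Changing replicas on either side would make it return False.
```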
diff --git a/roles/lib_openshift_api/build/src/edit.py b/roles/lib_openshift_api/build/src/edit.py
deleted file mode 100644
index 7020ace47..000000000
--- a/roles/lib_openshift_api/build/src/edit.py
+++ /dev/null
@@ -1,49 +0,0 @@
-# pylint: skip-file
-
-class Edit(OpenShiftCLI):
- ''' Class to wrap the oc command line tools
- '''
- # pylint: disable=too-many-arguments
- def __init__(self,
- kind,
- namespace,
- resource_name=None,
- kubeconfig='/etc/origin/master/admin.kubeconfig',
- verbose=False):
- ''' Constructor for Edit '''
- super(Edit, self).__init__(namespace, kubeconfig)
- self.namespace = namespace
- self.kind = kind
- self.name = resource_name
- self.kubeconfig = kubeconfig
- self.verbose = verbose
-
- def get(self):
- '''return the resource by name '''
- return self._get(self.kind, self.name)
-
- def update(self, file_name, content, force=False, content_type='yaml'):
- '''run update '''
- if file_name:
- if content_type == 'yaml':
- data = yaml.load(open(file_name))
- elif content_type == 'json':
- data = json.loads(open(file_name).read())
-
- changes = []
- yed = Yedit(file_name, data)
- for key, value in content.items():
- changes.append(yed.put(key, value))
-
- if any([not change[0] for change in changes]):
- return {'returncode': 0, 'updated': False}
-
- yed.write()
-
- atexit.register(Utils.cleanup, [file_name])
-
- return self._replace(file_name, force=force)
-
- return self._replace_content(self.kind, self.name, content, force=force)
-
-
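
`Edit.update` relies on `Yedit.put` (from roles/lib_yaml_editor, spliced in by generate.py) to apply dotted keys such as `spec.template.spec.containers[0].ports[0].containerPort`. A simplified, hypothetical stand-in that handles plain dict keys only, to show the idea; the real Yedit also understands list indexes:

```python
def put(data, dotted_key, value):
    '''write value into data at the dotted path (dict keys only)'''
    keys = dotted_key.split('.')
    node = data
    for key in keys[:-1]:
        node = node.setdefault(key, {})
    node[keys[-1]] = value

doc = {}
put(doc, 'spec.replicas', 2)
assert doc == {'spec': {'replicas': 2}}
```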
diff --git a/roles/lib_openshift_api/build/src/obj.py b/roles/lib_openshift_api/build/src/obj.py
deleted file mode 100644
index 13aeba8e1..000000000
--- a/roles/lib_openshift_api/build/src/obj.py
+++ /dev/null
@@ -1,78 +0,0 @@
-# pylint: skip-file
-
-class OCObject(OpenShiftCLI):
- ''' Class to wrap the oc command line tools '''
-
- # pylint allows 5. we need 6
- # pylint: disable=too-many-arguments
- def __init__(self,
- kind,
- namespace,
- rname=None,
- kubeconfig='/etc/origin/master/admin.kubeconfig',
- verbose=False):
- ''' Constructor for OCObject '''
- super(OCObject, self).__init__(namespace, kubeconfig)
- self.kind = kind
- self.namespace = namespace
- self.name = rname
- self.kubeconfig = kubeconfig
- self.verbose = verbose
-
- def get(self):
- '''return the object by name '''
- return self._get(self.kind, rname=self.name)
-
- def delete(self):
- '''delete the object '''
- return self._delete(self.kind, self.name)
-
- def create(self, files=None, content=None):
- '''create the object from files or content '''
- if files:
- return self._create(files[0])
-
- return self._create(Utils.create_files_from_contents(content))
-
-
- # pylint: disable=too-many-function-args
- def update(self, files=None, content=None, force=False):
- '''run update on the object
-
- This receives a list of file names and takes the first filename and calls replace.
- '''
- if files:
- return self._replace(files[0], force)
-
- return self.update_content(content, force)
-
- def update_content(self, content, force=False):
- '''update the object with the given content'''
- return self._replace_content(self.kind, self.name, content, force=force)
-
- def needs_update(self, files=None, content=None, content_type='yaml'):
- ''' check to see if we need to update '''
- objects = self.get()
- if objects['returncode'] != 0:
- return objects
-
- # pylint: disable=no-member
- data = None
- if files:
- data = Utils.get_resource_file(files[0], content_type)
-
- # if the definitions are equal no update is needed, so return the negation
- return not Utils.check_def_equal(data, objects['results'][0], skip_keys=None, debug=False)
- else:
- data = content
-
- for key, value in data.items():
- if key == 'metadata':
- continue
- if not objects['results'][0].has_key(key):
- return True
- if value != objects['results'][0][key]:
- return True
-
- return False
-
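
The content branch of `needs_update` boils down to a shallow top-level comparison; restated on its own with made-up data for clarity:

```python
def content_needs_update(content, live):
    '''True when any non-metadata key is missing from, or differs in, live'''
    for key, value in content.items():
        if key == 'metadata':
            continue  # autogenerated, never compared
        if key not in live or live[key] != value:
            return True
    return False

assert content_needs_update({'spec': {'replicas': 3}},
                            {'spec': {'replicas': 2}})
assert not content_needs_update({'spec': {'replicas': 2}},
                                {'spec': {'replicas': 2}})
```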
diff --git a/roles/lib_openshift_api/build/src/router.py b/roles/lib_openshift_api/build/src/router.py
deleted file mode 100644
index 69454d594..000000000
--- a/roles/lib_openshift_api/build/src/router.py
+++ /dev/null
@@ -1,152 +0,0 @@
-# pylint: skip-file
-
-import time
-
-class RouterConfig(object):
- ''' RouterConfig is a DTO for the router. '''
- def __init__(self, rname, kubeconfig, router_options):
- self.name = rname
- self.kubeconfig = kubeconfig
- self._router_options = router_options
-
- @property
- def router_options(self):
- ''' return router options '''
- return self._router_options
-
- def to_option_list(self):
- ''' return all options as a list of option strings'''
- return RouterConfig.stringify(self.router_options)
-
- @staticmethod
- def stringify(options):
- ''' render the options hash as a list of --key=value strings '''
- rval = []
- for key, data in options.items():
- if data['include'] and data['value']:
- rval.append('--%s=%s' % (key.replace('_', '-'), data['value']))
-
- return rval
-
-class Router(OpenShiftCLI):
- ''' Class to wrap the oc command line tools '''
- def __init__(self,
- router_config,
- verbose=False):
- ''' Constructor for Router
-
- a router consists of 3 or more parts
- - dc/router
- - svc/router
- - endpoint/router
- '''
- super(Router, self).__init__('default', router_config.kubeconfig, verbose)
- self.rconfig = router_config
- self.verbose = verbose
- self.router_parts = [{'kind': 'dc', 'name': self.rconfig.name},
- {'kind': 'svc', 'name': self.rconfig.name},
- #{'kind': 'endpoints', 'name': self.rconfig.name},
- ]
- def get(self, filter_kind=None):
- ''' return the current state of the router parts '''
- rparts = self.router_parts
- parts = []
- if filter_kind:
- rparts = [part for part in self.router_parts if filter_kind == part['kind']]
-
- for part in rparts:
- parts.append(self._get(part['kind'], rname=part['name']))
-
- return parts
-
- def exists(self):
- '''return True only if every router part exists '''
- parts = self.get()
- for part in parts:
- if part['returncode'] != 0:
- return False
-
- return True
-
- def delete(self):
- '''delete all of the router parts '''
- parts = []
- for part in self.router_parts:
- parts.append(self._delete(part['kind'], part['name']))
-
- return parts
-
- def create(self, dryrun=False, output=False, output_type='json'):
- '''create the router '''
- # We need to create the pem file
- router_pem = '/tmp/router.pem'
- with open(router_pem, 'w') as rfd:
- rfd.write(open(self.rconfig.router_options['cert_file']['value']).read())
- rfd.write(open(self.rconfig.router_options['key_file']['value']).read())
-
- atexit.register(Utils.cleanup, [router_pem])
- self.rconfig.router_options['default_cert']['value'] = router_pem
-
- options = self.rconfig.to_option_list()
-
- cmd = ['router']
- cmd.extend(options)
- if dryrun:
- cmd.extend(['--dry-run=True', '-o', 'json'])
-
- results = self.openshift_cmd(cmd, oadm=True, output=output, output_type=output_type)
-
- return results
-
- def update(self):
- '''run update for the router. This performs a delete and then create '''
- parts = self.delete()
- if any([part['returncode'] != 0 for part in parts]):
- return parts
-
- # Ugly built in sleep here.
- time.sleep(15)
-
- return self.create()
-
- def needs_update(self, verbose=False):
- ''' check to see if we need to update '''
- dc_inmem = self.get(filter_kind='dc')[0]
- if dc_inmem['returncode'] != 0:
- return dc_inmem
-
- user_dc = self.create(dryrun=True, output=True, output_type='raw')
- if user_dc['returncode'] != 0:
- return user_dc
-
- # Since the output from oadm_router is returned as raw
- # we need to parse it. The first line is the stats_password
- user_dc_results = user_dc['results'].split('\n')
- # stats_password = user_dc_results[0]
-
- # Load the string back into json and get the newly created dc
- user_dc = json.loads('\n'.join(user_dc_results[1:]))['items'][0]
-
- # Router needs some exceptions.
- # We do not want to check the autogenerated password for stats admin
- if not self.rconfig.router_options['stats_password']['value']:
- for idx, env_var in enumerate(user_dc['spec']['template']['spec']['containers'][0]['env']):
- if env_var['name'] == 'STATS_PASSWORD':
- env_var['value'] = \
- dc_inmem['results'][0]['spec']['template']['spec']['containers'][0]['env'][idx]['value']
-
- # dry-run doesn't add the protocol to the ports section. We will manually do that.
- for idx, port in enumerate(user_dc['spec']['template']['spec']['containers'][0]['ports']):
- if not port.has_key('protocol'):
- port['protocol'] = 'TCP'
-
- # These are different when generating
- skip = ['dnsPolicy',
- 'terminationGracePeriodSeconds',
- 'restartPolicy', 'timeoutSeconds',
- 'livenessProbe', 'readinessProbe',
- 'terminationMessagePath',
- 'rollingParams',
- ]
-
- return not Utils.check_def_equal(user_dc, dc_inmem['results'][0], skip_keys=skip, debug=verbose)
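
`RouterConfig.stringify` is the piece that turns the options hash built in the Ansible layer into `oadm router` flags; a quick demonstration with hypothetical option values:

```python
def stringify(options):
    '''same logic as RouterConfig.stringify above'''
    rval = []
    for key, data in options.items():
        if data['include'] and data['value']:
            rval.append('--%s=%s' % (key.replace('_', '-'), data['value']))
    return rval

opts = {'service_account': {'value': 'router', 'include': True},
        'replicas': {'value': 2, 'include': True},
        'cert_file': {'value': '/tmp/router.crt', 'include': False}}
print(sorted(stringify(opts)))
# ['--replicas=2', '--service-account=router']
```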
diff --git a/roles/lib_openshift_api/build/src/secret.py b/roles/lib_openshift_api/build/src/secret.py
deleted file mode 100644
index 154716828..000000000
--- a/roles/lib_openshift_api/build/src/secret.py
+++ /dev/null
@@ -1,68 +0,0 @@
-# pylint: skip-file
-
-class Secret(OpenShiftCLI):
- ''' Class to wrap the oc command line tools
- '''
- def __init__(self,
- namespace,
- secret_name=None,
- kubeconfig='/etc/origin/master/admin.kubeconfig',
- verbose=False):
- ''' Constructor for Secret '''
- super(Secret, self).__init__(namespace, kubeconfig)
- self.namespace = namespace
- self.name = secret_name
- self.kubeconfig = kubeconfig
- self.verbose = verbose
-
- def get(self):
- '''return a secret by name '''
- return self._get('secrets', self.name)
-
- def delete(self):
- '''delete a secret by name'''
- return self._delete('secrets', self.name)
-
- def create(self, files=None, contents=None):
- '''Create a secret '''
- if not files:
- files = Utils.create_files_from_contents(contents)
-
- secrets = ["%s=%s" % (os.path.basename(sfile), sfile) for sfile in files]
- cmd = ['-n%s' % self.namespace, 'secrets', 'new', self.name]
- cmd.extend(secrets)
-
- return self.openshift_cmd(cmd)
-
- def update(self, files, force=False):
- '''run update secret
-
- This receives a list of file names and converts it into a secret.
- The secret is then written to disk and passed into the `oc replace` command.
- '''
- secret = self.prep_secret(files)
- if secret['returncode'] != 0:
- return secret
-
- sfile_path = '/tmp/%s' % self.name
- with open(sfile_path, 'w') as sfd:
- sfd.write(json.dumps(secret['results']))
-
- atexit.register(Utils.cleanup, [sfile_path])
-
- return self._replace(sfile_path, force=force)
-
- def prep_secret(self, files=None, contents=None):
- ''' return what the secret would look like if created
- This is accomplished by passing -ojson. This will most likely change in the future
- '''
- if not files:
- files = Utils.create_files_from_contents(contents)
-
- secrets = ["%s=%s" % (os.path.basename(sfile), sfile) for sfile in files]
- cmd = ['-ojson', '-n%s' % self.namespace, 'secrets', 'new', self.name]
- cmd.extend(secrets)
-
- return self.openshift_cmd(cmd, output=True)
-
-
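
Both `create` and `prep_secret` assemble the same `oc secrets new` argument list; shown here with sample paths (and the test playbook's secret name) so the basename=path pairing is visible:

```python
import os

files = ['/tmp/config.yml', '/tmp/passwords.yml']
secrets = ['%s=%s' % (os.path.basename(sfile), sfile) for sfile in files]
cmd = ['-ndefault', 'secrets', 'new', 'kenny']
cmd.extend(secrets)
print(cmd)
# ['-ndefault', 'secrets', 'new', 'kenny',
#  'config.yml=/tmp/config.yml', 'passwords.yml=/tmp/passwords.yml']
```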
diff --git a/roles/lib_openshift_api/build/test/README b/roles/lib_openshift_api/build/test/README
deleted file mode 100644
index af9f05b3d..000000000
--- a/roles/lib_openshift_api/build/test/README
+++ /dev/null
@@ -1,5 +0,0 @@
-After generate.py has run, the ansible modules will be placed under ../../../openshift-ansible/roles/lib_openshift_api/library.
-
-
-To run the tests, invoke each playbook with the module path like this:
-./services.yml -M ../../library
diff --git a/roles/lib_openshift_api/build/test/deploymentconfig.yml b/roles/lib_openshift_api/build/test/deploymentconfig.yml
deleted file mode 100755
index d041ab22a..000000000
--- a/roles/lib_openshift_api/build/test/deploymentconfig.yml
+++ /dev/null
@@ -1,120 +0,0 @@
-#!/usr/bin/ansible-playbook
----
-- hosts: "oo_clusterid_mwoodson:&oo_version_3:&oo_master_primary"
- gather_facts: no
- user: root
-
- post_tasks:
- - copy:
- dest: "/tmp/{{ item }}"
- src: "files/{{ item }}"
- with_items:
- - dc.yml
-
- - name: list dc
- oc_obj:
- kind: dc
- state: list
- namespace: default
- name: router
- register: dcout
-
- - debug:
- var: dcout
-
- - name: absent dc
- oc_obj:
- kind: dc
- state: absent
- namespace: default
- name: router
- register: dcout
-
- - debug:
- var: dcout
-
- - name: present dc
- oc_obj:
- kind: dc
- state: present
- namespace: default
- name: router
- files:
- - /tmp/dc.yml
- register: dcout
-
- - debug:
- var: dcout
-
- - name: dump router
- oc_obj:
- kind: dc
- state: list
- name: router
- register: routerout
-
- - name: write router file
- copy:
- dest: /tmp/dc-mod.json
- content: "{{ routerout.results[0] }}"
-
- - command: cat /tmp/dc-mod.json
- register: catout
-
- - debug:
- msg: "{{ catout }}"
-
- - command: "sed -i 's/: 80/: 81/g' /tmp/dc-mod.json"
- register: catout
-
- - name: present dc update
- oc_obj:
- kind: dc
- state: present
- namespace: default
- name: router
- files:
- - /tmp/dc-mod.json
- delete_after: True
- register: dcout
-
- - debug:
- var: dcout
-
- - include_vars: "files/dc-mod.yml"
-
- - name: absent dc
- oc_obj:
- kind: dc
- state: absent
- namespace: default
- name: router
- register: dcout
-
- - debug:
- var: dcout
-
- - name: present dc
- oc_obj:
- kind: dc
- state: present
- namespace: default
- name: router
- files:
- - /tmp/dc.yml
- delete_after: True
- register: dcout
-
- - name: present dc
- oc_obj:
- kind: dc
- state: present
- namespace: default
- name: router
- content: "{{ dc }}"
- delete_after: True
- register: dcout
-
- - debug:
- var: dcout
-
diff --git a/roles/lib_openshift_api/build/test/edit.yml b/roles/lib_openshift_api/build/test/edit.yml
deleted file mode 100755
index 9aa01303a..000000000
--- a/roles/lib_openshift_api/build/test/edit.yml
+++ /dev/null
@@ -1,53 +0,0 @@
-#!/usr/bin/ansible-playbook
----
-- hosts: "oo_clusterid_mwoodson:&oo_version_3:&oo_master_primary"
- gather_facts: no
- user: root
-
- post_tasks:
- - copy:
- dest: "/tmp/{{ item }}"
- src: "files/{{ item }}"
- with_items:
- - dc.yml
-
- - name: present dc
- oc_edit:
- kind: dc
- namespace: default
- name: router
- content:
- spec.template.spec.containers[0].ports[0].containerPort: 80
- spec.template.spec.containers[0].ports[0].hostPort: 80
- register: dcout
-
- - debug:
- var: dcout
-
- - name: present dc
- oc_edit:
- kind: dc
- namespace: default
- name: router
- content:
- spec.template.spec.containers[0].ports[0].containerPort: 81
- spec.template.spec.containers[0].ports[0].hostPort: 81
- file_format: yaml
- register: dcout
-
- - debug:
- var: dcout
-
- - name: present dc
- oc_edit:
- kind: dc
- namespace: default
- name: router
- content:
- spec.template.spec.containers[0].ports[0].containerPort: 80
- spec.template.spec.containers[0].ports[0].hostPort: 80
- file_format: yaml
- register: dcout
-
- - debug:
- var: dcout
diff --git a/roles/lib_openshift_api/build/test/files/config.yml b/roles/lib_openshift_api/build/test/files/config.yml
deleted file mode 100644
index c544c6fd4..000000000
--- a/roles/lib_openshift_api/build/test/files/config.yml
+++ /dev/null
@@ -1 +0,0 @@
-value: True
diff --git a/roles/lib_openshift_api/build/test/files/dc-mod.yml b/roles/lib_openshift_api/build/test/files/dc-mod.yml
deleted file mode 100644
index 6c700d6c7..000000000
--- a/roles/lib_openshift_api/build/test/files/dc-mod.yml
+++ /dev/null
@@ -1,124 +0,0 @@
-dc:
- path:
- dc-mod.yml
- content:
- apiVersion: v1
- kind: DeploymentConfig
- metadata:
- labels:
- router: router
- name: router
- namespace: default
- resourceVersion: "84016"
- selfLink: /oapi/v1/namespaces/default/deploymentconfigs/router
- uid: 48f8b9d9-ed42-11e5-9903-0a9a9d4e7f2b
- spec:
- replicas: 2
- selector:
- router: router
- strategy:
- resources: {}
- rollingParams:
- intervalSeconds: 1
- maxSurge: 0
- maxUnavailable: 25%
- timeoutSeconds: 600
- updatePercent: -25
- updatePeriodSeconds: 1
- type: Rolling
- template:
- metadata:
- creationTimestamp: null
- labels:
- router: router
- spec:
- containers:
- - env:
- - name: DEFAULT_CERTIFICATE
- - name: OPENSHIFT_CA_DATA
- value: |
- -----BEGIN CERTIFICATE-----
- MIIC5jCCAdCgAwIBAgIBATALBgkqhkiG9w0BAQswJjEkMCIGA1UEAwwbb3BlbnNo
- -----END CERTIFICATE-----
- - name: OPENSHIFT_CERT_DATA
- value: |
- -----BEGIN CERTIFICATE-----
- MIIDDTCCAfegAwIBAgIBCDALBgkqhkiG9w0BAQswJjEkMCIGA1UEAwwbb3BlbnNo
- -----END CERTIFICATE-----
- - name: OPENSHIFT_INSECURE
- value: "false"
- - name: OPENSHIFT_KEY_DATA
- value: |
- -----BEGIN RSA PRIVATE KEY-----
- MIIEogIBAAKCAQEA2lf49DrPHfCdCORcnIbmDVrx8yos7trjWdBvuledijyslRVR
- -----END RSA PRIVATE KEY-----
- - name: OPENSHIFT_MASTER
- value: https://internal.api.mwoodson.openshift.com
- - name: ROUTER_EXTERNAL_HOST_HOSTNAME
- - name: ROUTER_EXTERNAL_HOST_HTTPS_VSERVER
- - name: ROUTER_EXTERNAL_HOST_HTTP_VSERVER
- - name: ROUTER_EXTERNAL_HOST_INSECURE
- value: "false"
- - name: ROUTER_EXTERNAL_HOST_PARTITION_PATH
- - name: ROUTER_EXTERNAL_HOST_PASSWORD
- - name: ROUTER_EXTERNAL_HOST_PRIVKEY
- value: /etc/secret-volume/router.pem
- - name: ROUTER_EXTERNAL_HOST_USERNAME
- - name: ROUTER_SERVICE_NAME
- value: router
- - name: ROUTER_SERVICE_NAMESPACE
- value: default
- - name: STATS_PASSWORD
- value: ugCk6YBm4q
- - name: STATS_PORT
- value: "1936"
- - name: STATS_USERNAME
- value: admin
- image: openshift3/ose-haproxy-router:v3.1.1.6
- imagePullPolicy: IfNotPresent
- livenessProbe:
- httpGet:
- host: localhost
- path: /healthz
- port: 1936
- scheme: HTTP
- initialDelaySeconds: 10
- timeoutSeconds: 1
- name: router
- ports:
- - containerPort: 81
- hostPort: 81
- protocol: TCP
- - containerPort: 443
- hostPort: 443
- protocol: TCP
- - containerPort: 1936
- hostPort: 1936
- name: stats
- protocol: TCP
- readinessProbe:
- httpGet:
- host: localhost
- path: /healthz
- port: 1937
- scheme: HTTP
- timeoutSeconds: 1
- resources: {}
- terminationMessagePath: /dev/termination-log
- dnsPolicy: ClusterFirst
- hostNetwork: true
- nodeSelector:
- type: infra
- restartPolicy: Always
- securityContext: {}
- serviceAccount: router
- serviceAccountName: router
- terminationGracePeriodSeconds: 30
- triggers:
- - type: ConfigChange
- status:
- details:
- causes:
- - type: ConfigChange
- latestVersion: 1
-
diff --git a/roles/lib_openshift_api/build/test/files/dc.yml b/roles/lib_openshift_api/build/test/files/dc.yml
deleted file mode 100644
index 24f690ef4..000000000
--- a/roles/lib_openshift_api/build/test/files/dc.yml
+++ /dev/null
@@ -1,120 +0,0 @@
-apiVersion: v1
-kind: DeploymentConfig
-metadata:
- creationTimestamp: 2016-04-01T15:23:29Z
- labels:
- router: router
- name: router
- namespace: default
- resourceVersion: "1338477"
- selfLink: /oapi/v1/namespaces/default/deploymentconfigs/router
- uid: b00c7eba-f81d-11e5-809b-0a581f893e3f
-spec:
- replicas: 2
- selector:
- router: router
- strategy:
- resources: {}
- rollingParams:
- intervalSeconds: 1
- maxSurge: 0
- maxUnavailable: 25%
- timeoutSeconds: 600
- updatePercent: -25
- updatePeriodSeconds: 1
- type: Rolling
- template:
- metadata:
- creationTimestamp: null
- labels:
- router: router
- spec:
- containers:
- - env:
- - name: DEFAULT_CERTIFICATE
- - name: OPENSHIFT_CA_DATA
- value: |
- -----BEGIN CERTIFICATE-----
- MIIC5jCCAdCgAwIBAgIBATALBgkqhkiG9w0BAQswJjEkMCIGA1UEAwwbb3BlbnNo
- -----END CERTIFICATE-----
- - name: OPENSHIFT_CERT_DATA
- value: |
- -----BEGIN CERTIFICATE-----
- MIIDDTCCAfegAwIBAgIBCDALBgkqhkiG9w0BAQswJjEkMCIGA1UEAwwbb3BlbnNo
- -----END CERTIFICATE-----
- - name: OPENSHIFT_INSECURE
- value: "false"
- - name: OPENSHIFT_KEY_DATA
- value: |
- -----BEGIN RSA PRIVATE KEY-----
- MIIEogIBAAKCAQEA2lf49DrPHfCdCORcnIbmDVrx8yos7trjWdBvuledijyslRVR
- -----END RSA PRIVATE KEY-----
- - name: OPENSHIFT_MASTER
- value: https://internal.api.mwoodson.openshift.com
- - name: ROUTER_EXTERNAL_HOST_HOSTNAME
- - name: ROUTER_EXTERNAL_HOST_HTTPS_VSERVER
- - name: ROUTER_EXTERNAL_HOST_HTTP_VSERVER
- - name: ROUTER_EXTERNAL_HOST_INSECURE
- value: "false"
- - name: ROUTER_EXTERNAL_HOST_PARTITION_PATH
- - name: ROUTER_EXTERNAL_HOST_PASSWORD
- - name: ROUTER_EXTERNAL_HOST_PRIVKEY
- value: /etc/secret-volume/router.pem
- - name: ROUTER_EXTERNAL_HOST_USERNAME
- - name: ROUTER_SERVICE_NAME
- value: router
- - name: ROUTER_SERVICE_NAMESPACE
- value: default
- - name: STATS_PASSWORD
- value: ugCk6YBm4q
- - name: STATS_PORT
- value: "1936"
- - name: STATS_USERNAME
- value: admin
- image: openshift3/ose-haproxy-router:v3.1.1.6
- imagePullPolicy: IfNotPresent
- livenessProbe:
- httpGet:
- host: localhost
- path: /healthz
- port: 1936
- scheme: HTTP
- initialDelaySeconds: 10
- timeoutSeconds: 1
- name: router
- ports:
- - containerPort: 80
- hostPort: 80
- protocol: TCP
- - containerPort: 443
- hostPort: 443
- protocol: TCP
- - containerPort: 1936
- hostPort: 1936
- name: stats
- protocol: TCP
- readinessProbe:
- httpGet:
- host: localhost
- path: /healthz
- port: 1936
- scheme: HTTP
- timeoutSeconds: 1
- resources: {}
- terminationMessagePath: /dev/termination-log
- dnsPolicy: ClusterFirst
- hostNetwork: true
- nodeSelector:
- type: infra
- restartPolicy: Always
- securityContext: {}
- serviceAccount: router
- serviceAccountName: router
- terminationGracePeriodSeconds: 30
- triggers:
- - type: ConfigChange
-status:
- details:
- causes:
- - type: ConfigChange
- latestVersion: 12
diff --git a/roles/lib_openshift_api/build/test/files/passwords.yml b/roles/lib_openshift_api/build/test/files/passwords.yml
deleted file mode 100644
index fadbf1d85..000000000
--- a/roles/lib_openshift_api/build/test/files/passwords.yml
+++ /dev/null
@@ -1,4 +0,0 @@
-test1
-test2
-test3
-test4
diff --git a/roles/lib_openshift_api/build/test/files/router-mod.json b/roles/lib_openshift_api/build/test/files/router-mod.json
deleted file mode 100644
index 45e2e7c8d..000000000
--- a/roles/lib_openshift_api/build/test/files/router-mod.json
+++ /dev/null
@@ -1,30 +0,0 @@
-{
- "kind": "Service",
- "apiVersion": "v1",
- "metadata": {
- "name": "router",
- "namespace": "default",
- "labels": {
- "router": "router"
- }
- },
- "spec": {
- "ports": [
- {
- "name": "81-tcp",
- "protocol": "TCP",
- "port": 81,
- "targetPort": 81
- }
- ],
- "selector": {
- "router": "router"
- },
- "type": "ClusterIP",
- "sessionAffinity": "None"
- },
- "status": {
- "loadBalancer": {}
- }
-}
-
diff --git a/roles/lib_openshift_api/build/test/files/router.json b/roles/lib_openshift_api/build/test/files/router.json
deleted file mode 100644
index cad3c6f53..000000000
--- a/roles/lib_openshift_api/build/test/files/router.json
+++ /dev/null
@@ -1,29 +0,0 @@
-{
- "apiVersion": "v1",
- "kind": "Service",
- "metadata": {
- "labels": {
- "router": "router"
- },
- "name": "router",
- "namespace": "default"
- },
- "spec": {
- "ports": [
- {
- "name": "80-tcp",
- "port": 80,
- "protocol": "TCP",
- "targetPort": 80
- }
- ],
- "selector": {
- "router": "router"
- },
- "sessionAffinity": "None",
- "type": "ClusterIP"
- },
- "status": {
- "loadBalancer": {}
- }
-}
diff --git a/roles/lib_openshift_api/build/test/roles b/roles/lib_openshift_api/build/test/roles
deleted file mode 120000
index ae82aa9bb..000000000
--- a/roles/lib_openshift_api/build/test/roles
+++ /dev/null
@@ -1 +0,0 @@
-../../../../roles/ \ No newline at end of file
diff --git a/roles/lib_openshift_api/build/test/router.yml b/roles/lib_openshift_api/build/test/router.yml
deleted file mode 100755
index 7ab192b97..000000000
--- a/roles/lib_openshift_api/build/test/router.yml
+++ /dev/null
@@ -1,79 +0,0 @@
-#!/usr/bin/ansible-playbook
----
-- hosts: "oo_clusterid_mwoodson:&oo_master_primary"
- gather_facts: no
- user: root
-
- tasks:
- - oadm_router:
- state: absent
- credentials: /etc/origin/master/openshift-router.kubeconfig
- service_account: router
- replicas: 2
- namespace: default
- selector: type=infra
- cert_file: /etc/origin/master/named_certificates/12ab.mwoodson.openshiftapps.com.crt
- key_file: /etc/origin/master/named_certificates/12ab.mwoodson.openshiftapps.com.key
- register: routerout
-
- - debug: var=routerout
-
- - pause:
- seconds: 10
-
- - oadm_router:
- credentials: /etc/origin/master/openshift-router.kubeconfig
- service_account: router
- replicas: 2
- namespace: default
- selector: type=infra
- cert_file: /etc/origin/master/named_certificates/12ab.mwoodson.openshiftapps.com.crt
- key_file: /etc/origin/master/named_certificates/12ab.mwoodson.openshiftapps.com.key
- register: routerout
-
- - debug: var=routerout
-
- - pause:
- seconds: 10
-
- - oadm_router:
- credentials: /etc/origin/master/openshift-router.kubeconfig
- service_account: router
- replicas: 2
- namespace: default
- selector: type=infra
- cert_file: /etc/origin/master/named_certificates/12ab.mwoodson.openshiftapps.com.crt
- key_file: /etc/origin/master/named_certificates/12ab.mwoodson.openshiftapps.com.key
- register: routerout
-
- - debug: var=routerout
-
- - pause:
- seconds: 10
-
- - oadm_router:
- credentials: /etc/origin/master/openshift-router.kubeconfig
- service_account: router
- replicas: 3
- namespace: default
- selector: type=test
- cert_file: /etc/origin/master/named_certificates/12ab.mwoodson.openshiftapps.com.crt
- key_file: /etc/origin/master/named_certificates/12ab.mwoodson.openshiftapps.com.key
- register: routerout
-
- - debug: var=routerout
-
- - pause:
- seconds: 10
-
- - oadm_router:
- credentials: /etc/origin/master/openshift-router.kubeconfig
- service_account: router
- replicas: 2
- namespace: default
- selector: type=infra
- cert_file: /etc/origin/master/named_certificates/12ab.mwoodson.openshiftapps.com.crt
- key_file: /etc/origin/master/named_certificates/12ab.mwoodson.openshiftapps.com.key
- register: routerout
-
- - debug: var=routerout
diff --git a/roles/lib_openshift_api/build/test/secrets.yml b/roles/lib_openshift_api/build/test/secrets.yml
deleted file mode 100755
index dddc05c4d..000000000
--- a/roles/lib_openshift_api/build/test/secrets.yml
+++ /dev/null
@@ -1,81 +0,0 @@
-#!/usr/bin/ansible-playbook
----
-- hosts: "oo_clusterid_mwoodson:&oo_version_3:&oo_master_primary"
- gather_facts: no
- user: root
-
- post_tasks:
- - copy:
- dest: "/tmp/{{ item }}"
- src: "files/{{ item }}"
- with_items:
- - config.yml
- - passwords.yml
-
- - name: list secrets
- oc_secret:
- state: list
- namespace: default
- name: kenny
- register: secret_out
-
- - debug:
- var: secret_out
-
- - name: absent secrets
- oc_secret:
- state: absent
- namespace: default
- name: kenny
- register: secret_out
-
- - debug:
- var: secret_out
-
- - name: present secrets
- oc_secret:
- state: present
- namespace: default
- name: kenny
- files:
- - /tmp/config.yml
- - /tmp/passwords.yml
- delete_after: True
- register: secret_out
-
- - debug:
- var: secret_out
-
- - name: present secrets
- oc_secret:
- state: present
- namespace: default
- name: kenny
- contents:
- - path: config.yml
- content: "value: True\n"
- - path: passwords.yml
- content: "test1\ntest2\ntest3\ntest4\n"
- delete_after: True
- register: secret_out
-
- - debug:
- var: secret_out
-
- - name: present secrets update
- oc_secret:
- state: present
- namespace: default
- name: kenny
- contents:
- - path: config.yml
- content: "value: True\n"
- - path: passwords.yml
- content: "test1\ntest2\ntest3\ntest4\ntest5\n"
- delete_after: True
- force: True
- register: secret_out
-
- - debug:
- var: secret_out
-
diff --git a/roles/lib_openshift_api/build/test/services.yml b/roles/lib_openshift_api/build/test/services.yml
deleted file mode 100755
index a32e8d012..000000000
--- a/roles/lib_openshift_api/build/test/services.yml
+++ /dev/null
@@ -1,133 +0,0 @@
-#!/usr/bin/ansible-playbook
----
-- hosts: "oo_clusterid_mwoodson:&oo_master_primary"
- gather_facts: no
- user: root
-
- roles:
- - roles/lib_yaml_editor
-
- tasks:
- - copy:
- dest: "/tmp/{{ item }}"
- src: "files/{{ item }}"
- with_items:
- - router.json
- - router-mod.json
-
- - name: list services
- oc_obj:
- kind: service
- state: list
- namespace: default
- name: router
- register: service_out
-
- - debug:
- var: service_out.results
-
- - name: absent service
- oc_obj:
- kind: service
- state: absent
- namespace: default
- name: router
- register: service_out
-
- - debug:
- var: service_out
-
- - name: present service create
- oc_obj:
- kind: service
- state: present
- namespace: default
- name: router
- files:
- - /tmp/router.json
- delete_after: True
- register: service_out
-
- - debug:
- var: service_out
-
- - name: dump router
- oc_obj:
- kind: service
- state: list
- name: router
- namespace: default
- register: routerout
-
- - name: write router file
- copy:
- dest: /tmp/router-mod.json
- content: "{{ routerout.results[0] }}"
-
- - command: cat /tmp/router-mod.json
- register: catout
-
- - debug:
- msg: "{{ catout }}"
-
- - command: "sed -i 's/80-tcp/81-tcp/g' /tmp/router-mod.json"
- register: catout
-
- - name: present service replace
- oc_obj:
- kind: service
- state: present
- namespace: default
- name: router
- files:
- - /tmp/router-mod.json
- #delete_after: True
- register: service_out
-
- - debug:
- var: service_out
-
- - name: list services
- oc_obj:
- kind: service
- state: list
- namespace: default
- name: router
- register: service_out
-
- - debug:
- var: service_out.results
-
- - set_fact:
- new_service: "{{ service_out.results[0] }}"
-
- - yedit:
- src: /tmp/routeryedit
- content: "{{ new_service }}"
- key: spec.ports
- value:
- - name: 80-tcp
- port: 80
- protocol: TCP
- targetPort: 80
-
- - yedit:
- src: /tmp/routeryedit
- state: list
- register: yeditout
-
- - debug:
- var: yeditout
-
- - name: present service replace
- oc_obj:
- kind: service
- state: present
- namespace: default
- name: router
- content: "{{ yeditout.results }}"
- delete_after: True
- register: service_out
-
- - debug:
- var: service_out
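
The yedit tasks above address nested service fields with dotted keys such as spec.ports. A simplified sketch of that key-walking logic — the deleted Yedit class below implements the full version; this RE_KEY is an assumption that deliberately treats '.' as a separator rather than part of a name:

    import re

    RE_KEY = r"(?:\[(-?\d+)\])|([A-Za-z0-9_-]+)"

    def get_entry(data, key):
        # Walk nested dicts/lists using a.b.c / a.b[0] notation.
        for arr_ind, dict_key in re.findall(RE_KEY, key):
            if dict_key and isinstance(data, dict):
                data = data.get(dict_key)
            elif arr_ind and isinstance(data, list) and int(arr_ind) < len(data):
                data = data[int(arr_ind)]
            else:
                return None
        return data

    svc = {'spec': {'ports': [{'name': '80-tcp', 'port': 80, 'protocol': 'TCP'}]}}
    get_entry(svc, 'spec.ports[0].port')  # -> 80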
diff --git a/roles/lib_openshift_api/library/oadm_router.py b/roles/lib_openshift_api/library/oadm_router.py
deleted file mode 100644
index c6b45c14e..000000000
--- a/roles/lib_openshift_api/library/oadm_router.py
+++ /dev/null
@@ -1,807 +0,0 @@
-#!/usr/bin/env python
-# ___ ___ _ _ ___ ___ _ _____ ___ ___
-# / __| __| \| | __| _ \ /_\_ _| __| \
-# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
-# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
-# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
-# | |) | (_) | | .` | (_) || | | _|| |) | | | |
-# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
-'''
- OpenShiftCLI class that wraps the oc commands in a subprocess
-'''
-
-import atexit
-import json
-import os
-import shutil
-import subprocess
-import re
-
-import yaml
-# This is here because of a bug that causes yaml
-# to incorrectly handle timezone info on timestamps
-def timestamp_constructor(_, node):
- '''return timestamps as strings'''
- return str(node.value)
-yaml.add_constructor(u'tag:yaml.org,2002:timestamp', timestamp_constructor)
-
-# pylint: disable=too-few-public-methods
-class OpenShiftCLI(object):
- ''' Class to wrap the command line tools '''
- def __init__(self,
- namespace,
- kubeconfig='/etc/origin/master/admin.kubeconfig',
- verbose=False):
- ''' Constructor for OpenshiftCLI '''
- self.namespace = namespace
- self.verbose = verbose
- self.kubeconfig = kubeconfig
-
- # Pylint allows only 5 arguments to be passed.
- # pylint: disable=too-many-arguments
- def _replace_content(self, resource, rname, content, force=False):
- ''' replace the current object with the content '''
- res = self._get(resource, rname)
- if not res['results']:
- return res
-
- fname = '/tmp/%s' % rname
- yed = Yedit(fname, res['results'][0])
- changes = []
- for key, value in content.items():
- changes.append(yed.put(key, value))
-
- if any([not change[0] for change in changes]):
- return {'returncode': 0, 'updated': False}
-
- yed.write()
-
- atexit.register(Utils.cleanup, [fname])
-
- return self._replace(fname, force)
-
- def _replace(self, fname, force=False):
- '''replace the resource using the given file '''
- cmd = ['-n', self.namespace, 'replace', '-f', fname]
- if force:
- cmd.append('--force')
- return self.openshift_cmd(cmd)
-
- def _create(self, fname):
- '''create a resource from the given file '''
- return self.openshift_cmd(['create', '-f', fname, '-n', self.namespace])
-
- def _delete(self, resource, rname):
- '''delete the named resource '''
- return self.openshift_cmd(['delete', resource, rname, '-n', self.namespace])
-
- def _get(self, resource, rname=None):
- '''return a resource by name '''
- cmd = ['get', resource, '-o', 'json', '-n', self.namespace]
- if rname:
- cmd.append(rname)
-
- rval = self.openshift_cmd(cmd, output=True)
-
- # Ensure results are returned in an array
- if isinstance(rval['results'], dict) and rval['results'].has_key('items'):
- rval['results'] = rval['results']['items']
- elif not isinstance(rval['results'], list):
- rval['results'] = [rval['results']]
-
- return rval
-
- def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json'):
- '''Base command for oc '''
- #cmds = ['/usr/bin/oc', '--config', self.kubeconfig]
- cmds = []
- if oadm:
- cmds = ['/usr/bin/oadm']
- else:
- cmds = ['/usr/bin/oc']
-
- cmds.extend(cmd)
-
- rval = {}
- results = ''
- err = None
-
- if self.verbose:
- print ' '.join(cmds)
-
- proc = subprocess.Popen(cmds,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- env={'KUBECONFIG': self.kubeconfig})
-
- proc.wait()
- stdout = proc.stdout.read()
- stderr = proc.stderr.read()
- rval = {"returncode": proc.returncode,
- "results": results,
- "cmd": ' '.join(cmds),
- }
-
- if proc.returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as err:
- if "No JSON object could be decoded" in err.message:
- err = err.message
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print stdout
- print stderr
- print
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds
- })
-
- else:
- rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {},
- })
-
- return rval
-
-class Utils(object):
- ''' utilities for openshiftcli modules '''
- @staticmethod
- def create_file(rname, data, ftype=None):
- ''' create a file in tmp with name and contents'''
- path = os.path.join('/tmp', rname)
- with open(path, 'w') as fds:
- if ftype == 'yaml':
- fds.write(yaml.safe_dump(data, default_flow_style=False))
-
- elif ftype == 'json':
- fds.write(json.dumps(data))
- else:
- fds.write(data)
-
- # Register cleanup when module is done
- atexit.register(Utils.cleanup, [path])
- return path
-
- @staticmethod
- def create_files_from_contents(data):
- '''Turn a list of {path, content} dicts into a list of file paths'''
- files = []
-
- for sfile in data:
- path = Utils.create_file(sfile['path'], sfile['content'])
- files.append(path)
-
- return files
-
- @staticmethod
- def cleanup(files):
- '''Clean up on exit '''
- for sfile in files:
- if os.path.exists(sfile):
- if os.path.isdir(sfile):
- shutil.rmtree(sfile)
- elif os.path.isfile(sfile):
- os.remove(sfile)
-
-
- @staticmethod
- def exists(results, _name):
- ''' Check to see if the results include the name '''
- if not results:
- return False
-
-
- if Utils.find_result(results, _name):
- return True
-
- return False
-
- @staticmethod
- def find_result(results, _name):
- ''' Find the specified result by name'''
- rval = None
- for result in results:
- if result.has_key('metadata') and result['metadata']['name'] == _name:
- rval = result
- break
-
- return rval
-
- @staticmethod
- def get_resource_file(sfile, sfile_type='yaml'):
- ''' return the service file '''
- contents = None
- with open(sfile) as sfd:
- contents = sfd.read()
-
- if sfile_type == 'yaml':
- contents = yaml.safe_load(contents)
- elif sfile_type == 'json':
- contents = json.loads(contents)
-
- return contents
-
- # Disabling too-many-branches. This is a yaml dictionary comparison function
- # pylint: disable=too-many-branches,too-many-return-statements
- @staticmethod
- def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
- ''' Given a user defined definition, compare it with the results given back by our query. '''
-
- # Currently these values are autogenerated and we do not need to check them
- skip = ['metadata', 'status']
- if skip_keys:
- skip.extend(skip_keys)
-
- for key, value in result_def.items():
- if key in skip:
- continue
-
- # Both are lists
- if isinstance(value, list):
- if not isinstance(user_def[key], list):
- if debug:
- print 'user_def[key] is not a list'
- return False
-
- for values in zip(user_def[key], value):
- if isinstance(values[0], dict) and isinstance(values[1], dict):
- if debug:
- print 'sending list - list'
- print type(values[0])
- print type(values[1])
- result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
- if not result:
- print 'list compare returned false'
- return False
-
- elif value != user_def[key]:
- if debug:
- print 'value should be identical'
- print value
- print user_def[key]
- return False
-
- # recurse on a dictionary
- elif isinstance(value, dict):
- if not isinstance(user_def[key], dict):
- if debug:
- print "dict returned false not instance of dict"
- return False
-
- # before passing ensure keys match
- api_values = set(value.keys()) - set(skip)
- user_values = set(user_def[key].keys()) - set(skip)
- if api_values != user_values:
- if debug:
- print api_values
- print user_values
- print "keys are not equal in dict"
- return False
-
- result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
- if not result:
- if debug:
- print "dict returned false"
- print result
- return False
-
- # Verify each key, value pair is the same
- else:
- if not user_def.has_key(key) or value != user_def[key]:
- if debug:
- print "value not equal; user_def does not have key"
- print value
- print user_def[key]
- return False
-
- return True
-
-class YeditException(Exception):
- ''' Exception class for Yedit '''
- pass
-
-class Yedit(object):
- ''' Class to modify yaml files '''
- re_valid_key = r"(((\[-?\d+\])|([a-zA-Z-./]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([a-zA-Z-./]+)"
-
- def __init__(self, filename=None, content=None, content_type='yaml'):
- self.content = content
- self.filename = filename
- self.__yaml_dict = content
- self.content_type = content_type
- if self.filename and not self.content:
- self.load(content_type=self.content_type)
-
- @property
- def yaml_dict(self):
- ''' getter method for yaml_dict '''
- return self.__yaml_dict
-
- @yaml_dict.setter
- def yaml_dict(self, value):
- ''' setter method for yaml_dict '''
- self.__yaml_dict = value
-
- @staticmethod
- def remove_entry(data, key):
- ''' remove data at location key '''
- if not (key and re.match(Yedit.re_valid_key, key) and isinstance(data, (list, dict))):
- return None
-
- key_indexes = re.findall(Yedit.re_key, key)
- for arr_ind, dict_key in key_indexes[:-1]:
- if dict_key and isinstance(data, dict):
- data = data.get(dict_key, None)
- elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
- data = data[int(arr_ind)]
- else:
- return None
-
- # process last index for remove
- # expected list entry
- if key_indexes[-1][0]:
- if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:
- del data[int(key_indexes[-1][0])]
- return True
-
- # expected dict entry
- elif key_indexes[-1][1]:
- if isinstance(data, dict):
- del data[key_indexes[-1][1]]
- return True
-
- @staticmethod
- def add_entry(data, key, item=None):
- ''' Set an item in a dictionary with key notation a.b.c
- d = {'a': {'b': 'c'}}
- key = a.b
- sets d['a']['b'] = item
- '''
- if not (key and re.match(Yedit.re_valid_key, key) and isinstance(data, (list, dict))):
- return None
-
- curr_data = data
-
- key_indexes = re.findall(Yedit.re_key, key)
- for arr_ind, dict_key in key_indexes[:-1]:
- if dict_key:
- if isinstance(data, dict) and data.has_key(dict_key):
- data = data[dict_key]
- continue
-
- data[dict_key] = {}
- data = data[dict_key]
-
- elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
- data = data[int(arr_ind)]
- else:
- return None
-
- # process last index for add
- # expected list entry
- if key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:
- data[int(key_indexes[-1][0])] = item
-
- # expected dict entry
- elif key_indexes[-1][1] and isinstance(data, dict):
- data[key_indexes[-1][1]] = item
-
- return curr_data
-
- @staticmethod
- def get_entry(data, key):
- ''' Get an item from a dictionary with key notation a.b.c
- d = {'a': {'b': 'c'}}
- key = a.b
- return c
- '''
- if not (key and re.match(Yedit.re_valid_key, key) and isinstance(data, (list, dict))):
- return None
-
- key_indexes = re.findall(Yedit.re_key, key)
- for arr_ind, dict_key in key_indexes:
- if dict_key and isinstance(data, dict):
- data = data.get(dict_key, None)
- elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
- data = data[int(arr_ind)]
- else:
- return None
-
- return data
-
- def write(self):
- ''' write to file '''
- if not self.filename:
- raise YeditException('Please specify a filename.')
-
- with open(self.filename, 'w') as yfd:
- yfd.write(yaml.safe_dump(self.yaml_dict, default_flow_style=False))
-
- def read(self):
- ''' read from file '''
- # check if it exists
- if not self.exists():
- return None
-
- contents = None
- with open(self.filename) as yfd:
- contents = yfd.read()
-
- return contents
-
- def exists(self):
- ''' return whether file exists '''
- if os.path.exists(self.filename):
- return True
-
- return False
-
- def load(self, content_type='yaml'):
- ''' return yaml file '''
- contents = self.read()
-
- if not contents:
- return None
-
- # check if it is yaml
- try:
- if content_type == 'yaml':
- self.yaml_dict = yaml.load(contents)
- elif content_type == 'json':
- self.yaml_dict = json.loads(contents)
- except yaml.YAMLError as _:
- # Error loading yaml or json
- return None
-
- return self.yaml_dict
-
- def get(self, key):
- ''' get a specified key'''
- try:
- entry = Yedit.get_entry(self.yaml_dict, key)
- except KeyError as _:
- entry = None
-
- return entry
-
- def delete(self, key):
- ''' remove key from a dict'''
- try:
- entry = Yedit.get_entry(self.yaml_dict, key)
- except KeyError as _:
- entry = None
- if not entry:
- return (False, self.yaml_dict)
-
- result = Yedit.remove_entry(self.yaml_dict, key)
- if not result:
- return (False, self.yaml_dict)
-
- return (True, self.yaml_dict)
-
- def put(self, key, value):
- ''' put key, value into a dict '''
- try:
- entry = Yedit.get_entry(self.yaml_dict, key)
- except KeyError as _:
- entry = None
-
- if entry == value:
- return (False, self.yaml_dict)
-
- result = Yedit.add_entry(self.yaml_dict, key, value)
- if not result:
- return (False, self.yaml_dict)
-
- return (True, self.yaml_dict)
-
- def create(self, key, value):
- ''' create a yaml file '''
- if not self.exists():
- self.yaml_dict = {key: value}
- return (True, self.yaml_dict)
-
- return (False, self.yaml_dict)
-
-import time
-
-class RouterConfig(object):
- ''' RouterConfig is a DTO for the router. '''
- def __init__(self, rname, kubeconfig, router_options):
- self.name = rname
- self.kubeconfig = kubeconfig
- self._router_options = router_options
-
- @property
- def router_options(self):
- ''' return router options '''
- return self._router_options
-
- def to_option_list(self):
- ''' return all options as a string'''
- return RouterConfig.stringify(self.router_options)
-
- @staticmethod
- def stringify(options):
- ''' return hash as list of key value pairs '''
- rval = []
- for key, data in options.items():
- if data['include'] and data['value']:
- rval.append('--%s=%s' % (key.replace('_', '-'), data['value']))
-
- return rval
-
-class Router(OpenShiftCLI):
- ''' Class to wrap the oc command line tools '''
- def __init__(self,
- router_config,
- verbose=False):
- ''' Constructor for Router
-
- a router consists of 3 or more parts
- - dc/router
- - svc/router
- - endpoint/router
- '''
- super(Router, self).__init__('default', router_config.kubeconfig, verbose)
- self.rconfig = router_config
- self.verbose = verbose
- self.router_parts = [{'kind': 'dc', 'name': self.rconfig.name},
- {'kind': 'svc', 'name': self.rconfig.name},
- #{'kind': 'endpoints', 'name': self.rconfig.name},
- ]
- def get(self, filter_kind=None):
- ''' return the self.router_parts '''
- rparts = self.router_parts
- parts = []
- if filter_kind:
- rparts = [part for part in self.router_parts if filter_kind == part['kind']]
-
- for part in rparts:
- parts.append(self._get(part['kind'], rname=part['name']))
-
- return parts
-
- def exists(self):
- '''return whether all the router parts exist '''
- parts = self.get()
- for part in parts:
- if part['returncode'] != 0:
- return False
-
- return True
-
- def delete(self):
- '''delete the router parts '''
- parts = []
- for part in self.router_parts:
- parts.append(self._delete(part['kind'], part['name']))
-
- return parts
-
- def create(self, dryrun=False, output=False, output_type='json'):
- '''Create the router '''
- # We need to create the pem file
- router_pem = '/tmp/router.pem'
- with open(router_pem, 'w') as rfd:
- rfd.write(open(self.rconfig.router_options['cert_file']['value']).read())
- rfd.write(open(self.rconfig.router_options['key_file']['value']).read())
-
- atexit.register(Utils.cleanup, [router_pem])
- self.rconfig.router_options['default_cert']['value'] = router_pem
-
- options = self.rconfig.to_option_list()
-
- cmd = ['router']
- cmd.extend(options)
- if dryrun:
- cmd.extend(['--dry-run=True', '-o', 'json'])
-
- results = self.openshift_cmd(cmd, oadm=True, output=output, output_type=output_type)
-
- return results
-
- def update(self):
- '''run update for the router. This performs a delete and then create '''
- parts = self.delete()
- if any([part['returncode'] != 0 for part in parts]):
- return parts
-
- # Ugly built-in sleep here.
- time.sleep(15)
-
- return self.create()
-
- def needs_update(self, verbose=False):
- ''' check to see if we need to update '''
- dc_inmem = self.get(filter_kind='dc')[0]
- if dc_inmem['returncode'] != 0:
- return dc_inmem
-
- user_dc = self.create(dryrun=True, output=True, output_type='raw')
- if user_dc['returncode'] != 0:
- return user_dc
-
- # Since the output from oadm_router is returned as raw
- # we need to parse it. The first line is the stats_password
- user_dc_results = user_dc['results'].split('\n')
- # stats_password = user_dc_results[0]
-
- # Load the string back into json and get the newly created dc
- user_dc = json.loads('\n'.join(user_dc_results[1:]))['items'][0]
-
- # Router needs some exceptions.
- # We do not want to check the autogenerated password for stats admin
- if not self.rconfig.router_options['stats_password']['value']:
- for idx, env_var in enumerate(user_dc['spec']['template']['spec']['containers'][0]['env']):
- if env_var['name'] == 'STATS_PASSWORD':
- env_var['value'] = \
- dc_inmem['results'][0]['spec']['template']['spec']['containers'][0]['env'][idx]['value']
-
- # dry-run doesn't add the protocol to the ports section. We will manually do that.
- for idx, port in enumerate(user_dc['spec']['template']['spec']['containers'][0]['ports']):
- if not port.has_key('protocol'):
- port['protocol'] = 'TCP'
-
- # These are different when generating
- skip = ['dnsPolicy',
- 'terminationGracePeriodSeconds',
- 'restartPolicy', 'timeoutSeconds',
- 'livenessProbe', 'readinessProbe',
- 'terminationMessagePath',
- 'rollingParams',
- ]
-
- return not Utils.check_def_equal(user_dc, dc_inmem['results'][0], skip_keys=skip, debug=verbose)
-
-def main():
- '''
- ansible oadm module for router
- '''
-
- module = AnsibleModule(
- argument_spec=dict(
- state=dict(default='present', type='str',
- choices=['present', 'absent']),
- debug=dict(default=False, type='bool'),
- namespace=dict(default='default', type='str'),
- name=dict(default='router', type='str'),
-
- kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
- credentials=dict(default='/etc/origin/master/openshift-router.kubeconfig', type='str'),
- cert_file=dict(default=None, type='str'),
- key_file=dict(default=None, type='str'),
- image=dict(default=None, type='str'), #'openshift3/ose-${component}:${version}'
- latest_image=dict(default=False, type='bool'),
- labels=dict(default=None, type='list'),
- ports=dict(default=['80:80', '443:443'], type='list'),
- replicas=dict(default=1, type='int'),
- selector=dict(default=None, type='str'),
- service_account=dict(default='router', type='str'),
- router_type=dict(default='haproxy-router', type='str'),
- host_network=dict(default=True, type='bool'),
- # external host options
- external_host=dict(default=None, type='str'),
- external_host_vserver=dict(default=None, type='str'),
- external_host_insecure=dict(default=False, type='bool'),
- external_host_partition_path=dict(default=None, type='str'),
- external_host_username=dict(default=None, type='str'),
- external_host_password=dict(default=None, type='str'),
- external_host_private_key=dict(default=None, type='str'),
- # Metrics
- expose_metrics=dict(default=False, type='bool'),
- metrics_image=dict(default=None, type='str'),
- # Stats
- stats_user=dict(default=None, type='str'),
- stats_password=dict(default=None, type='str'),
- stats_port=dict(default=1936, type='int'),
-
- ),
- mutually_exclusive=[["router_type", "images"]],
-
- supports_check_mode=True,
- )
-
- rconfig = RouterConfig(module.params['name'],
- module.params['kubeconfig'],
- {'credentials': {'value': module.params['credentials'], 'include': True},
- 'default_cert': {'value': None, 'include': True},
- 'cert_file': {'value': module.params['cert_file'], 'include': False},
- 'key_file': {'value': module.params['key_file'], 'include': False},
- 'image': {'value': module.params['image'], 'include': True},
- 'latest_image': {'value': module.params['latest_image'], 'include': True},
- 'labels': {'value': module.params['labels'], 'include': True},
- 'ports': {'value': ','.join(module.params['ports']), 'include': True},
- 'replicas': {'value': module.params['replicas'], 'include': True},
- 'selector': {'value': module.params['selector'], 'include': True},
- 'service_account': {'value': module.params['service_account'], 'include': True},
- 'router_type': {'value': module.params['router_type'], 'include': False},
- 'host_network': {'value': module.params['host_network'], 'include': True},
- 'external_host': {'value': module.params['external_host'], 'include': True},
- 'external_host_vserver': {'value': module.params['external_host_vserver'],
- 'include': True},
- 'external_host_insecure': {'value': module.params['external_host_insecure'],
- 'include': True},
- 'external_host_partition_path': {'value': module.params['external_host_partition_path'],
- 'include': True},
- 'external_host_username': {'value': module.params['external_host_username'],
- 'include': True},
- 'external_host_password': {'value': module.params['external_host_password'],
- 'include': True},
- 'external_host_private_key': {'value': module.params['external_host_private_key'],
- 'include': True},
- 'expose_metrics': {'value': module.params['expose_metrics'], 'include': True},
- 'metrics_image': {'value': module.params['metrics_image'], 'include': True},
- 'stats_user': {'value': module.params['stats_user'], 'include': True},
- 'stats_password': {'value': module.params['stats_password'], 'include': True},
- 'stats_port': {'value': module.params['stats_port'], 'include': True},
- })
-
-
- ocrouter = Router(rconfig)
-
- state = module.params['state']
-
- ########
- # Delete
- ########
- if state == 'absent':
- if not ocrouter.exists():
- module.exit_json(changed=False, state="absent")
-
- if module.check_mode:
- module.exit_json(changed=False, msg='Would have performed a delete.')
-
- api_rval = ocrouter.delete()
- module.exit_json(changed=True, results=api_rval, state="absent")
-
-
- if state == 'present':
- ########
- # Create
- ########
- if not ocrouter.exists():
-
- if module.check_mode:
- module.exit_json(changed=False, msg='Would have performed a create.')
-
- api_rval = ocrouter.create()
-
- module.exit_json(changed=True, results=api_rval, state="present")
-
- ########
- # Update
- ########
- if not ocrouter.needs_update():
- module.exit_json(changed=False, state="present")
-
- if module.check_mode:
- module.exit_json(changed=False, msg='Would have performed an update.')
-
- api_rval = ocrouter.update()
-
- if api_rval['returncode'] != 0:
- module.fail_json(msg=api_rval)
-
- module.exit_json(changed=True, results=api_rval, state="present")
-
- module.exit_json(failed=True,
- changed=False,
- results='Unknown state passed. %s' % state,
- state="unknown")
-
-# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
-# import module snippets. These are required.
-from ansible.module_utils.basic import *
-main()
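
Of note in the module above is RouterConfig.stringify, which converts the option hash into `oadm router` flags: underscores become dashes, and options flagged include=False (cert_file, key_file, router_type) never reach the command line. A self-contained sketch, with sorted() added only to make the example's output deterministic:

    def stringify(options):
        # Emit --key=value for every option that is included and non-empty.
        return ['--%s=%s' % (key.replace('_', '-'), data['value'])
                for key, data in sorted(options.items())
                if data['include'] and data['value']]

    stringify({
        'service_account': {'value': 'router', 'include': True},
        'replicas':        {'value': 2, 'include': True},
        'cert_file':       {'value': '/tmp/a.crt', 'include': False},
    })
    # -> ['--replicas=2', '--service-account=router']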
diff --git a/roles/lib_openshift_api/library/oc_edit.py b/roles/lib_openshift_api/library/oc_edit.py
deleted file mode 100644
index e43b6175a..000000000
--- a/roles/lib_openshift_api/library/oc_edit.py
+++ /dev/null
@@ -1,646 +0,0 @@
-#!/usr/bin/env python
-# ___ ___ _ _ ___ ___ _ _____ ___ ___
-# / __| __| \| | __| _ \ /_\_ _| __| \
-# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
-# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
-# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
-# | |) | (_) | | .` | (_) || | | _|| |) | | | |
-# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
-'''
- OpenShiftCLI class that wraps the oc commands in a subprocess
-'''
-
-import atexit
-import json
-import os
-import shutil
-import subprocess
-import re
-
-import yaml
-# This is here because of a bug that causes yaml
-# to incorrectly handle timezone info on timestamps
-def timestamp_constructor(_, node):
- '''return timestamps as strings'''
- return str(node.value)
-yaml.add_constructor(u'tag:yaml.org,2002:timestamp', timestamp_constructor)
-
-# pylint: disable=too-few-public-methods
-class OpenShiftCLI(object):
- ''' Class to wrap the command line tools '''
- def __init__(self,
- namespace,
- kubeconfig='/etc/origin/master/admin.kubeconfig',
- verbose=False):
- ''' Constructor for OpenshiftCLI '''
- self.namespace = namespace
- self.verbose = verbose
- self.kubeconfig = kubeconfig
-
- # Pylint allows only 5 arguments to be passed.
- # pylint: disable=too-many-arguments
- def _replace_content(self, resource, rname, content, force=False):
- ''' replace the current object with the content '''
- res = self._get(resource, rname)
- if not res['results']:
- return res
-
- fname = '/tmp/%s' % rname
- yed = Yedit(fname, res['results'][0])
- changes = []
- for key, value in content.items():
- changes.append(yed.put(key, value))
-
- if any([not change[0] for change in changes]):
- return {'returncode': 0, 'updated': False}
-
- yed.write()
-
- atexit.register(Utils.cleanup, [fname])
-
- return self._replace(fname, force)
-
- def _replace(self, fname, force=False):
- '''replace the resource using the given file '''
- cmd = ['-n', self.namespace, 'replace', '-f', fname]
- if force:
- cmd.append('--force')
- return self.openshift_cmd(cmd)
-
- def _create(self, fname):
- '''create a resource from the given file '''
- return self.openshift_cmd(['create', '-f', fname, '-n', self.namespace])
-
- def _delete(self, resource, rname):
- '''delete the named resource '''
- return self.openshift_cmd(['delete', resource, rname, '-n', self.namespace])
-
- def _get(self, resource, rname=None):
- '''return a resource by name '''
- cmd = ['get', resource, '-o', 'json', '-n', self.namespace]
- if rname:
- cmd.append(rname)
-
- rval = self.openshift_cmd(cmd, output=True)
-
- # Ensure results are returned in an array
- if isinstance(rval['results'], dict) and rval['results'].has_key('items'):
- rval['results'] = rval['results']['items']
- elif not isinstance(rval['results'], list):
- rval['results'] = [rval['results']]
-
- return rval
-
- def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json'):
- '''Base command for oc '''
- #cmds = ['/usr/bin/oc', '--config', self.kubeconfig]
- cmds = []
- if oadm:
- cmds = ['/usr/bin/oadm']
- else:
- cmds = ['/usr/bin/oc']
-
- cmds.extend(cmd)
-
- rval = {}
- results = ''
- err = None
-
- if self.verbose:
- print ' '.join(cmds)
-
- proc = subprocess.Popen(cmds,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- env={'KUBECONFIG': self.kubeconfig})
-
- proc.wait()
- stdout = proc.stdout.read()
- stderr = proc.stderr.read()
- rval = {"returncode": proc.returncode,
- "results": results,
- "cmd": ' '.join(cmds),
- }
-
- if proc.returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as err:
- if "No JSON object could be decoded" in err.message:
- err = err.message
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print stdout
- print stderr
- print
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds
- })
-
- else:
- rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {},
- })
-
- return rval
-
-class Utils(object):
- ''' utilities for openshiftcli modules '''
- @staticmethod
- def create_file(rname, data, ftype=None):
- ''' create a file in tmp with name and contents'''
- path = os.path.join('/tmp', rname)
- with open(path, 'w') as fds:
- if ftype == 'yaml':
- fds.write(yaml.safe_dump(data, default_flow_style=False))
-
- elif ftype == 'json':
- fds.write(json.dumps(data))
- else:
- fds.write(data)
-
- # Register cleanup when module is done
- atexit.register(Utils.cleanup, [path])
- return path
-
- @staticmethod
- def create_files_from_contents(data):
- '''Turn a list of {path, content} dicts into a list of file paths'''
- files = []
-
- for sfile in data:
- path = Utils.create_file(sfile['path'], sfile['content'])
- files.append(path)
-
- return files
-
- @staticmethod
- def cleanup(files):
- '''Clean up on exit '''
- for sfile in files:
- if os.path.exists(sfile):
- if os.path.isdir(sfile):
- shutil.rmtree(sfile)
- elif os.path.isfile(sfile):
- os.remove(sfile)
-
-
- @staticmethod
- def exists(results, _name):
- ''' Check to see if the results include the name '''
- if not results:
- return False
-
-
- if Utils.find_result(results, _name):
- return True
-
- return False
-
- @staticmethod
- def find_result(results, _name):
- ''' Find the specified result by name'''
- rval = None
- for result in results:
- if result.has_key('metadata') and result['metadata']['name'] == _name:
- rval = result
- break
-
- return rval
-
- @staticmethod
- def get_resource_file(sfile, sfile_type='yaml'):
- ''' return the service file '''
- contents = None
- with open(sfile) as sfd:
- contents = sfd.read()
-
- if sfile_type == 'yaml':
- contents = yaml.safe_load(contents)
- elif sfile_type == 'json':
- contents = json.loads(contents)
-
- return contents
-
- # Disabling too-many-branches. This is a yaml dictionary comparison function
- # pylint: disable=too-many-branches,too-many-return-statements
- @staticmethod
- def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
- ''' Given a user defined definition, compare it with the results given back by our query. '''
-
- # Currently these values are autogenerated and we do not need to check them
- skip = ['metadata', 'status']
- if skip_keys:
- skip.extend(skip_keys)
-
- for key, value in result_def.items():
- if key in skip:
- continue
-
- # Both are lists
- if isinstance(value, list):
- if not isinstance(user_def[key], list):
- if debug:
- print 'user_def[key] is not a list'
- return False
-
- for values in zip(user_def[key], value):
- if isinstance(values[0], dict) and isinstance(values[1], dict):
- if debug:
- print 'sending list - list'
- print type(values[0])
- print type(values[1])
- result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
- if not result:
- print 'list compare returned false'
- return False
-
- elif value != user_def[key]:
- if debug:
- print 'value should be identical'
- print value
- print user_def[key]
- return False
-
- # recurse on a dictionary
- elif isinstance(value, dict):
- if not isinstance(user_def[key], dict):
- if debug:
- print "dict returned false not instance of dict"
- return False
-
- # before passing ensure keys match
- api_values = set(value.keys()) - set(skip)
- user_values = set(user_def[key].keys()) - set(skip)
- if api_values != user_values:
- if debug:
- print api_values
- print user_values
- print "keys are not equal in dict"
- return False
-
- result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
- if not result:
- if debug:
- print "dict returned false"
- print result
- return False
-
- # Verify each key, value pair is the same
- else:
- if not user_def.has_key(key) or value != user_def[key]:
- if debug:
- print "value not equal; user_def does not have key"
- print value
- print user_def[key]
- return False
-
- return True
-
-class YeditException(Exception):
- ''' Exception class for Yedit '''
- pass
-
-class Yedit(object):
- ''' Class to modify yaml files '''
- re_valid_key = r"(((\[-?\d+\])|([a-zA-Z-./]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([a-zA-Z-./]+)"
-
- def __init__(self, filename=None, content=None, content_type='yaml'):
- self.content = content
- self.filename = filename
- self.__yaml_dict = content
- self.content_type = content_type
- if self.filename and not self.content:
- self.load(content_type=self.content_type)
-
- @property
- def yaml_dict(self):
- ''' getter method for yaml_dict '''
- return self.__yaml_dict
-
- @yaml_dict.setter
- def yaml_dict(self, value):
- ''' setter method for yaml_dict '''
- self.__yaml_dict = value
-
- @staticmethod
- def remove_entry(data, key):
- ''' remove data at location key '''
- if not (key and re.match(Yedit.re_valid_key, key) and isinstance(data, (list, dict))):
- return None
-
- key_indexes = re.findall(Yedit.re_key, key)
- for arr_ind, dict_key in key_indexes[:-1]:
- if dict_key and isinstance(data, dict):
- data = data.get(dict_key, None)
- elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
- data = data[int(arr_ind)]
- else:
- return None
-
- # process last index for remove
- # expected list entry
- if key_indexes[-1][0]:
- if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:
- del data[int(key_indexes[-1][0])]
- return True
-
- # expected dict entry
- elif key_indexes[-1][1]:
- if isinstance(data, dict):
- del data[key_indexes[-1][1]]
- return True
-
- @staticmethod
- def add_entry(data, key, item=None):
- ''' Set an item in a dictionary with key notation a.b.c
- d = {'a': {'b': 'c'}}
- key = a.b
- sets d['a']['b'] = item
- '''
- if not (key and re.match(Yedit.re_valid_key, key) and isinstance(data, (list, dict))):
- return None
-
- curr_data = data
-
- key_indexes = re.findall(Yedit.re_key, key)
- for arr_ind, dict_key in key_indexes[:-1]:
- if dict_key:
- if isinstance(data, dict) and data.has_key(dict_key):
- data = data[dict_key]
- continue
-
- data[dict_key] = {}
- data = data[dict_key]
-
- elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
- data = data[int(arr_ind)]
- else:
- return None
-
- # process last index for add
- # expected list entry
- if key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:
- data[int(key_indexes[-1][0])] = item
-
- # expected dict entry
- elif key_indexes[-1][1] and isinstance(data, dict):
- data[key_indexes[-1][1]] = item
-
- return curr_data
-
- @staticmethod
- def get_entry(data, key):
- ''' Get an item from a dictionary with key notation a.b.c
- d = {'a': {'b': 'c'}}
- key = a.b
- return c
- '''
- if not (key and re.match(Yedit.re_valid_key, key) and isinstance(data, (list, dict))):
- return None
-
- key_indexes = re.findall(Yedit.re_key, key)
- for arr_ind, dict_key in key_indexes:
- if dict_key and isinstance(data, dict):
- data = data.get(dict_key, None)
- elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
- data = data[int(arr_ind)]
- else:
- return None
-
- return data
-
- def write(self):
- ''' write to file '''
- if not self.filename:
- raise YeditException('Please specify a filename.')
-
- with open(self.filename, 'w') as yfd:
- yfd.write(yaml.safe_dump(self.yaml_dict, default_flow_style=False))
-
- def read(self):
- ''' read from file '''
- # check if it exists
- if not self.exists():
- return None
-
- contents = None
- with open(self.filename) as yfd:
- contents = yfd.read()
-
- return contents
-
- def exists(self):
- ''' return whether file exists '''
- if os.path.exists(self.filename):
- return True
-
- return False
-
- def load(self, content_type='yaml'):
- ''' return yaml file '''
- contents = self.read()
-
- if not contents:
- return None
-
- # check if it is yaml
- try:
- if content_type == 'yaml':
- self.yaml_dict = yaml.load(contents)
- elif content_type == 'json':
- self.yaml_dict = json.loads(contents)
- except yaml.YAMLError as _:
- # Error loading yaml or json
- return None
-
- return self.yaml_dict
-
- def get(self, key):
- ''' get a specified key'''
- try:
- entry = Yedit.get_entry(self.yaml_dict, key)
- except KeyError as _:
- entry = None
-
- return entry
-
- def delete(self, key):
- ''' remove key from a dict'''
- try:
- entry = Yedit.get_entry(self.yaml_dict, key)
- except KeyError as _:
- entry = None
- if not entry:
- return (False, self.yaml_dict)
-
- result = Yedit.remove_entry(self.yaml_dict, key)
- if not result:
- return (False, self.yaml_dict)
-
- return (True, self.yaml_dict)
-
- def put(self, key, value):
- ''' put key, value into a dict '''
- try:
- entry = Yedit.get_entry(self.yaml_dict, key)
- except KeyError as _:
- entry = None
-
- if entry == value:
- return (False, self.yaml_dict)
-
- result = Yedit.add_entry(self.yaml_dict, key, value)
- if not result:
- return (False, self.yaml_dict)
-
- return (True, self.yaml_dict)
-
- def create(self, key, value):
- ''' create a yaml file '''
- if not self.exists():
- self.yaml_dict = {key: value}
- return (True, self.yaml_dict)
-
- return (False, self.yaml_dict)
-
-class Edit(OpenShiftCLI):
- ''' Class to wrap the oc command line tools
- '''
- # pylint: disable=too-many-arguments
- def __init__(self,
- kind,
- namespace,
- resource_name=None,
- kubeconfig='/etc/origin/master/admin.kubeconfig',
- verbose=False):
- ''' Constructor for Edit '''
- super(Edit, self).__init__(namespace, kubeconfig)
- self.namespace = namespace
- self.kind = kind
- self.name = resource_name
- self.kubeconfig = kubeconfig
- self.verbose = verbose
-
- def get(self):
- '''return the resource by name '''
- return self._get(self.kind, self.name)
-
- def update(self, file_name, content, force=False, content_type='yaml'):
- '''run update '''
- if file_name:
- if content_type == 'yaml':
- data = yaml.load(open(file_name))
- elif content_type == 'json':
- data = json.loads(open(file_name).read())
-
- changes = []
- yed = Yedit(file_name, data)
- for key, value in content.items():
- changes.append(yed.put(key, value))
-
- if any([not change[0] for change in changes]):
- return {'returncode': 0, 'updated': False}
-
- yed.write()
-
- atexit.register(Utils.cleanup, [file_name])
-
- return self._replace(file_name, force=force)
-
- return self._replace_content(self.kind, self.name, content, force=force)
-
-
-
-def main():
- '''
- ansible oc module for editing objects
- '''
-
- module = AnsibleModule(
- argument_spec=dict(
- kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
- state=dict(default='present', type='str',
- choices=['present']),
- debug=dict(default=False, type='bool'),
- namespace=dict(default='default', type='str'),
- name=dict(default=None, required=True, type='str'),
- kind=dict(required=True,
- type='str',
- choices=['dc', 'deploymentconfig',
- 'svc', 'service',
- 'scc', 'securitycontextconstraints',
- 'ns', 'namespace', 'project', 'projects',
- 'is', 'imagestream',
- 'istag', 'imagestreamtag',
- 'bc', 'buildconfig',
- 'routes',
- 'node',
- 'secret',
- ]),
- file_name=dict(default=None, type='str'),
- file_format=dict(default='yaml', type='str'),
- content=dict(default=None, required=True, type='dict'),
- force=dict(default=False, type='bool'),
- ),
- supports_check_mode=True,
- )
- ocedit = Edit(module.params['kind'],
- module.params['namespace'],
- module.params['name'],
- kubeconfig=module.params['kubeconfig'],
- verbose=module.params['debug'])
-
- state = module.params['state']
-
- api_rval = ocedit.get()
-
- ########
- # Create
- ########
- if not Utils.exists(api_rval['results'], module.params['name']):
- module.fail_json(msg=api_rval)
-
- ########
- # Update
- ########
- api_rval = ocedit.update(module.params['file_name'],
- module.params['content'],
- module.params['force'],
- module.params['file_format'])
-
-
- if api_rval['returncode'] != 0:
- module.fail_json(msg=api_rval)
-
- if api_rval.has_key('updated') and not api_rval['updated']:
- module.exit_json(changed=False, results=api_rval, state="present")
-
- # return the created object
- api_rval = ocedit.get()
-
- if api_rval['returncode'] != 0:
- module.fail_json(msg=api_rval)
-
- module.exit_json(changed=True, results=api_rval, state="present")
-
- module.exit_json(failed=True,
- changed=False,
- results='Unknown state passed. %s' % state,
- state="unknown")
-
-# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
-# import module snippets. These are required.
-from ansible.module_utils.basic import *
-
-main()
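
The update path above is what makes oc_edit idempotent: every key in `content` is pushed through Yedit.put, which reports whether it actually changed anything, and `oc replace` runs only when something did. A condensed sketch of that decision; it mirrors the deleted code's behavior of skipping the replace as soon as any single put is a no-op:

    def put(data, key, value):
        # Condensed Yedit.put: return (changed, data).
        if data.get(key) == value:
            return (False, data)
        data[key] = value
        return (True, data)

    doc = {'replicas': 2}
    changes = [put(doc, 'replicas', 2), put(doc, 'paused', False)]
    if any(not changed for changed, _ in changes):
        result = {'returncode': 0, 'updated': False}  # skip oc replace
    else:
        result = 'write temp file, then oc replace -f <file>'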
diff --git a/roles/lib_openshift_api/library/oc_obj.py b/roles/lib_openshift_api/library/oc_obj.py
deleted file mode 100644
index f0ea66aee..000000000
--- a/roles/lib_openshift_api/library/oc_obj.py
+++ /dev/null
@@ -1,730 +0,0 @@
-#!/usr/bin/env python
-# ___ ___ _ _ ___ ___ _ _____ ___ ___
-# / __| __| \| | __| _ \ /_\_ _| __| \
-# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
-# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
-# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
-# | |) | (_) | | .` | (_) || | | _|| |) | | | |
-# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
-'''
- OpenShiftCLI class that wraps the oc commands in a subprocess
-'''
-
-import atexit
-import json
-import os
-import shutil
-import subprocess
-import re
-
-import yaml
-# This is here because of a bug that causes yaml
-# to incorrectly handle timezone info on timestamps
-def timestamp_constructor(_, node):
- '''return timestamps as strings'''
- return str(node.value)
-yaml.add_constructor(u'tag:yaml.org,2002:timestamp', timestamp_constructor)
-
-# pylint: disable=too-few-public-methods
-class OpenShiftCLI(object):
- ''' Class to wrap the command line tools '''
- def __init__(self,
- namespace,
- kubeconfig='/etc/origin/master/admin.kubeconfig',
- verbose=False):
- ''' Constructor for OpenshiftCLI '''
- self.namespace = namespace
- self.verbose = verbose
- self.kubeconfig = kubeconfig
-
- # Pylint allows only 5 arguments to be passed.
- # pylint: disable=too-many-arguments
- def _replace_content(self, resource, rname, content, force=False):
- ''' replace the current object with the content '''
- res = self._get(resource, rname)
- if not res['results']:
- return res
-
- fname = '/tmp/%s' % rname
- yed = Yedit(fname, res['results'][0])
- changes = []
- for key, value in content.items():
- changes.append(yed.put(key, value))
-
- if any([not change[0] for change in changes]):
- return {'returncode': 0, 'updated': False}
-
- yed.write()
-
- atexit.register(Utils.cleanup, [fname])
-
- return self._replace(fname, force)
-
- def _replace(self, fname, force=False):
- '''replace the resource using the given file '''
- cmd = ['-n', self.namespace, 'replace', '-f', fname]
- if force:
- cmd.append('--force')
- return self.openshift_cmd(cmd)
-
- def _create(self, fname):
- '''create a resource from the given file '''
- return self.openshift_cmd(['create', '-f', fname, '-n', self.namespace])
-
- def _delete(self, resource, rname):
- '''delete the named resource '''
- return self.openshift_cmd(['delete', resource, rname, '-n', self.namespace])
-
- def _get(self, resource, rname=None):
- '''return a resource by name '''
- cmd = ['get', resource, '-o', 'json', '-n', self.namespace]
- if rname:
- cmd.append(rname)
-
- rval = self.openshift_cmd(cmd, output=True)
-
- # Ensure results are returned in an array
- if isinstance(rval['results'], dict) and rval['results'].has_key('items'):
- rval['results'] = rval['results']['items']
- elif not isinstance(rval['results'], list):
- rval['results'] = [rval['results']]
-
- return rval
-
- def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json'):
- '''Base command for oc '''
- #cmds = ['/usr/bin/oc', '--config', self.kubeconfig]
- cmds = []
- if oadm:
- cmds = ['/usr/bin/oadm']
- else:
- cmds = ['/usr/bin/oc']
-
- cmds.extend(cmd)
-
- rval = {}
- results = ''
- err = None
-
- if self.verbose:
- print ' '.join(cmds)
-
- proc = subprocess.Popen(cmds,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- env={'KUBECONFIG': self.kubeconfig})
-
- proc.wait()
- stdout = proc.stdout.read()
- stderr = proc.stderr.read()
- rval = {"returncode": proc.returncode,
- "results": results,
- "cmd": ' '.join(cmds),
- }
-
- if proc.returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as err:
- if "No JSON object could be decoded" in err.message:
- err = err.message
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print stdout
- print stderr
- print
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds
- })
-
- else:
- rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {},
- })
-
- return rval
-
-class Utils(object):
- ''' utilities for openshiftcli modules '''
- @staticmethod
- def create_file(rname, data, ftype=None):
- ''' create a file in tmp with name and contents'''
- path = os.path.join('/tmp', rname)
- with open(path, 'w') as fds:
- if ftype == 'yaml':
- fds.write(yaml.safe_dump(data, default_flow_style=False))
-
- elif ftype == 'json':
- fds.write(json.dumps(data))
- else:
- fds.write(data)
-
- # Register cleanup when module is done
- atexit.register(Utils.cleanup, [path])
- return path
-
- @staticmethod
- def create_files_from_contents(data):
- '''Turn a list of {path, content} dicts into a list of file paths'''
- files = []
-
- for sfile in data:
- path = Utils.create_file(sfile['path'], sfile['content'])
- files.append(path)
-
- return files
-
- @staticmethod
- def cleanup(files):
- '''Clean up on exit '''
- for sfile in files:
- if os.path.exists(sfile):
- if os.path.isdir(sfile):
- shutil.rmtree(sfile)
- elif os.path.isfile(sfile):
- os.remove(sfile)
-
-
- @staticmethod
- def exists(results, _name):
- ''' Check to see if the results include the name '''
- if not results:
- return False
-
-
- if Utils.find_result(results, _name):
- return True
-
- return False
-
- @staticmethod
- def find_result(results, _name):
- ''' Find the specified result by name'''
- rval = None
- for result in results:
- if result.has_key('metadata') and result['metadata']['name'] == _name:
- rval = result
- break
-
- return rval
-
- @staticmethod
- def get_resource_file(sfile, sfile_type='yaml'):
- ''' return the service file '''
- contents = None
- with open(sfile) as sfd:
- contents = sfd.read()
-
- if sfile_type == 'yaml':
- contents = yaml.safe_load(contents)
- elif sfile_type == 'json':
- contents = json.loads(contents)
-
- return contents
-
- # Disabling too-many-branches. This is a yaml dictionary comparison function
- # pylint: disable=too-many-branches,too-many-return-statements
- @staticmethod
- def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
- ''' Given a user defined definition, compare it with the results given back by our query. '''
-
- # Currently these values are autogenerated and we do not need to check them
- skip = ['metadata', 'status']
- if skip_keys:
- skip.extend(skip_keys)
-
- for key, value in result_def.items():
- if key in skip:
- continue
-
- # Both are lists
- if isinstance(value, list):
- if not isinstance(user_def[key], list):
- if debug:
- print 'user_def[key] is not a list'
- return False
-
- for values in zip(user_def[key], value):
- if isinstance(values[0], dict) and isinstance(values[1], dict):
- if debug:
- print 'sending list - list'
- print type(values[0])
- print type(values[1])
- result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
- if not result:
- print 'list compare returned false'
- return False
-
- elif value != user_def[key]:
- if debug:
- print 'value should be identical'
- print value
- print user_def[key]
- return False
-
- # recurse on a dictionary
- elif isinstance(value, dict):
- if not isinstance(user_def[key], dict):
- if debug:
- print "dict returned false not instance of dict"
- return False
-
- # before passing ensure keys match
- api_values = set(value.keys()) - set(skip)
- user_values = set(user_def[key].keys()) - set(skip)
- if api_values != user_values:
- if debug:
- print api_values
- print user_values
- print "keys are not equal in dict"
- return False
-
- result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
- if not result:
- if debug:
- print "dict returned false"
- print result
- return False
-
- # Verify each key, value pair is the same
- else:
- if not user_def.has_key(key) or value != user_def[key]:
- if debug:
- print "value not equal; user_def does not have key"
- print value
- print user_def[key]
- return False
-
- return True
-
-class YeditException(Exception):
- ''' Exception class for Yedit '''
- pass
-
-class Yedit(object):
- ''' Class to modify yaml files '''
- re_valid_key = r"(((\[-?\d+\])|([a-zA-Z-./]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([a-zA-Z-./]+)"
-
- def __init__(self, filename=None, content=None, content_type='yaml'):
- self.content = content
- self.filename = filename
- self.__yaml_dict = content
- self.content_type = content_type
- if self.filename and not self.content:
- self.load(content_type=self.content_type)
-
- @property
- def yaml_dict(self):
- ''' getter method for yaml_dict '''
- return self.__yaml_dict
-
- @yaml_dict.setter
- def yaml_dict(self, value):
- ''' setter method for yaml_dict '''
- self.__yaml_dict = value
-
- @staticmethod
- def remove_entry(data, key):
- ''' remove data at location key '''
- if not (key and re.match(Yedit.re_valid_key, key) and isinstance(data, (list, dict))):
- return None
-
- key_indexes = re.findall(Yedit.re_key, key)
- for arr_ind, dict_key in key_indexes[:-1]:
- if dict_key and isinstance(data, dict):
- data = data.get(dict_key, None)
- elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
- data = data[int(arr_ind)]
- else:
- return None
-
- # process last index for remove
- # expected list entry
- if key_indexes[-1][0]:
- if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:
- del data[int(key_indexes[-1][0])]
- return True
-
- # expected dict entry
- elif key_indexes[-1][1]:
- if isinstance(data, dict):
- del data[key_indexes[-1][1]]
- return True
-
- @staticmethod
- def add_entry(data, key, item=None):
- ''' Set an item in a dictionary with key notation a.b.c
- d = {'a': {'b': 'c'}}
- key = a.b
- sets d['a']['b'] = item
- '''
- if not (key and re.match(Yedit.re_valid_key, key) and isinstance(data, (list, dict))):
- return None
-
- curr_data = data
-
- key_indexes = re.findall(Yedit.re_key, key)
- for arr_ind, dict_key in key_indexes[:-1]:
- if dict_key:
- if isinstance(data, dict) and data.has_key(dict_key):
- data = data[dict_key]
- continue
-
- data[dict_key] = {}
- data = data[dict_key]
-
- elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
- data = data[int(arr_ind)]
- else:
- return None
-
- # process last index for add
- # expected list entry
- if key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:
- data[int(key_indexes[-1][0])] = item
-
- # expected dict entry
- elif key_indexes[-1][1] and isinstance(data, dict):
- data[key_indexes[-1][1]] = item
-
- return curr_data
-
- @staticmethod
- def get_entry(data, key):
- ''' Get an item from a dictionary with key notation a.b.c
- d = {'a': {'b': 'c'}}
- key = a.b
- return c
- '''
- if not (key and re.match(Yedit.re_valid_key, key) and isinstance(data, (list, dict))):
- return None
-
- key_indexes = re.findall(Yedit.re_key, key)
- for arr_ind, dict_key in key_indexes:
- if dict_key and isinstance(data, dict):
- data = data.get(dict_key, None)
- elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
- data = data[int(arr_ind)]
- else:
- return None
-
- return data
-
- def write(self):
- ''' write to file '''
- if not self.filename:
- raise YeditException('Please specify a filename.')
-
- with open(self.filename, 'w') as yfd:
- yfd.write(yaml.safe_dump(self.yaml_dict, default_flow_style=False))
-
- def read(self):
- ''' read from file '''
- # check if it exists
- if not self.exists():
- return None
-
- contents = None
- with open(self.filename) as yfd:
- contents = yfd.read()
-
- return contents
-
- def exists(self):
- ''' return whether file exists '''
- if os.path.exists(self.filename):
- return True
-
- return False
-
- def load(self, content_type='yaml'):
- ''' return yaml file '''
- contents = self.read()
-
- if not contents:
- return None
-
- # check if it is yaml
- try:
- if content_type == 'yaml':
- self.yaml_dict = yaml.load(contents)
- elif content_type == 'json':
- self.yaml_dict = json.loads(contents)
- except yaml.YAMLError as _:
- # Error loading yaml or json
- return None
-
- return self.yaml_dict
-
- def get(self, key):
- ''' get a specified key'''
- try:
- entry = Yedit.get_entry(self.yaml_dict, key)
- except KeyError as _:
- entry = None
-
- return entry
-
- def delete(self, key):
- ''' remove key from a dict'''
- try:
- entry = Yedit.get_entry(self.yaml_dict, key)
- except KeyError as _:
- entry = None
- if not entry:
- return (False, self.yaml_dict)
-
- result = Yedit.remove_entry(self.yaml_dict, key)
- if not result:
- return (False, self.yaml_dict)
-
- return (True, self.yaml_dict)
-
- def put(self, key, value):
- ''' put key, value into a dict '''
- try:
- entry = Yedit.get_entry(self.yaml_dict, key)
- except KeyError as _:
- entry = None
-
- if entry == value:
- return (False, self.yaml_dict)
-
- result = Yedit.add_entry(self.yaml_dict, key, value)
- if not result:
- return (False, self.yaml_dict)
-
- return (True, self.yaml_dict)
-
- def create(self, key, value):
- ''' create a yaml file '''
- if not self.exists():
- self.yaml_dict = {key: value}
- return (True, self.yaml_dict)
-
- return (False, self.yaml_dict)
-
-class OCObject(OpenShiftCLI):
- ''' Class to wrap the oc command line tools '''
-
- # pylint allows 5. we need 6
- # pylint: disable=too-many-arguments
- def __init__(self,
- kind,
- namespace,
- rname=None,
- kubeconfig='/etc/origin/master/admin.kubeconfig',
- verbose=False):
- ''' Constructor for OCObject '''
- super(OCObject, self).__init__(namespace, kubeconfig)
- self.kind = kind
- self.namespace = namespace
- self.name = rname
- self.kubeconfig = kubeconfig
- self.verbose = verbose
-
- def get(self):
- '''return the named resource '''
- return self._get(self.kind, rname=self.name)
-
- def delete(self):
- '''delete the named resource '''
- return self._delete(self.kind, self.name)
-
- def create(self, files=None, content=None):
- '''Create the resource from the given files or content '''
- if files:
- return self._create(files[0])
-
- return self._create(Utils.create_files_from_contents(content))
-
-
- # pylint: disable=too-many-function-args
- def update(self, files=None, content=None, force=False):
- '''run update dc
-
- This receives a list of file names, takes the first one, and calls replace.
- '''
- if files:
- return self._replace(files[0], force)
-
- return self.update_content(content, force)
-
- def update_content(self, content, force=False):
- '''update the dc with the content'''
- return self._replace_content(self.kind, self.name, content, force=force)
-
- def needs_update(self, files=None, content=None, content_type='yaml'):
- ''' check to see if we need to update '''
- objects = self.get()
- if objects['returncode'] != 0:
- return objects
-
- # pylint: disable=no-member
- data = None
- if files:
- data = Utils.get_resource_file(files[0], content_type)
-
- # if equal then no need. So not equal is True
- return not Utils.check_def_equal(data, objects['results'][0], skip_keys=None, debug=False)
- else:
- data = content
-
- for key, value in data.items():
- if key == 'metadata':
- continue
- if not objects['results'][0].has_key(key):
- return True
- if value != objects['results'][0][key]:
- return True
-
- return False
-
-
-# pylint: disable=too-many-branches
-def main():
- '''
- ansible oc module for managing objects
- '''
-
- module = AnsibleModule(
- argument_spec=dict(
- kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
- state=dict(default='present', type='str',
- choices=['present', 'absent', 'list']),
- debug=dict(default=False, type='bool'),
- namespace=dict(default='default', type='str'),
- name=dict(default=None, type='str'),
- files=dict(default=None, type='list'),
- kind=dict(required=True,
- type='str',
- choices=['dc', 'deploymentconfig',
- 'svc', 'service',
- 'scc', 'securitycontextconstraints',
- 'ns', 'namespace', 'project', 'projects',
- 'is', 'imagestream',
- 'istag', 'imagestreamtag',
- 'bc', 'buildconfig',
- 'routes',
- 'node',
- 'secret',
- ]),
- delete_after=dict(default=False, type='bool'),
- content=dict(default=None, type='dict'),
- force=dict(default=False, type='bool'),
- ),
- mutually_exclusive=[["content", "files"]],
-
- supports_check_mode=True,
- )
- ocobj = OCObject(module.params['kind'],
- module.params['namespace'],
- module.params['name'],
- kubeconfig=module.params['kubeconfig'],
- verbose=module.params['debug'])
-
- state = module.params['state']
-
- api_rval = ocobj.get()
-
- #####
- # Get
- #####
- if state == 'list':
- module.exit_json(changed=False, results=api_rval['results'], state="list")
-
- if not module.params['name']:
- module.fail_json(msg='Please specify a name when state is absent|present.')
- ########
- # Delete
- ########
- if state == 'absent':
- if not Utils.exists(api_rval['results'], module.params['name']):
- module.exit_json(changed=False, state="absent")
-
- if module.check_mode:
- module.exit_json(changed=False, msg='Would have performed a delete.')
-
- api_rval = ocobj.delete()
- module.exit_json(changed=True, results=api_rval, state="absent")
-
- if state == 'present':
- ########
- # Create
- ########
- if not Utils.exists(api_rval['results'], module.params['name']):
-
- if module.check_mode:
- module.exit_json(changed=False, msg='Would have performed a create.')
-
- # Create it here
- api_rval = ocobj.create(module.params['files'], module.params['content'])
- if api_rval['returncode'] != 0:
- module.fail_json(msg=api_rval)
-
- # return the created object
- api_rval = ocobj.get()
-
- if api_rval['returncode'] != 0:
- module.fail_json(msg=api_rval)
-
- # Remove files
- if module.params['files'] and module.params['delete_after']:
- Utils.cleanup(module.params['files'])
-
- module.exit_json(changed=True, results=api_rval, state="present")
-
- ########
- # Update
- ########
- # if a file path is passed, use it.
- update = ocobj.needs_update(module.params['files'], module.params['content'])
- if not isinstance(update, bool):
- module.fail_json(msg=update)
-
- # No changes
- if not update:
- if module.params['files'] and module.params['delete_after']:
- Utils.cleanup(module.params['files'])
-
- module.exit_json(changed=False, results=api_rval['results'][0], state="present")
-
- if module.check_mode:
- module.exit_json(changed=False, msg='Would have performed an update.')
-
- api_rval = ocobj.update(module.params['files'],
- module.params['content'],
- module.params['force'])
-
-
- if api_rval['returncode'] != 0:
- module.fail_json(msg=api_rval)
-
- # return the created object
- api_rval = ocobj.get()
-
- if api_rval['returncode'] != 0:
- module.fail_json(msg=api_rval)
-
- module.exit_json(changed=True, results=api_rval, state="present")
-
- module.exit_json(failed=True,
- changed=False,
- results='Unknown state passed. %s' % state,
- state="unknown")
-
-# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
- # import module snippets. These are required
-from ansible.module_utils.basic import *
-
-main()
diff --git a/roles/lib_openshift_api/library/oc_secret.py b/roles/lib_openshift_api/library/oc_secret.py
deleted file mode 100644
index ca58d7139..000000000
--- a/roles/lib_openshift_api/library/oc_secret.py
+++ /dev/null
@@ -1,702 +0,0 @@
-#!/usr/bin/env python
-# ___ ___ _ _ ___ ___ _ _____ ___ ___
-# / __| __| \| | __| _ \ /_\_ _| __| \
-# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
-# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
-# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
-# | |) | (_) | | .` | (_) || | | _|| |) | | | |
-# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
-'''
- OpenShiftCLI class that wraps the oc commands in a subprocess
-'''
-
-import atexit
-import json
-import os
-import shutil
-import subprocess
-import re
-
-import yaml
-# This is here because of a bug that causes yaml
-# to incorrectly handle timezone info on timestamps
-def timestamp_constructor(_, node):
- '''return timestamps as strings'''
- return str(node.value)
-yaml.add_constructor(u'tag:yaml.org,2002:timestamp', timestamp_constructor)
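A minimal sketch of the effect, assuming PyYAML's default behavior of decoding `tag:yaml.org,2002:timestamp` scalars into datetime objects: with the constructor above registered, timestamps come back from a load as plain strings.

    # hypothetical illustration -- run with the constructor above registered
    doc = yaml.load('created: 2016-03-01T10:00:00Z')
    assert doc['created'] == '2016-03-01T10:00:00Z'  # a str, not a datetime.datetime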
-
-# pylint: disable=too-few-public-methods
-class OpenShiftCLI(object):
- ''' Class to wrap the command line tools '''
- def __init__(self,
- namespace,
- kubeconfig='/etc/origin/master/admin.kubeconfig',
- verbose=False):
- ''' Constructor for OpenshiftCLI '''
- self.namespace = namespace
- self.verbose = verbose
- self.kubeconfig = kubeconfig
-
- # Pylint allows only 5 arguments to be passed.
- # pylint: disable=too-many-arguments
- def _replace_content(self, resource, rname, content, force=False):
- ''' replace the current object with the content '''
- res = self._get(resource, rname)
- if not res['results']:
- return res
-
- fname = '/tmp/%s' % rname
- yed = Yedit(fname, res['results'][0])
- changes = []
- for key, value in content.items():
- changes.append(yed.put(key, value))
-
- if any([not change[0] for change in changes]):
- return {'returncode': 0, 'updated': False}
-
- yed.write()
-
- atexit.register(Utils.cleanup, [fname])
-
- return self._replace(fname, force)
-
- def _replace(self, fname, force=False):
- '''replace the object described in fname '''
- cmd = ['-n', self.namespace, 'replace', '-f', fname]
- if force:
- cmd.append('--force')
- return self.openshift_cmd(cmd)
-
- def _create(self, fname):
- '''create an object from a file '''
- return self.openshift_cmd(['create', '-f', fname, '-n', self.namespace])
-
- def _delete(self, resource, rname):
- '''delete the named resource '''
- return self.openshift_cmd(['delete', resource, rname, '-n', self.namespace])
-
- def _get(self, resource, rname=None):
- '''return a resource, optionally by name '''
- cmd = ['get', resource, '-o', 'json', '-n', self.namespace]
- if rname:
- cmd.append(rname)
-
- rval = self.openshift_cmd(cmd, output=True)
-
- # Ensure results are returned in an array
- if rval.has_key('items'):
- rval['results'] = rval['items']
- elif not isinstance(rval['results'], list):
- rval['results'] = [rval['results']]
-
- return rval
-
- def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json'):
- '''Base command for oc '''
- #cmds = ['/usr/bin/oc', '--config', self.kubeconfig]
- cmds = []
- if oadm:
- cmds = ['/usr/bin/oadm']
- else:
- cmds = ['/usr/bin/oc']
-
- cmds.extend(cmd)
-
- rval = {}
- results = ''
- err = None
-
- if self.verbose:
- print ' '.join(cmds)
-
- proc = subprocess.Popen(cmds,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- env={'KUBECONFIG': self.kubeconfig})
-
- proc.wait()
- stdout = proc.stdout.read()
- stderr = proc.stderr.read()
- rval = {"returncode": proc.returncode,
- "results": results,
- "cmd": ' '.join(cmds),
- }
-
- if proc.returncode == 0:
- if output:
- if output_type == 'json':
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as err:
- if "No JSON object could be decoded" in err.message:
- err = err.message
- elif output_type == 'raw':
- rval['results'] = stdout
-
- if self.verbose:
- print stdout
- print stderr
- print
-
- if err:
- rval.update({"err": err,
- "stderr": stderr,
- "stdout": stdout,
- "cmd": cmds
- })
-
- else:
- rval.update({"stderr": stderr,
- "stdout": stdout,
- "results": {},
- })
-
- return rval
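For orientation, a hedged sketch of how openshift_cmd is typically driven and the shape of its return value; the namespace and resource names are illustrative only.

    # hypothetical usage of the wrapper above
    cli = OpenShiftCLI('default')
    rval = cli.openshift_cmd(['get', 'pods', '-o', 'json', '-n', 'default'], output=True)
    # on success: {'returncode': 0, 'results': <decoded json>, 'cmd': '/usr/bin/oc get pods ...'}
    # on failure: 'returncode' is non-zero and 'stderr'/'stdout' are included
    if rval['returncode'] != 0:
        print rval['stderr']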
-
-class Utils(object):
- ''' utilities for openshiftcli modules '''
- @staticmethod
- def create_file(rname, data, ftype=None):
- ''' create a file in tmp with name and contents'''
- path = os.path.join('/tmp', rname)
- with open(path, 'w') as fds:
- if ftype == 'yaml':
- fds.write(yaml.safe_dump(data, default_flow_style=False))
-
- elif ftype == 'json':
- fds.write(json.dumps(data))
- else:
- fds.write(data)
-
- # Register cleanup when module is done
- atexit.register(Utils.cleanup, [path])
- return path
-
- @staticmethod
- def create_files_from_contents(data):
- '''Turn an array of dict: filename, content into a files array'''
- files = []
-
- for sfile in data:
- path = Utils.create_file(sfile['path'], sfile['content'])
- files.append(path)
-
- return files
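The expected input is a list of dicts carrying `path` and `content` keys; a small sketch with made-up values:

    # hypothetical input -- each entry becomes a file under /tmp,
    # registered for cleanup at interpreter exit
    contents = [{'path': 'username', 'content': 'admin'},
                {'path': 'password', 'content': 's3cret'}]
    files = Utils.create_files_from_contents(contents)
    # files == ['/tmp/username', '/tmp/password']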
-
- @staticmethod
- def cleanup(files):
- '''Clean up on exit '''
- for sfile in files:
- if os.path.exists(sfile):
- if os.path.isdir(sfile):
- shutil.rmtree(sfile)
- elif os.path.isfile(sfile):
- os.remove(sfile)
-
-
- @staticmethod
- def exists(results, _name):
- ''' Check to see if the results include the name '''
- if not results:
- return False
-
-
- if Utils.find_result(results, _name):
- return True
-
- return False
-
- @staticmethod
- def find_result(results, _name):
- ''' Find the specified result by name'''
- rval = None
- for result in results:
- if result.has_key('metadata') and result['metadata']['name'] == _name:
- rval = result
- break
-
- return rval
-
- @staticmethod
- def get_resource_file(sfile, sfile_type='yaml'):
- ''' return the parsed contents of the resource file '''
- contents = None
- with open(sfile) as sfd:
- contents = sfd.read()
-
- if sfile_type == 'yaml':
- contents = yaml.safe_load(contents)
- elif sfile_type == 'json':
- contents = json.loads(contents)
-
- return contents
-
- # Disabling too-many-branches. This is a yaml dictionary comparison function
- # pylint: disable=too-many-branches,too-many-return-statements
- @staticmethod
- def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
- ''' Given a user defined definition, compare it with the results given back by our query. '''
-
- # Currently these values are autogenerated and we do not need to check them
- skip = ['metadata', 'status']
- if skip_keys:
- skip.extend(skip_keys)
-
- for key, value in result_def.items():
- if key in skip:
- continue
-
- # Both are lists
- if isinstance(value, list):
- if not isinstance(user_def[key], list):
- if debug:
- print 'user_def[key] is not a list'
- return False
-
- for values in zip(user_def[key], value):
- if isinstance(values[0], dict) and isinstance(values[1], dict):
- if debug:
- print 'sending list - list'
- print type(values[0])
- print type(values[1])
- result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
- if not result:
- print 'list compare returned false'
- return False
-
- elif value != user_def[key]:
- if debug:
- print 'value should be identical'
- print value
- print user_def[key]
- return False
-
- # recurse on a dictionary
- elif isinstance(value, dict):
- if not isinstance(user_def[key], dict):
- if debug:
- print "dict returned false not instance of dict"
- return False
-
- # before passing ensure keys match
- api_values = set(value.keys()) - set(skip)
- user_values = set(user_def[key].keys()) - set(skip)
- if api_values != user_values:
- if debug:
- print api_values
- print user_values
- print "keys are not equal in dict"
- return False
-
- result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
- if not result:
- if debug:
- print "dict returned false"
- print result
- return False
-
- # Verify each key, value pair is the same
- else:
- if not user_def.has_key(key) or value != user_def[key]:
- if debug:
- print "value not equal; user_def does not have key"
- print value
- print user_def[key]
- return False
-
- return True
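A short sketch of the comparison semantics with hypothetical definitions: `metadata` and `status` are skipped, so a user definition matches an API result whenever the remaining keys agree.

    # hypothetical definitions
    user_def = {'kind': 'Service', 'spec': {'ports': [{'port': 80}]}}
    result_def = {'kind': 'Service',
                  'metadata': {'resourceVersion': '123'},  # ignored
                  'status': {},                            # ignored
                  'spec': {'ports': [{'port': 80}]}}
    assert Utils.check_def_equal(user_def, result_def)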
-
-class YeditException(Exception):
- ''' Exception class for Yedit '''
- pass
-
-class Yedit(object):
- ''' Class to modify yaml files '''
- re_valid_key = r"(((\[-?\d+\])|([a-zA-Z-./]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([a-zA-Z-./]+)"
-
- def __init__(self, filename=None, content=None, content_type='yaml'):
- self.content = content
- self.filename = filename
- self.__yaml_dict = content
- self.content_type = content_type
- if self.filename and not self.content:
- self.load(content_type=self.content_type)
-
- @property
- def yaml_dict(self):
- ''' getter method for yaml_dict '''
- return self.__yaml_dict
-
- @yaml_dict.setter
- def yaml_dict(self, value):
- ''' setter method for yaml_dict '''
- self.__yaml_dict = value
-
- @staticmethod
- def remove_entry(data, key):
- ''' remove data at location key '''
- if not (key and re.match(Yedit.re_valid_key, key) and isinstance(data, (list, dict))):
- return None
-
- key_indexes = re.findall(Yedit.re_key, key)
- for arr_ind, dict_key in key_indexes[:-1]:
- if dict_key and isinstance(data, dict):
- data = data.get(dict_key, None)
- elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
- data = data[int(arr_ind)]
- else:
- return None
-
- # process last index for remove
- # expected list entry
- if key_indexes[-1][0]:
- if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:
- del data[int(key_indexes[-1][0])]
- return True
-
- # expected dict entry
- elif key_indexes[-1][1]:
- if isinstance(data, dict):
- del data[key_indexes[-1][1]]
- return True
-
- @staticmethod
- def add_entry(data, key, item=None):
- ''' Add or set an item in a dictionary with key notation a.b.c
- d = {'a': {'b': 'c'}}
- key = a.b
- sets d['a']['b'] to the given item
- '''
- if not (key and re.match(Yedit.re_valid_key, key) and isinstance(data, (list, dict))):
- return None
-
- curr_data = data
-
- key_indexes = re.findall(Yedit.re_key, key)
- for arr_ind, dict_key in key_indexes[:-1]:
- if dict_key:
- if isinstance(data, dict) and data.has_key(dict_key):
- data = data[dict_key]
- continue
-
- data[dict_key] = {}
- data = data[dict_key]
-
- elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
- data = data[int(arr_ind)]
- else:
- return None
-
- # process last index for add
- # expected list entry
- if key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:
- data[int(key_indexes[-1][0])] = item
-
- # expected dict entry
- elif key_indexes[-1][1] and isinstance(data, dict):
- data[key_indexes[-1][1]] = item
-
- return curr_data
-
- @staticmethod
- def get_entry(data, key):
- ''' Get an item from a dictionary with key notation a.b.c
- d = {'a': {'b': 'c'}}
- key = a.b
- return c
- '''
- if not (key and re.match(Yedit.re_valid_key, key) and isinstance(data, (list, dict))):
- return None
-
- key_indexes = re.findall(Yedit.re_key, key)
- for arr_ind, dict_key in key_indexes:
- if dict_key and isinstance(data, dict):
- data = data.get(dict_key, None)
- elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
- data = data[int(arr_ind)]
- else:
- return None
-
- return data
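A small sketch of the bracketed-index notation these helpers accept; the data is hypothetical, and nested dotted traversal depends on re_key, so this sticks to the simple case:

    # hypothetical data
    data = {'spec': [{'name': 'one'}, {'name': 'two'}]}
    Yedit.get_entry(data, 'spec[1]')                    # -> {'name': 'two'}
    Yedit.add_entry(data, 'spec[0]', {'name': 'zero'})  # overwrites the first entry
    Yedit.remove_entry(data, 'spec[1]')                 # deletes the second entry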
-
- def write(self):
- ''' write to file '''
- if not self.filename:
- raise YeditException('Please specify a filename.')
-
- with open(self.filename, 'w') as yfd:
- yfd.write(yaml.safe_dump(self.yaml_dict, default_flow_style=False))
-
- def read(self):
- ''' read the file contents '''
- # check if it exists
- if not self.exists():
- return None
-
- contents = None
- with open(self.filename) as yfd:
- contents = yfd.read()
-
- return contents
-
- def exists(self):
- ''' return whether file exists '''
- if os.path.exists(self.filename):
- return True
-
- return False
-
- def load(self, content_type='yaml'):
- ''' return yaml file '''
- contents = self.read()
-
- if not contents:
- return None
-
- # check if it is yaml
- try:
- if content_type == 'yaml':
- self.yaml_dict = yaml.load(contents)
- elif content_type == 'json':
- self.yaml_dict = json.loads(contents)
- except (yaml.YAMLError, ValueError) as _:
- # Error loading yaml or json
- return None
-
- return self.yaml_dict
-
- def get(self, key):
- ''' get a specified key'''
- try:
- entry = Yedit.get_entry(self.yaml_dict, key)
- except KeyError as _:
- entry = None
-
- return entry
-
- def delete(self, key):
- ''' remove key from a dict'''
- try:
- entry = Yedit.get_entry(self.yaml_dict, key)
- except KeyError as _:
- entry = None
- if not entry:
- return (False, self.yaml_dict)
-
- result = Yedit.remove_entry(self.yaml_dict, key)
- if not result:
- return (False, self.yaml_dict)
-
- return (True, self.yaml_dict)
-
- def put(self, key, value):
- ''' put key, value into a dict '''
- try:
- entry = Yedit.get_entry(self.yaml_dict, key)
- except KeyError as _:
- entry = None
-
- if entry == value:
- return (False, self.yaml_dict)
-
- result = Yedit.add_entry(self.yaml_dict, key, value)
- if not result:
- return (False, self.yaml_dict)
-
- return (True, self.yaml_dict)
-
- def create(self, key, value):
- ''' create the initial yaml dict if the file does not exist '''
- if not self.exists():
- self.yaml_dict = {key: value}
- return (True, self.yaml_dict)
-
- return (False, self.yaml_dict)
-
-class Secret(OpenShiftCLI):
- ''' Class to wrap the oc command line tools
- '''
- def __init__(self,
- namespace,
- secret_name=None,
- kubeconfig='/etc/origin/master/admin.kubeconfig',
- verbose=False):
- ''' Constructor for Secret '''
- super(Secret, self).__init__(namespace, kubeconfig)
- self.namespace = namespace
- self.name = secret_name
- self.kubeconfig = kubeconfig
- self.verbose = verbose
-
- def get(self):
- '''return a secret by name '''
- return self._get('secrets', self.name)
-
- def delete(self):
- '''delete a secret by name'''
- return self._delete('secrets', self.name)
-
- def create(self, files=None, contents=None):
- '''Create a secret '''
- if not files:
- files = Utils.create_files_from_contents(contents)
-
- secrets = ["%s=%s" % (os.path.basename(sfile), sfile) for sfile in files]
- cmd = ['-n%s' % self.namespace, 'secrets', 'new', self.name]
- cmd.extend(secrets)
-
- return self.openshift_cmd(cmd)
-
- def update(self, files, force=False):
- '''run update secret
-
- This receives a list of file names and converts it into a secret.
- The secret is then written to disk and passed into the `oc replace` command.
- '''
- secret = self.prep_secret(files)
- if secret['returncode'] != 0:
- return secret
-
- sfile_path = '/tmp/%s' % self.name
- with open(sfile_path, 'w') as sfd:
- sfd.write(json.dumps(secret['results']))
-
- atexit.register(Utils.cleanup, [sfile_path])
-
- return self._replace(sfile_path, force=force)
-
- def prep_secret(self, files=None, contents=None):
- ''' return what the secret would look like if created
- This is accomplished by passing -ojson. This will most likely change in the future.
- '''
- if not files:
- files = Utils.create_files_from_contents(contents)
-
- secrets = ["%s=%s" % (os.path.basename(sfile), sfile) for sfile in files]
- cmd = ['-ojson', '-n%s' % self.namespace, 'secrets', 'new', self.name]
- cmd.extend(secrets)
-
- return self.openshift_cmd(cmd, output=True)
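A hedged sketch of the flow (the namespace and file names are invented): create shells out to `oc secrets new`, while prep_secret adds `-ojson` so the would-be secret can be compared against the live one without creating it.

    # hypothetical usage
    sec = Secret('default', secret_name='mysecret')
    new_def = sec.prep_secret(files=['/tmp/username', '/tmp/password'])
    if new_def['returncode'] == 0:
        print new_def['results']  # the secret exactly as oc would create it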
-
-
-
-# pylint: disable=too-many-branches
-def main():
- '''
- ansible oc module for secrets
- '''
-
- module = AnsibleModule(
- argument_spec=dict(
- kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
- state=dict(default='present', type='str',
- choices=['present', 'absent', 'list']),
- debug=dict(default=False, type='bool'),
- namespace=dict(default='default', type='str'),
- name=dict(default=None, type='str'),
- files=dict(default=None, type='list'),
- delete_after=dict(default=False, type='bool'),
- contents=dict(default=None, type='list'),
- force=dict(default=False, type='bool'),
- ),
- mutually_exclusive=[["contents", "files"]],
-
- supports_check_mode=True,
- )
- occmd = Secret(module.params['namespace'],
- module.params['name'],
- kubeconfig=module.params['kubeconfig'],
- verbose=module.params['debug'])
-
- state = module.params['state']
-
- api_rval = occmd.get()
-
- #####
- # Get
- #####
- if state == 'list':
- module.exit_json(changed=False, results=api_rval['results'], state="list")
-
- if not module.params['name']:
- module.fail_json(msg='Please specify a name when state is absent|present.')
- ########
- # Delete
- ########
- if state == 'absent':
- if not Utils.exists(api_rval['results'], module.params['name']):
- module.exit_json(changed=False, state="absent")
-
- if module.check_mode:
- module.exit_json(changed=False, msg='Would have performed a delete.')
-
- api_rval = occmd.delete()
- module.exit_json(changed=True, results=api_rval, state="absent")
-
-
- if state == 'present':
- if module.params['files']:
- files = module.params['files']
- elif module.params['contents']:
- files = Utils.create_files_from_contents(module.params['contents'])
- else:
- module.fail_json(msg='Either specify files or contents.')
-
- ########
- # Create
- ########
- if not Utils.exists(api_rval['results'], module.params['name']):
-
- if module.check_mode:
- module.exit_json(changed=False, msg='Would have performed a create.')
-
- api_rval = occmd.create(module.params['files'], module.params['contents'])
-
- # Remove files
- if files and module.params['delete_after']:
- Utils.cleanup(files)
-
- module.exit_json(changed=True, results=api_rval, state="present")
-
- ########
- # Update
- ########
- secret = occmd.prep_secret(module.params['files'], module.params['contents'])
-
- if secret['returncode'] != 0:
- module.fail_json(msg=secret)
-
- if Utils.check_def_equal(secret['results'], api_rval['results'][0]):
-
- # Remove files
- if files and module.params['delete_after']:
- Utils.cleanup(files)
-
- module.exit_json(changed=False, results=secret['results'], state="present")
-
- if module.check_mode:
- module.exit_json(changed=False, msg='Would have performed an update.')
-
- api_rval = occmd.update(files, force=module.params['force'])
-
- # Remove files
- if files and module.params['delete_after']:
- Utils.cleanup(files)
-
- if api_rval['returncode'] != 0:
- module.fail_json(msg=api_rval)
-
-
- module.exit_json(changed=True, results=api_rval, state="present")
-
- module.exit_json(failed=True,
- changed=False,
- results='Unknown state passed. %s' % state,
- state="unknown")
-
-# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
- # import module snippets. These are required
-from ansible.module_utils.basic import *
-
-main()
diff --git a/roles/lib_timedatectl/library/timedatectl.py b/roles/lib_timedatectl/library/timedatectl.py
deleted file mode 100644
index b6eab5918..000000000
--- a/roles/lib_timedatectl/library/timedatectl.py
+++ /dev/null
@@ -1,74 +0,0 @@
-#!/usr/bin/env python
-'''
- timedatectl ansible module
-
- This module supports setting ntp enabled
-'''
-import subprocess
-
-
-
-
-def do_timedatectl(options=None):
- ''' subprocess timedatectl '''
-
- cmd = ['/usr/bin/timedatectl']
- if options:
- cmd += options.split()
-
- proc = subprocess.Popen(cmd, stdin=None, stdout=subprocess.PIPE)
- proc.wait()
- return proc.stdout.read()
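For example (a hypothetical run; the line of interest follows the `timedatectl status` output format on systemd hosts):

    # hypothetical call -- returns the raw stdout of /usr/bin/timedatectl
    out = do_timedatectl()
    # a typical line in the output:
    #      NTP enabled: yes
    ntp_on = any('NTP enabled' in line and 'yes' in line for line in out.split('\n'))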
-
-def main():
- ''' Ansible module for timedatectl
- '''
-
- module = AnsibleModule(
- argument_spec=dict(
- #state=dict(default='enabled', type='str'),
- ntp=dict(default=True, type='bool'),
- ),
- #supports_check_mode=True
- )
-
- # do something
- ntp_enabled = False
-
- results = do_timedatectl()
-
- for line in results.split('\n'):
- if 'NTP enabled' in line:
- if 'yes' in line:
- ntp_enabled = True
-
- ########
- # Enable NTP
- ########
- if module.params['ntp']:
- if ntp_enabled:
- module.exit_json(changed=False, results="enabled", state="enabled")
-
- # NTP is currently disabled -- enable it
- else:
- results = do_timedatectl('set-ntp yes')
- module.exit_json(changed=True, results="enabled", state="enabled", cmdout=results)
-
- #########
- # Disable NTP
- #########
- else:
- if not ntp_enabled:
- module.exit_json(changed=False, results="disabled", state="disabled")
-
- results = do_timedatectl('set-ntp no')
- module.exit_json(changed=True, results="disabled", state="disabled")
-
- module.exit_json(failed=True, changed=False, results="Something went wrong", state="unknown")
-
-# Pylint is getting in the way of basic Ansible
-# pylint: disable=redefined-builtin,wildcard-import,unused-wildcard-import
-from ansible.module_utils.basic import *
-
-main()
diff --git a/roles/lib_yaml_editor/build/ansible/yedit.py b/roles/lib_yaml_editor/build/ansible/yedit.py
deleted file mode 100644
index dab3d6347..000000000
--- a/roles/lib_yaml_editor/build/ansible/yedit.py
+++ /dev/null
@@ -1,69 +0,0 @@
-#pylint: skip-file
-
-def main():
- '''
- ansible module for modifying yaml files
- '''
-
- module = AnsibleModule(
- argument_spec=dict(
- state=dict(default='present', type='str',
- choices=['present', 'absent', 'list']),
- debug=dict(default=False, type='bool'),
- src=dict(default=None, type='str'),
- content=dict(default=None, type='dict'),
- key=dict(default=None, type='str'),
- value=dict(default=None, type='str'),
- value_format=dict(default='yaml', choices=['yaml', 'json'], type='str'),
- ),
- #mutually_exclusive=[["src", "content"]],
-
- supports_check_mode=True,
- )
- state = module.params['state']
-
- yamlfile = Yedit(module.params['src'], module.params['content'])
-
- rval = yamlfile.load()
- if not rval and state != 'present':
- module.fail_json(msg='Error opening file [%s]. Verify that the file exists, '
- 'that it has correct permissions, and is valid yaml.' % module.params['src'])
-
- if state == 'list':
- module.exit_json(changed=False, results=rval, state="list")
-
- if state == 'absent':
- rval = yamlfile.delete(module.params['key'])
- module.exit_json(changed=rval[0], results=rval[1], state="absent")
-
- if state == 'present':
-
- if module.params['value_format'] == 'yaml':
- value = yaml.load(module.params['value'])
- elif module.params['value_format'] == 'json':
- value = json.loads(module.params['value'])
-
- if rval:
- rval = yamlfile.put(module.params['key'], value)
- if rval[0]:
- yamlfile.write()
- module.exit_json(changed=rval[0], results=rval[1], state="present")
-
- if not module.params['content']:
- rval = yamlfile.create(module.params['key'], value)
- else:
- rval = (True, yamlfile.yaml_dict)
- yamlfile.write()
-
- module.exit_json(changed=rval[0], results=rval[1], state="present")
-
- module.exit_json(failed=True,
- changed=False,
- results='Unknown state passed. %s' % state,
- state="unknown")
-
-# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
- # import module snippets. These are required
-from ansible.module_utils.basic import *
-
-main()
diff --git a/roles/lib_yaml_editor/build/generate.py b/roles/lib_yaml_editor/build/generate.py
deleted file mode 100755
index 312e4d0ee..000000000
--- a/roles/lib_yaml_editor/build/generate.py
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/usr/bin/env python
-'''
- Generate the openshift-ansible/roles/lib_yaml_editor/library/ modules.
-'''
-
-import os
-
-# pylint: disable=anomalous-backslash-in-string
-GEN_STR = "#!/usr/bin/env python\n" + \
- "# ___ ___ _ _ ___ ___ _ _____ ___ ___\n" + \
- "# / __| __| \| | __| _ \ /_\_ _| __| \\\n" + \
- "# | (_ | _|| .` | _|| / / _ \| | | _|| |) |\n" + \
- "# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____\n" + \
- "# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|\n" + \
- "# | |) | (_) | | .` | (_) || | | _|| |) | | | |\n" + \
- "# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|\n"
-
-OPENSHIFT_ANSIBLE_PATH = os.path.dirname(os.path.realpath(__file__))
-
-FILES = {'yedit.py': ['src/base.py', 'src/yedit.py', 'ansible/yedit.py'],
- }
-
-def main():
- ''' combine the necessary files to create the ansible module '''
- library = os.path.join(OPENSHIFT_ANSIBLE_PATH, '..', 'library/')
- for fname, parts in FILES.items():
- with open(os.path.join(library, fname), 'w') as afd:
- afd.seek(0)
- afd.write(GEN_STR)
- for fpart in parts:
- with open(os.path.join(OPENSHIFT_ANSIBLE_PATH, fpart)) as pfd:
- # first line is pylint disable so skip it
- for idx, line in enumerate(pfd):
- if idx == 0 and 'skip-file' in line:
- continue
-
- afd.write(line)
-
-
-if __name__ == '__main__':
- main()
-
-
diff --git a/roles/lib_yaml_editor/build/src/base.py b/roles/lib_yaml_editor/build/src/base.py
deleted file mode 100644
index 9e43d45dc..000000000
--- a/roles/lib_yaml_editor/build/src/base.py
+++ /dev/null
@@ -1,17 +0,0 @@
-# pylint: skip-file
-
-'''
-module for managing yaml files
-'''
-
-import os
-import re
-
-import yaml
-# This is here because of a bug that causes yaml
-# to incorrectly handle timezone info on timestamps
-def timestamp_constructor(_, node):
- ''' return timestamps as strings'''
- return str(node.value)
-yaml.add_constructor(u'tag:yaml.org,2002:timestamp', timestamp_constructor)
-
diff --git a/roles/lib_yaml_editor/build/src/yedit.py b/roles/lib_yaml_editor/build/src/yedit.py
deleted file mode 100644
index 642733914..000000000
--- a/roles/lib_yaml_editor/build/src/yedit.py
+++ /dev/null
@@ -1,209 +0,0 @@
-# pylint: skip-file
-
-class YeditException(Exception):
- ''' Exception class for Yedit '''
- pass
-
-class Yedit(object):
- ''' Class to modify yaml files '''
- re_valid_key = r"(((\[-?\d+\])|([a-zA-Z-./]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([a-zA-Z-./]+)"
-
- def __init__(self, filename=None, content=None, content_type='yaml'):
- self.content = content
- self.filename = filename
- self.__yaml_dict = content
- self.content_type = content_type
- if self.filename and not self.content:
- self.load(content_type=self.content_type)
-
- @property
- def yaml_dict(self):
- ''' getter method for yaml_dict '''
- return self.__yaml_dict
-
- @yaml_dict.setter
- def yaml_dict(self, value):
- ''' setter method for yaml_dict '''
- self.__yaml_dict = value
-
- @staticmethod
- def remove_entry(data, key):
- ''' remove data at location key '''
- if not (key and re.match(Yedit.re_valid_key, key) and isinstance(data, (list, dict))):
- return None
-
- key_indexes = re.findall(Yedit.re_key, key)
- for arr_ind, dict_key in key_indexes[:-1]:
- if dict_key and isinstance(data, dict):
- data = data.get(dict_key, None)
- elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
- data = data[int(arr_ind)]
- else:
- return None
-
- # process last index for remove
- # expected list entry
- if key_indexes[-1][0]:
- if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:
- del data[int(key_indexes[-1][0])]
- return True
-
- # expected dict entry
- elif key_indexes[-1][1]:
- if isinstance(data, dict):
- del data[key_indexes[-1][1]]
- return True
-
- @staticmethod
- def add_entry(data, key, item=None):
- ''' Add or set an item in a dictionary with key notation a.b.c
- d = {'a': {'b': 'c'}}
- key = a.b
- sets d['a']['b'] to the given item
- '''
- if not (key and re.match(Yedit.re_valid_key, key) and isinstance(data, (list, dict))):
- return None
-
- curr_data = data
-
- key_indexes = re.findall(Yedit.re_key, key)
- for arr_ind, dict_key in key_indexes[:-1]:
- if dict_key:
- if isinstance(data, dict) and data.has_key(dict_key):
- data = data[dict_key]
- continue
-
- data[dict_key] = {}
- data = data[dict_key]
-
- elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
- data = data[int(arr_ind)]
- else:
- return None
-
- # process last index for add
- # expected list entry
- if key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:
- data[int(key_indexes[-1][0])] = item
-
- # expected dict entry
- elif key_indexes[-1][1] and isinstance(data, dict):
- data[key_indexes[-1][1]] = item
-
- return curr_data
-
- @staticmethod
- def get_entry(data, key):
- ''' Get an item from a dictionary with key notation a.b.c
- d = {'a': {'b': 'c'}}
- key = a.b
- return c
- '''
- if not (key and re.match(Yedit.re_valid_key, key) and isinstance(data, (list, dict))):
- return None
-
- key_indexes = re.findall(Yedit.re_key, key)
- for arr_ind, dict_key in key_indexes:
- if dict_key and isinstance(data, dict):
- data = data.get(dict_key, None)
- elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
- data = data[int(arr_ind)]
- else:
- return None
-
- return data
-
- def write(self):
- ''' write to file '''
- if not self.filename:
- raise YeditException('Please specify a filename.')
-
- with open(self.filename, 'w') as yfd:
- yfd.write(yaml.safe_dump(self.yaml_dict, default_flow_style=False))
-
- def read(self):
- ''' read the file contents '''
- # check if it exists
- if not self.exists():
- return None
-
- contents = None
- with open(self.filename) as yfd:
- contents = yfd.read()
-
- return contents
-
- def exists(self):
- ''' return whether file exists '''
- if os.path.exists(self.filename):
- return True
-
- return False
-
- def load(self, content_type='yaml'):
- ''' return yaml file '''
- contents = self.read()
-
- if not contents:
- return None
-
- # check if it is yaml
- try:
- if content_type == 'yaml':
- self.yaml_dict = yaml.load(contents)
- elif content_type == 'json':
- self.yaml_dict = json.loads(contents)
- except (yaml.YAMLError, ValueError) as _:
- # Error loading yaml or json
- return None
-
- return self.yaml_dict
-
- def get(self, key):
- ''' get a specified key'''
- try:
- entry = Yedit.get_entry(self.yaml_dict, key)
- except KeyError as _:
- entry = None
-
- return entry
-
- def delete(self, key):
- ''' remove key from a dict'''
- try:
- entry = Yedit.get_entry(self.yaml_dict, key)
- except KeyError as _:
- entry = None
- if not entry:
- return (False, self.yaml_dict)
-
- result = Yedit.remove_entry(self.yaml_dict, key)
- if not result:
- return (False, self.yaml_dict)
-
- return (True, self.yaml_dict)
-
- def put(self, key, value):
- ''' put key, value into a dict '''
- try:
- entry = Yedit.get_entry(self.yaml_dict, key)
- except KeyError as _:
- entry = None
-
- if entry == value:
- return (False, self.yaml_dict)
-
- result = Yedit.add_entry(self.yaml_dict, key, value)
- if not result:
- return (False, self.yaml_dict)
-
- return (True, self.yaml_dict)
-
- def create(self, key, value):
- ''' create the initial yaml dict if the file does not exist '''
- if not self.exists():
- self.yaml_dict = {key: value}
- return (True, self.yaml_dict)
-
- return (False, self.yaml_dict)
diff --git a/roles/lib_yaml_editor/build/test/foo.yml b/roles/lib_yaml_editor/build/test/foo.yml
deleted file mode 100644
index 20e9ff3fe..000000000
--- a/roles/lib_yaml_editor/build/test/foo.yml
+++ /dev/null
@@ -1 +0,0 @@
-foo: bar
diff --git a/roles/lib_yaml_editor/build/test/test.yaml b/roles/lib_yaml_editor/build/test/test.yaml
deleted file mode 100755
index ac9c37565..000000000
--- a/roles/lib_yaml_editor/build/test/test.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
-#!/usr/bin/ansible-playbook
----
-- hosts: localhost
- gather_facts: no
- tasks:
- - yedit:
- src: /home/kwoodson/git/openshift-ansible/roles/lib_yaml_editor/build/test/foo.yml
- key: foo
- value: barplus
- state: present
- register: output
-
- - debug:
- msg: "{{ output }}"
-
diff --git a/roles/lib_yaml_editor/library/yedit.py b/roles/lib_yaml_editor/library/yedit.py
deleted file mode 100644
index b7ae45b31..000000000
--- a/roles/lib_yaml_editor/library/yedit.py
+++ /dev/null
@@ -1,300 +0,0 @@
-#!/usr/bin/env python
-# ___ ___ _ _ ___ ___ _ _____ ___ ___
-# / __| __| \| | __| _ \ /_\_ _| __| \
-# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
-# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
-# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
-# | |) | (_) | | .` | (_) || | | _|| |) | | | |
-# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
-
-'''
-module for managing yaml files
-'''
-
-import os
-import re
-
-import yaml
-# This is here because of a bug that causes yaml
-# to incorrectly handle timezone info on timestamps
-def timestamp_constructor(_, node):
- ''' return timestamps as strings'''
- return str(node.value)
-yaml.add_constructor(u'tag:yaml.org,2002:timestamp', timestamp_constructor)
-
-
-class YeditException(Exception):
- ''' Exception class for Yedit '''
- pass
-
-class Yedit(object):
- ''' Class to modify yaml files '''
- re_valid_key = r"(((\[-?\d+\])|([a-zA-Z-./]+)).?)+$"
- re_key = r"(?:\[(-?\d+)\])|([a-zA-Z-./]+)"
-
- def __init__(self, filename=None, content=None, content_type='yaml'):
- self.content = content
- self.filename = filename
- self.__yaml_dict = content
- self.content_type = content_type
- if self.filename and not self.content:
- self.load(content_type=self.content_type)
-
- @property
- def yaml_dict(self):
- ''' getter method for yaml_dict '''
- return self.__yaml_dict
-
- @yaml_dict.setter
- def yaml_dict(self, value):
- ''' setter method for yaml_dict '''
- self.__yaml_dict = value
-
- @staticmethod
- def remove_entry(data, key):
- ''' remove data at location key '''
- if not (key and re.match(Yedit.re_valid_key, key) and isinstance(data, (list, dict))):
- return None
-
- key_indexes = re.findall(Yedit.re_key, key)
- for arr_ind, dict_key in key_indexes[:-1]:
- if dict_key and isinstance(data, dict):
- data = data.get(dict_key, None)
- elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
- data = data[int(arr_ind)]
- else:
- return None
-
- # process last index for remove
- # expected list entry
- if key_indexes[-1][0]:
- if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:
- del data[int(key_indexes[-1][0])]
- return True
-
- # expected dict entry
- elif key_indexes[-1][1]:
- if isinstance(data, dict):
- del data[key_indexes[-1][1]]
- return True
-
- @staticmethod
- def add_entry(data, key, item=None):
- ''' Add or set an item in a dictionary with key notation a.b.c
- d = {'a': {'b': 'c'}}
- key = a.b
- sets d['a']['b'] to the given item
- '''
- if not (key and re.match(Yedit.re_valid_key, key) and isinstance(data, (list, dict))):
- return None
-
- curr_data = data
-
- key_indexes = re.findall(Yedit.re_key, key)
- for arr_ind, dict_key in key_indexes[:-1]:
- if dict_key:
- if isinstance(data, dict) and data.has_key(dict_key):
- data = data[dict_key]
- continue
-
- data[dict_key] = {}
- data = data[dict_key]
-
- elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
- data = data[int(arr_ind)]
- else:
- return None
-
- # process last index for add
- # expected list entry
- if key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:
- data[int(key_indexes[-1][0])] = item
-
- # expected dict entry
- elif key_indexes[-1][1] and isinstance(data, dict):
- data[key_indexes[-1][1]] = item
-
- return curr_data
-
- @staticmethod
- def get_entry(data, key):
- ''' Get an item from a dictionary with key notation a.b.c
- d = {'a': {'b': 'c'}}
- key = a.b
- return c
- '''
- if not (key and re.match(Yedit.re_valid_key, key) and isinstance(data, (list, dict))):
- return None
-
- key_indexes = re.findall(Yedit.re_key, key)
- for arr_ind, dict_key in key_indexes:
- if dict_key and isinstance(data, dict):
- data = data.get(dict_key, None)
- elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
- data = data[int(arr_ind)]
- else:
- return None
-
- return data
-
- def write(self):
- ''' write to file '''
- if not self.filename:
- raise YeditException('Please specify a filename.')
-
- with open(self.filename, 'w') as yfd:
- yfd.write(yaml.safe_dump(self.yaml_dict, default_flow_style=False))
-
- def read(self):
- ''' read the file contents '''
- # check if it exists
- if not self.exists():
- return None
-
- contents = None
- with open(self.filename) as yfd:
- contents = yfd.read()
-
- return contents
-
- def exists(self):
- ''' return whether file exists '''
- if os.path.exists(self.filename):
- return True
-
- return False
-
- def load(self, content_type='yaml'):
- ''' return yaml file '''
- contents = self.read()
-
- if not contents:
- return None
-
- # check if it is yaml
- try:
- if content_type == 'yaml':
- self.yaml_dict = yaml.load(contents)
- elif content_type == 'json':
- self.yaml_dict = json.loads(contents)
- except (yaml.YAMLError, ValueError) as _:
- # Error loading yaml or json
- return None
-
- return self.yaml_dict
-
- def get(self, key):
- ''' get a specified key'''
- try:
- entry = Yedit.get_entry(self.yaml_dict, key)
- except KeyError as _:
- entry = None
-
- return entry
-
- def delete(self, key):
- ''' remove key from a dict'''
- try:
- entry = Yedit.get_entry(self.yaml_dict, key)
- except KeyError as _:
- entry = None
- if not entry:
- return (False, self.yaml_dict)
-
- result = Yedit.remove_entry(self.yaml_dict, key)
- if not result:
- return (False, self.yaml_dict)
-
- return (True, self.yaml_dict)
-
- def put(self, key, value):
- ''' put key, value into a dict '''
- try:
- entry = Yedit.get_entry(self.yaml_dict, key)
- except KeyError as _:
- entry = None
-
- if entry == value:
- return (False, self.yaml_dict)
-
- result = Yedit.add_entry(self.yaml_dict, key, value)
- if not result:
- return (False, self.yaml_dict)
-
- return (True, self.yaml_dict)
-
- def create(self, key, value):
- ''' create the initial yaml dict if the file does not exist '''
- if not self.exists():
- self.yaml_dict = {key: value}
- return (True, self.yaml_dict)
-
- return (False, self.yaml_dict)
-
-def main():
- '''
- ansible module for modifying yaml files
- '''
-
- module = AnsibleModule(
- argument_spec=dict(
- state=dict(default='present', type='str',
- choices=['present', 'absent', 'list']),
- debug=dict(default=False, type='bool'),
- src=dict(default=None, type='str'),
- content=dict(default=None, type='dict'),
- key=dict(default=None, type='str'),
- value=dict(default=None, type='str'),
- value_format=dict(default='yaml', choices=['yaml', 'json'], type='str'),
- ),
- #mutually_exclusive=[["src", "content"]],
-
- supports_check_mode=True,
- )
- state = module.params['state']
-
- yamlfile = Yedit(module.params['src'], module.params['content'])
-
- rval = yamlfile.load()
- if not rval and state != 'present':
- module.fail_json(msg='Error opening file [%s]. Verify that the file exists, '
- 'that it has correct permissions, and is valid yaml.' % module.params['src'])
-
- if state == 'list':
- module.exit_json(changed=False, results=rval, state="list")
-
- if state == 'absent':
- rval = yamlfile.delete(module.params['key'])
- module.exit_json(changed=rval[0], results=rval[1], state="absent")
-
- if state == 'present':
-
- if module.params['value_format'] == 'yaml':
- value = yaml.load(module.params['value'])
- elif module.params['value_format'] == 'json':
- value = json.loads(module.params['value'])
-
- if rval:
- rval = yamlfile.put(module.params['key'], value)
- if rval[0]:
- yamlfile.write()
- module.exit_json(changed=rval[0], results=rval[1], state="present")
-
- if not module.params['content']:
- rval = yamlfile.create(module.params['key'], value)
- else:
- rval = (True, yamlfile.yaml_dict)
- yamlfile.write()
-
- module.exit_json(changed=rval[0], results=rval[1], state="present")
-
- module.exit_json(failed=True,
- changed=False,
- results='Unknown state passed. %s' % state,
- state="unknown")
-
-# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
- # import module snippets. These are required
-from ansible.module_utils.basic import *
-
-main()
diff --git a/roles/lib_zabbix/README.md b/roles/lib_zabbix/README.md
deleted file mode 100644
index 69debc698..000000000
--- a/roles/lib_zabbix/README.md
+++ /dev/null
@@ -1,38 +0,0 @@
-zabbix
-=========
-
-Automate zabbix tasks.
-
-Requirements
-------------
-
- This role requires that the openshift_tools rpm be installed; it provides the zbxapi.py library. For now the library lives at https://github.com/openshift/openshift-tools under openshift_tools/monitoring/zbxapi.py.
-
-Role Variables
---------------
-
-None
-
-Dependencies
-------------
-
- This depends on the zbxapi.py library, located at https://github.com/openshift/openshift-tools under openshift_tools/monitoring/zbxapi.py for now.
-
-Example Playbook
-----------------
-
- - zbx_host:
- server: zab_server
- user: zab_user
- password: zab_password
- name: 'myhost'
-
-License
--------
-
-ASL 2.0
-
-Author Information
-------------------
-
-OpenShift operations, Red Hat, Inc
diff --git a/roles/lib_zabbix/library/__init__.py b/roles/lib_zabbix/library/__init__.py
deleted file mode 100644
index 0c7e19e41..000000000
--- a/roles/lib_zabbix/library/__init__.py
+++ /dev/null
@@ -1,3 +0,0 @@
-'''
-ZabbixAPI ansible module
-'''
diff --git a/roles/lib_zabbix/library/zbx_action.py b/roles/lib_zabbix/library/zbx_action.py
deleted file mode 100644
index 499084942..000000000
--- a/roles/lib_zabbix/library/zbx_action.py
+++ /dev/null
@@ -1,690 +0,0 @@
-#!/usr/bin/env python
-# vim: expandtab:tabstop=4:shiftwidth=4
-'''
- Ansible module for zabbix actions
-'''
-#
-# Zabbix action ansible module
-#
-#
-# Copyright 2015 Red Hat Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# This is in place because the zabbix modules all look similar to one another.
-# They intentionally carry near-duplicate code: their behavior is very similar
-# but differs slightly for each zabbix class.
-# pylint: disable=duplicate-code
-
-# pylint: disable=import-error
-from openshift_tools.zbxapi import ZabbixAPI, ZabbixConnection, ZabbixAPIError
-
-CUSTOM_SCRIPT_ACTION = '0'
-IPMI_ACTION = '1'
-SSH_ACTION = '2'
-TELNET_ACTION = '3'
-GLOBAL_SCRIPT_ACTION = '4'
-
-EXECUTE_ON_ZABBIX_AGENT = '0'
-EXECUTE_ON_ZABBIX_SERVER = '1'
-
-OPERATION_REMOTE_COMMAND = '1'
-
-def exists(content, key='result'):
- ''' Check that key exists in content and that content[key] is non-empty
- '''
- if not content.has_key(key):
- return False
-
- if not content[key]:
- return False
-
- return True
-
-def conditions_equal(zab_conditions, user_conditions):
- '''Compare two lists of conditions'''
- c_type = 'conditiontype'
- _op = 'operator'
- val = 'value'
- if len(user_conditions) != len(zab_conditions):
- return False
-
- for zab_cond, user_cond in zip(zab_conditions, user_conditions):
- if zab_cond[c_type] != str(user_cond[c_type]) or zab_cond[_op] != str(user_cond[_op]) or \
- zab_cond[val] != str(user_cond[val]):
- return False
-
- return True
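A quick sketch with invented conditions: zabbix hands back strings, so user-supplied ints compare equal after str() coercion.

    # hypothetical filter conditions
    zab = [{'conditiontype': '0', 'operator': '0', 'value': '4'}]
    usr = [{'conditiontype': 0, 'operator': 0, 'value': 4}]
    assert conditions_equal(zab, usr)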
-
-def filter_differences(zabbix_filters, user_filters):
- '''Determine the differences from user and zabbix for operations'''
- rval = {}
- for key, val in user_filters.items():
-
- if key == 'conditions':
- if not conditions_equal(zabbix_filters[key], val):
- rval[key] = val
-
- elif zabbix_filters[key] != str(val):
- rval[key] = val
-
- return rval
-
-def opconditions_diff(zab_val, user_val):
- ''' Report whether there are differences between opconditions on
- zabbix and opconditions supplied by user '''
-
- if len(zab_val) != len(user_val):
- return True
-
- for z_cond, u_cond in zip(zab_val, user_val):
- if not all([str(u_cond[op_key]) == z_cond[op_key] for op_key in \
- ['conditiontype', 'operator', 'value']]):
- return True
-
- return False
-
-def opmessage_diff(zab_val, user_val):
- ''' Report whether there are differences between opmessage on
- zabbix and opmessage supplied by user '''
-
- for op_msg_key, op_msg_val in user_val.items():
- if zab_val[op_msg_key] != str(op_msg_val):
- return True
-
- return False
-
-def opmessage_grp_diff(zab_val, user_val):
- ''' Report whether there are differences between opmessage_grp
- on zabbix and opmessage_grp supplied by user '''
-
- zab_grp_ids = set([ugrp['usrgrpid'] for ugrp in zab_val])
- usr_grp_ids = set([ugrp['usrgrpid'] for ugrp in user_val])
- if usr_grp_ids != zab_grp_ids:
- return True
-
- return False
-
-def opmessage_usr_diff(zab_val, user_val):
- ''' Report whether there are differences between opmessage_usr
- on zabbix and opmessage_usr supplied by user '''
-
- zab_usr_ids = set([usr['userid'] for usr in zab_val])
- usr_ids = set([usr['userid'] for usr in user_val])
- if usr_ids != zab_usr_ids:
- return True
-
- return False
-
-def opcommand_diff(zab_op_cmd, usr_op_cmd):
- ''' Check whether user-provided opcommand matches what's already
- stored in Zabbix '''
-
- for usr_op_cmd_key, usr_op_cmd_val in usr_op_cmd.items():
- if zab_op_cmd[usr_op_cmd_key] != str(usr_op_cmd_val):
- return True
- return False
-
-def host_in_zabbix(zab_hosts, usr_host):
- ''' Check whether a particular user host is already in the
- Zabbix list of hosts '''
-
- for usr_hst_key, usr_hst_val in usr_host.items():
- for zab_host in zab_hosts:
- if usr_hst_key in zab_host and \
- zab_host[usr_hst_key] == str(usr_hst_val):
- return True
-
- return False
-
-def hostlist_in_zabbix(zab_hosts, usr_hosts):
- ''' Check whether user-provided list of hosts are already in
- the Zabbix action '''
-
- if len(zab_hosts) != len(usr_hosts):
- return False
-
- for usr_host in usr_hosts:
- if not host_in_zabbix(zab_hosts, usr_host):
- return False
-
- return True
-
-# We are comparing two lists of dictionaries (the one stored on zabbix and the
-# one the user is providing). For each type of operation, determine whether there
-# is a difference between what is stored on zabbix and what the user is providing.
-# If there is a difference, we take the user-provided data for what needs to
-# be stored/updated into zabbix.
-def operation_differences(zabbix_ops, user_ops):
- '''Determine the differences from user and zabbix for operations'''
-
- # if they don't match, take the user options
- if len(zabbix_ops) != len(user_ops):
- return user_ops
-
- rval = {}
- for zab, user in zip(zabbix_ops, user_ops):
- for oper in user.keys():
- if oper == 'opconditions' and opconditions_diff(zab[oper], \
- user[oper]):
- rval[oper] = user[oper]
-
- elif oper == 'opmessage' and opmessage_diff(zab[oper], \
- user[oper]):
- rval[oper] = user[oper]
-
- elif oper == 'opmessage_grp' and opmessage_grp_diff(zab[oper], \
- user[oper]):
- rval[oper] = user[oper]
-
- elif oper == 'opmessage_usr' and opmessage_usr_diff(zab[oper], \
- user[oper]):
- rval[oper] = user[oper]
-
- elif oper == 'opcommand' and opcommand_diff(zab[oper], \
- user[oper]):
- rval[oper] = user[oper]
-
- # opcommand_grp can be treated just like opcommand_hst
- # as opcommand_grp[] is just a list of groups
- elif oper == 'opcommand_hst' or oper == 'opcommand_grp':
- if not hostlist_in_zabbix(zab[oper], user[oper]):
- rval[oper] = user[oper]
-
- # if it's any other type of operation than the ones tested above
- # just do a direct compare
- elif oper not in ['opconditions', 'opmessage', 'opmessage_grp',
- 'opmessage_usr', 'opcommand', 'opcommand_hst',
- 'opcommand_grp'] \
- and str(zab[oper]) != str(user[oper]):
- rval[oper] = user[oper]
-
- return rval
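A minimal sketch with hypothetical operations: keys that match after str() coercion are dropped, and differing keys keep the user-supplied value.

    # hypothetical comparison
    zab_ops = [{'esc_period': '60', 'operationtype': '0'}]
    usr_ops = [{'esc_period': 120, 'operationtype': 0}]
    operation_differences(zab_ops, usr_ops)  # -> {'esc_period': 120}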
-
-def get_users(zapi, users):
- '''resolve user aliases to zabbix userids'''
- rval_users = []
-
- for user in users:
- content = zapi.get_content('user',
- 'get',
- {'filter': {'alias': user}})
- rval_users.append({'userid': content['result'][0]['userid']})
-
- return rval_users
-
-def get_user_groups(zapi, groups):
- '''resolve user group names to zabbix usrgrpids'''
- user_groups = []
-
- for group in groups:
- content = zapi.get_content('usergroup',
- 'get',
- {'search': {'name': group}})
- for result in content['result']:
- user_groups.append({'usrgrpid': result['usrgrpid']})
-
- return user_groups
-
-def get_mediatype_id_by_name(zapi, m_name):
- '''get the mediatype id from the mediatype name'''
- content = zapi.get_content('mediatype',
- 'get',
- {'filter': {'description': m_name}})
-
- return content['result'][0]['mediatypeid']
-
-def get_priority(priority):
- ''' determine priority
- '''
- prior = 0
- if 'info' in priority:
- prior = 1
- elif 'warn' in priority:
- prior = 2
- elif 'avg' == priority or 'ave' in priority:
- prior = 3
- elif 'high' in priority:
- prior = 4
- elif 'dis' in priority:
- prior = 5
-
- return prior
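A couple of illustrative lookups (hypothetical inputs; matching is by substring):

    get_priority('warning')  # -> 2 ('warn' matches)
    get_priority('average')  # -> 3 ('ave' matches)
    get_priority('unknown')  # -> 0 (no match)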
-
-def get_event_source(from_src):
- '''Translate event source string into its numeric value'''
- choices = ['trigger', 'discovery', 'auto', 'internal']
- rval = 0
- try:
- rval = choices.index(from_src)
- except ValueError as _:
- raise ZabbixAPIError('Value not found for event source [%s]' % from_src)
-
- return rval
-
-def get_status(inc_status):
- '''determine status for action'''
- rval = 1
- if inc_status == 'enabled':
- rval = 0
-
- return rval
-
-def get_condition_operator(inc_operator):
- ''' determine the condition operator'''
- vals = {'=': 0,
- '<>': 1,
- 'like': 2,
- 'not like': 3,
- 'in': 4,
- '>=': 5,
- '<=': 6,
- 'not in': 7,
- }
-
- return vals[inc_operator]
-
-def get_host_id_by_name(zapi, host_name):
- '''Get host id by name'''
- content = zapi.get_content('host',
- 'get',
- {'filter': {'name': host_name}})
-
- return content['result'][0]['hostid']
-
-def get_trigger_value(inc_trigger):
- '''determine the proper trigger value'''
- rval = 1
- if inc_trigger == 'PROBLEM':
- rval = 1
- else:
- rval = 0
-
- return rval
-
-def get_template_id_by_name(zapi, t_name):
- '''get the template id by name'''
- content = zapi.get_content('template',
- 'get',
- {'filter': {'host': t_name}})
-
- return content['result'][0]['templateid']
-
-
-def get_host_group_id_by_name(zapi, hg_name):
- '''Get hostgroup id by name'''
- content = zapi.get_content('hostgroup',
- 'get',
- {'filter': {'name': hg_name}})
-
- return content['result'][0]['groupid']
-
-def get_condition_type(event_source, inc_condition):
- '''determine the condition type'''
- c_types = {}
- if event_source == 'trigger':
- c_types = {'host group': 0,
- 'host': 1,
- 'trigger': 2,
- 'trigger name': 3,
- 'trigger severity': 4,
- 'trigger value': 5,
- 'time period': 6,
- 'host template': 13,
- 'application': 15,
- 'maintenance status': 16,
- }
-
- elif event_source == 'discovery':
- c_types = {'host IP': 7,
- 'discovered service type': 8,
- 'discovered service port': 9,
- 'discovery status': 10,
- 'uptime or downtime duration': 11,
- 'received value': 12,
- 'discovery rule': 18,
- 'discovery check': 19,
- 'proxy': 20,
- 'discovery object': 21,
- }
-
- elif event_source == 'auto':
- c_types = {'proxy': 20,
- 'host name': 22,
- 'host metadata': 24,
- }
-
- elif event_source == 'internal':
- c_types = {'host group': 0,
- 'host': 1,
- 'host template': 13,
- 'application': 15,
- 'event type': 23,
- }
- else:
- raise ZabbixAPIError('Unknown event source %s' % event_source)
-
- return c_types[inc_condition]
-
-def get_operation_type(inc_operation):
- ''' determine the correct operation type'''
- o_types = {'send message': 0,
- 'remote command': OPERATION_REMOTE_COMMAND,
- 'add host': 2,
- 'remove host': 3,
- 'add to host group': 4,
- 'remove from host group': 5,
- 'link to template': 6,
- 'unlink from template': 7,
- 'enable host': 8,
- 'disable host': 9,
- }
-
- return o_types[inc_operation]
-
-def get_opcommand_type(opcommand_type):
- ''' determine the opcommand type '''
- oc_types = {'custom script': CUSTOM_SCRIPT_ACTION,
- 'IPMI': IPMI_ACTION,
- 'SSH': SSH_ACTION,
- 'Telnet': TELNET_ACTION,
- 'global script': GLOBAL_SCRIPT_ACTION,
- }
-
- return oc_types[opcommand_type]
-
-def get_execute_on(execute_on):
- ''' determine the execution target '''
- e_types = {'zabbix agent': EXECUTE_ON_ZABBIX_AGENT,
- 'zabbix server': EXECUTE_ON_ZABBIX_SERVER,
- }
-
- return e_types[execute_on]
-
-def action_remote_command(ansible_module, zapi, operation):
- ''' Process remote command type of actions '''
-
- if 'type' not in operation['opcommand']:
- ansible_module.exit_json(failed=True, changed=False, state='unknown',
- results="No Operation Type provided")
-
- operation['opcommand']['type'] = get_opcommand_type(operation['opcommand']['type'])
-
- if operation['opcommand']['type'] == CUSTOM_SCRIPT_ACTION:
-
- if 'execute_on' in operation['opcommand']:
- operation['opcommand']['execute_on'] = get_execute_on(operation['opcommand']['execute_on'])
-
- # custom script still requires the target hosts/groups to be set
- operation['opcommand_hst'] = []
- operation['opcommand_grp'] = []
- for usr_host in operation['target_hosts']:
- if usr_host['target_type'] == 'zabbix server':
- # 0 = target host local/current host
- operation['opcommand_hst'].append({'hostid': 0})
- elif usr_host['target_type'] == 'group':
- group_name = usr_host['target']
- gid = get_host_group_id_by_name(zapi, group_name)
- operation['opcommand_grp'].append({'groupid': gid})
- elif usr_host['target_type'] == 'host':
- host_name = usr_host['target']
- hid = get_host_id_by_name(zapi, host_name)
- operation['opcommand_hst'].append({'hostid': hid})
-
- # 'target_hosts' is just to make it easier to build zbx_actions
- # not part of ZabbixAPI
- del operation['target_hosts']
- else:
- ansible_module.exit_json(failed=True, changed=False, state='unknown',
- results="Unsupported remote command type")
-
-
-def get_action_operations(ansible_module, zapi, inc_operations):
- '''Convert the operations into syntax for api'''
- for operation in inc_operations:
- operation['operationtype'] = get_operation_type(operation['operationtype'])
- if operation['operationtype'] == 0: # send message: resolve names to ids
- operation['opmessage']['mediatypeid'] = \
- get_mediatype_id_by_name(zapi, operation['opmessage']['mediatypeid'])
- operation['opmessage_grp'] = get_user_groups(zapi, operation.get('opmessage_grp', []))
- operation['opmessage_usr'] = get_users(zapi, operation.get('opmessage_usr', []))
- if operation['opmessage']['default_msg']:
- operation['opmessage']['default_msg'] = 1
- else:
- operation['opmessage']['default_msg'] = 0
-
- elif operation['operationtype'] == OPERATION_REMOTE_COMMAND:
- action_remote_command(ansible_module, zapi, operation)
-
-        # Handle Operation conditions:
-        # Currently there is only one available,
-        # 'event acknowledged'. If more are added
-        # in the future, we will need to pass this
-        # option to a function and return the correct conditiontype.
- if operation.has_key('opconditions'):
- for condition in operation['opconditions']:
- if condition['conditiontype'] == 'event acknowledged':
- condition['conditiontype'] = 14
-
- if condition['operator'] == '=':
- condition['operator'] = 0
-
- if condition['value'] == 'acknowledged':
- condition['value'] = 1
- else:
- condition['value'] = 0
-
-
- return inc_operations
-
-def get_operation_evaltype(inc_type):
- '''get the operation evaltype'''
- rval = 0
- if inc_type == 'and/or':
- rval = 0
- elif inc_type == 'and':
- rval = 1
- elif inc_type == 'or':
- rval = 2
- elif inc_type == 'custom':
- rval = 3
-
- return rval
-
-def get_action_conditions(zapi, event_source, inc_conditions):
- '''Convert the conditions into syntax for api'''
-
- calc_type = inc_conditions.pop('calculation_type')
- inc_conditions['evaltype'] = get_operation_evaltype(calc_type)
- for cond in inc_conditions['conditions']:
-
- cond['operator'] = get_condition_operator(cond['operator'])
- # Based on conditiontype we need to set the proper value
- # e.g. conditiontype = hostgroup then the value needs to be a hostgroup id
- # e.g. conditiontype = host the value needs to be a host id
- cond['conditiontype'] = get_condition_type(event_source, cond['conditiontype'])
- if cond['conditiontype'] == 0:
- cond['value'] = get_host_group_id_by_name(zapi, cond['value'])
- elif cond['conditiontype'] == 1:
- cond['value'] = get_host_id_by_name(zapi, cond['value'])
- elif cond['conditiontype'] == 4:
- cond['value'] = get_priority(cond['value'])
-
- elif cond['conditiontype'] == 5:
- cond['value'] = get_trigger_value(cond['value'])
- elif cond['conditiontype'] == 13:
- cond['value'] = get_template_id_by_name(zapi, cond['value'])
- elif cond['conditiontype'] == 16:
- cond['value'] = ''
-
- return inc_conditions
-
-
-def get_send_recovery(send_recovery):
- '''Get the integer value'''
- rval = 0
- if send_recovery:
- rval = 1
-
- return rval
-
-# The branches are needed for CRUD and error handling
-# pylint: disable=too-many-branches
-def main():
- '''
- ansible zabbix module for zbx_item
- '''
-
-
- module = AnsibleModule(
- argument_spec=dict(
- zbx_server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'),
- zbx_user=dict(default=os.environ.get('ZABBIX_USER', None), type='str'),
- zbx_password=dict(default=os.environ.get('ZABBIX_PASSWORD', None), type='str'),
- zbx_debug=dict(default=False, type='bool'),
-
- name=dict(default=None, type='str'),
- event_source=dict(default='trigger', choices=['trigger', 'discovery', 'auto', 'internal'], type='str'),
- action_subject=dict(default="{TRIGGER.NAME}: {TRIGGER.STATUS}", type='str'),
- action_message=dict(default="{TRIGGER.NAME}: {TRIGGER.STATUS}\r\n" +
- "Last value: {ITEM.LASTVALUE}\r\n\r\n{TRIGGER.URL}", type='str'),
- reply_subject=dict(default="{TRIGGER.NAME}: {TRIGGER.STATUS}", type='str'),
- reply_message=dict(default="Trigger: {TRIGGER.NAME}\r\nTrigger status: {TRIGGER.STATUS}\r\n" +
- "Trigger severity: {TRIGGER.SEVERITY}\r\nTrigger URL: {TRIGGER.URL}\r\n\r\n" +
- "Item values:\r\n\r\n1. {ITEM.NAME1} ({HOST.NAME1}:{ITEM.KEY1}): " +
- "{ITEM.VALUE1}\r\n2. {ITEM.NAME2} ({HOST.NAME2}:{ITEM.KEY2}): " +
- "{ITEM.VALUE2}\r\n3. {ITEM.NAME3} ({HOST.NAME3}:{ITEM.KEY3}): " +
- "{ITEM.VALUE3}", type='str'),
- send_recovery=dict(default=False, type='bool'),
- status=dict(default=None, type='str'),
- escalation_time=dict(default=60, type='int'),
- conditions_filter=dict(default=None, type='dict'),
- operations=dict(default=None, type='list'),
- state=dict(default='present', type='str'),
- ),
- #supports_check_mode=True
- )
-
- zapi = ZabbixAPI(ZabbixConnection(module.params['zbx_server'],
- module.params['zbx_user'],
- module.params['zbx_password'],
- module.params['zbx_debug']))
-
- #Set the instance and the template for the rest of the calls
- zbx_class_name = 'action'
- state = module.params['state']
-
- content = zapi.get_content(zbx_class_name,
- 'get',
- {'search': {'name': module.params['name']},
- 'selectFilter': 'extend',
- 'selectOperations': 'extend',
- })
-
- #******#
- # GET
- #******#
- if state == 'list':
- module.exit_json(changed=False, results=content['result'], state="list")
-
- #******#
- # DELETE
- #******#
- if state == 'absent':
- if not exists(content):
- module.exit_json(changed=False, state="absent")
-
- content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0]['actionid']])
- module.exit_json(changed=True, results=content['result'], state="absent")
-
- # Create and Update
- if state == 'present':
-
- conditions = get_action_conditions(zapi, module.params['event_source'], module.params['conditions_filter'])
- operations = get_action_operations(module, zapi,
- module.params['operations'])
- params = {'name': module.params['name'],
- 'esc_period': module.params['escalation_time'],
- 'eventsource': get_event_source(module.params['event_source']),
- 'status': get_status(module.params['status']),
- 'def_shortdata': module.params['action_subject'],
- 'def_longdata': module.params['action_message'],
- 'r_shortdata': module.params['reply_subject'],
- 'r_longdata': module.params['reply_message'],
- 'recovery_msg': get_send_recovery(module.params['send_recovery']),
- 'filter': conditions,
- 'operations': operations,
- }
-
- # Remove any None valued params
- _ = [params.pop(key, None) for key in params.keys() if params[key] is None]
-
- #******#
- # CREATE
- #******#
- if not exists(content):
- content = zapi.get_content(zbx_class_name, 'create', params)
-
- if content.has_key('error'):
- module.exit_json(failed=True, changed=True, results=content['error'], state="present")
-
- module.exit_json(changed=True, results=content['result'], state='present')
-
-
- ########
- # UPDATE
- ########
- _ = params.pop('hostid', None)
- differences = {}
- zab_results = content['result'][0]
- for key, value in params.items():
-
- if key == 'operations':
- ops = operation_differences(zab_results[key], value)
- if ops:
- differences[key] = ops
-
- elif key == 'filter':
- filters = filter_differences(zab_results[key], value)
- if filters:
- differences[key] = filters
-
- elif zab_results[key] != value and zab_results[key] != str(value):
- differences[key] = value
-
- if not differences:
- module.exit_json(changed=False, results=zab_results, state="present")
-
- # We have differences and need to update.
- # action update requires an id, filters, and operations
- differences['actionid'] = zab_results['actionid']
- differences['operations'] = params['operations']
- differences['filter'] = params['filter']
- content = zapi.get_content(zbx_class_name, 'update', differences)
-
- if content.has_key('error'):
- module.exit_json(failed=True, changed=False, results=content['error'], state="present")
-
- module.exit_json(changed=True, results=content['result'], state="present")
-
- module.exit_json(failed=True,
- changed=False,
- results='Unknown state passed. %s' % state,
- state="unknown")
-
-# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
-# import module snippets. These are required
-from ansible.module_utils.basic import *
-
-main()
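
The helpers above (get_operation_type, get_opcommand_type, get_execute_on, get_operation_evaltype) all follow one pattern: translate a human-readable name into the integer constant the Zabbix API expects. A minimal standalone sketch of that pattern, mirroring get_operation_evaltype; the EVALTYPES and demo_evaltype names are illustrative, not part of the module:

    # Translate a human-readable evaltype into the integer the Zabbix API expects.
    EVALTYPES = {'and/or': 0, 'and': 1, 'or': 2, 'custom': 3}

    def demo_evaltype(name):
        # Unknown names fall back to 0 ('and/or'), matching the helper's default.
        return EVALTYPES.get(name, 0)

    assert demo_evaltype('custom') == 3
    assert demo_evaltype('nonsense') == 0
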
diff --git a/roles/lib_zabbix/library/zbx_application.py b/roles/lib_zabbix/library/zbx_application.py
deleted file mode 100644
index 472390071..000000000
--- a/roles/lib_zabbix/library/zbx_application.py
+++ /dev/null
@@ -1,142 +0,0 @@
-#!/usr/bin/env python
-'''
-Ansible module for application
-'''
-# vim: expandtab:tabstop=4:shiftwidth=4
-#
-# Zabbix application ansible module
-#
-#
-# Copyright 2015 Red Hat Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# This is in place because each module looks similar to the others.
-# They need duplicate code because their behavior is very similar
-# but differs for each zabbix class.
-# pylint: disable=duplicate-code
-
-# pylint: disable=import-error
-from openshift_tools.zbxapi import ZabbixAPI, ZabbixConnection
-
-def exists(content, key='result'):
-    ''' Check that key exists in content and that content[key] is non-empty
- '''
- if not content.has_key(key):
- return False
-
- if not content[key]:
- return False
-
- return True
-
-def get_template_ids(zapi, template_name):
- '''
- get related templates
- '''
- template_ids = []
- # Fetch templates by name
- content = zapi.get_content('template',
- 'get',
- {'search': {'host': template_name}})
-    if content.has_key('result') and content['result']:
- template_ids.append(content['result'][0]['templateid'])
- return template_ids
-
-def main():
- ''' Ansible module for application
- '''
-
- module = AnsibleModule(
- argument_spec=dict(
- zbx_server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'),
- zbx_user=dict(default=os.environ.get('ZABBIX_USER', None), type='str'),
- zbx_password=dict(default=os.environ.get('ZABBIX_PASSWORD', None), type='str'),
- zbx_debug=dict(default=False, type='bool'),
- name=dict(default=None, type='str', required=True),
- template_name=dict(default=None, type='str'),
- state=dict(default='present', type='str'),
- ),
- #supports_check_mode=True
- )
-
- zapi = ZabbixAPI(ZabbixConnection(module.params['zbx_server'],
- module.params['zbx_user'],
- module.params['zbx_password'],
- module.params['zbx_debug']))
-
- #Set the instance and the application for the rest of the calls
- zbx_class_name = 'application'
- idname = 'applicationid'
- aname = module.params['name']
- state = module.params['state']
- # get a applicationid, see if it exists
- tids = get_template_ids(zapi, module.params['template_name'])
- content = zapi.get_content(zbx_class_name,
- 'get',
- {'search': {'name': aname},
- 'templateids': tids[0],
- })
- if state == 'list':
- module.exit_json(changed=False, results=content['result'], state="list")
-
- if state == 'absent':
- if not exists(content):
- module.exit_json(changed=False, state="absent")
-
- content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]])
- module.exit_json(changed=True, results=content['result'], state="absent")
-
- if state == 'present':
- params = {'hostid': tids[0],
- 'name': aname,
- }
-
- if not exists(content):
- # if we didn't find it, create it
- content = zapi.get_content(zbx_class_name, 'create', params)
- module.exit_json(changed=True, results=content['result'], state='present')
- # already exists, we need to update it
- # let's compare properties
- differences = {}
- zab_results = content['result'][0]
- for key, value in params.items():
- if key == 'templates' and zab_results.has_key('parentTemplates'):
- if zab_results['parentTemplates'] != value:
- differences[key] = value
- elif zab_results[key] != str(value) and zab_results[key] != value:
- differences[key] = value
-
- if not differences:
- module.exit_json(changed=False, results=content['result'], state="present")
-
- # We have differences and need to update
- differences[idname] = zab_results[idname]
- content = zapi.get_content(zbx_class_name, 'update', differences)
-
- if content.has_key('error'):
- module.exit_json(failed=True, changed=False, results=content['error'], state="present")
-
- module.exit_json(changed=True, results=content['result'], state="present")
-
- module.exit_json(failed=True,
- changed=False,
- results='Unknown state passed. %s' % state,
- state="unknown")
-
-# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
-# import module snippets. These are required
-from ansible.module_utils.basic import *
-
-main()
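
The update path above relies on a comparison idiom shared by all of these modules: the Zabbix API returns most scalar values as strings, so each parameter is compared both as-is and through str() before being flagged as a difference. A standalone sketch of that loop; diff_params is an illustrative name:

    def diff_params(zab_results, params):
        '''Collect params whose values differ from the zabbix results.'''
        differences = {}
        for key, value in params.items():
            # The API returns most scalars as strings, so compare both ways.
            if zab_results[key] != value and zab_results[key] != str(value):
                differences[key] = value
        return differences

    # An int compares equal to its string form, so nothing is flagged here.
    assert diff_params({'name': 'Web', 'hostid': '10001'},
                       {'name': 'Web', 'hostid': 10001}) == {}
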
diff --git a/roles/lib_zabbix/library/zbx_discoveryrule.py b/roles/lib_zabbix/library/zbx_discoveryrule.py
deleted file mode 100644
index 7c5f98397..000000000
--- a/roles/lib_zabbix/library/zbx_discoveryrule.py
+++ /dev/null
@@ -1,205 +0,0 @@
-#!/usr/bin/env python
-'''
-Zabbix discovery rule ansible module
-'''
-# vim: expandtab:tabstop=4:shiftwidth=4
-#
-# Copyright 2015 Red Hat Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# This is in place because each module looks similar to the others.
-# They need duplicate code because their behavior is very similar
-# but differs for each zabbix class.
-# pylint: disable=duplicate-code
-
-# pylint: disable=import-error
-from openshift_tools.zbxapi import ZabbixAPI, ZabbixConnection
-
-def exists(content, key='result'):
-    ''' Check that key exists in content and that content[key] is non-empty
- '''
- if not content.has_key(key):
- return False
-
- if not content[key]:
- return False
-
- return True
-
-def get_template(zapi, template_name):
- '''get a template by name
- '''
- content = zapi.get_content('template',
- 'get',
- {'search': {'host': template_name},
- 'output': 'extend',
- 'selectInterfaces': 'interfaceid',
- })
- if not content['result']:
- return None
- return content['result'][0]
-
-def get_type(vtype):
- '''
-    Determine which type of discovery rule this is
- '''
- _types = {'agent': 0,
- 'SNMPv1': 1,
- 'trapper': 2,
- 'simple': 3,
- 'SNMPv2': 4,
- 'internal': 5,
- 'SNMPv3': 6,
- 'active': 7,
- 'external': 10,
- 'database monitor': 11,
- 'ipmi': 12,
- 'ssh': 13,
- 'telnet': 14,
- 'JMX': 16,
- }
-
- for typ in _types.keys():
- if vtype in typ or vtype == typ:
- _vtype = _types[typ]
- break
- else:
- _vtype = 2
-
- return _vtype
-
-def main():
- '''
- Ansible module for zabbix discovery rules
- '''
-
-
- module = AnsibleModule(
- argument_spec=dict(
- zbx_server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'),
- zbx_user=dict(default=os.environ.get('ZABBIX_USER', None), type='str'),
- zbx_password=dict(default=os.environ.get('ZABBIX_PASSWORD', None), type='str'),
- zbx_debug=dict(default=False, type='bool'),
- name=dict(default=None, type='str'),
- key=dict(default=None, type='str'),
- description=dict(default=None, type='str'),
- interfaceid=dict(default=None, type='int'),
- ztype=dict(default='trapper', type='str'),
- delay=dict(default=60, type='int'),
- lifetime=dict(default=30, type='int'),
- template_name=dict(default=[], type='list'),
- state=dict(default='present', type='str'),
- ),
- #supports_check_mode=True
- )
-
- zapi = ZabbixAPI(ZabbixConnection(module.params['zbx_server'],
- module.params['zbx_user'],
- module.params['zbx_password'],
- module.params['zbx_debug']))
-
- #Set the instance and the template for the rest of the calls
- zbx_class_name = 'discoveryrule'
- idname = "itemid"
- dname = module.params['name']
- state = module.params['state']
- template = get_template(zapi, module.params['template_name'])
-
-    # The commented-out select options below don't appear to be working but may be needed.
- content = zapi.get_content(zbx_class_name,
- 'get',
- {'search': {'name': dname},
- 'templateids': template['templateid'],
- #'selectDServices': 'extend',
- #'selectDChecks': 'extend',
- #'selectDhosts': 'dhostid',
- })
-
- #******#
- # GET
- #******#
- if state == 'list':
- module.exit_json(changed=False, results=content['result'], state="list")
-
- #******#
- # DELETE
- #******#
- if state == 'absent':
- if not exists(content):
- module.exit_json(changed=False, state="absent")
-
- content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]])
- module.exit_json(changed=True, results=content['result'], state="absent")
-
-
- # Create and Update
- if state == 'present':
- params = {'name': dname,
- 'key_': module.params['key'],
- 'hostid': template['templateid'],
- 'interfaceid': module.params['interfaceid'],
- 'lifetime': module.params['lifetime'],
- 'type': get_type(module.params['ztype']),
- 'description': module.params['description'],
- }
- if params['type'] in [2, 5, 7, 11]:
- params.pop('interfaceid')
-
- # Remove any None valued params
- _ = [params.pop(key, None) for key in params.keys() if params[key] is None]
-
- #******#
- # CREATE
- #******#
- if not exists(content):
- content = zapi.get_content(zbx_class_name, 'create', params)
-
- if content.has_key('error'):
- module.exit_json(failed=True, changed=True, results=content['error'], state="present")
-
- module.exit_json(changed=True, results=content['result'], state='present')
-
- ########
- # UPDATE
- ########
- differences = {}
- zab_results = content['result'][0]
- for key, value in params.items():
-
- if zab_results[key] != value and zab_results[key] != str(value):
- differences[key] = value
-
- if not differences:
- module.exit_json(changed=False, results=zab_results, state="present")
-
- # We have differences and need to update
- differences[idname] = zab_results[idname]
- content = zapi.get_content(zbx_class_name, 'update', differences)
-
- if content.has_key('error'):
- module.exit_json(failed=True, changed=False, results=content['error'], state="present")
-
- module.exit_json(changed=True, results=content['result'], state="present")
-
- module.exit_json(failed=True,
- changed=False,
- results='Unknown state passed. %s' % state,
- state="unknown")
-
-# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
-# import module snippets. These are required
-from ansible.module_utils.basic import *
-
-main()
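
get_type() above matches the requested type by substring and uses a for/else fallback, so an unrecognized value silently becomes trapper (2). A standalone sketch of that lookup with a trimmed mapping; TYPES and demo_get_type are illustrative names:

    TYPES = {'agent': 0, 'trapper': 2, 'simple': 3, 'internal': 5}

    def demo_get_type(vtype):
        for typ in TYPES:
            if vtype in typ or vtype == typ:
                return TYPES[typ]
        return 2  # the for/else branch in the original: default to trapper

    assert demo_get_type('agent') == 0
    assert demo_get_type('something else') == 2
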
diff --git a/roles/lib_zabbix/library/zbx_graph.py b/roles/lib_zabbix/library/zbx_graph.py
deleted file mode 100644
index 71f4e1264..000000000
--- a/roles/lib_zabbix/library/zbx_graph.py
+++ /dev/null
@@ -1,331 +0,0 @@
-#!/usr/bin/env python
-'''
- Ansible module for zabbix graphs
-'''
-# vim: expandtab:tabstop=4:shiftwidth=4
-#
-# Zabbix graphs ansible module
-#
-#
-# Copyright 2015 Red Hat Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-#---
-#- hosts: localhost
-# gather_facts: no
-# tasks:
-# - zbx_graph:
-# zbx_server: https://zabbixserver/zabbix/api_jsonrpc.php
-# zbx_user: Admin
-# zbx_password: zabbix
-# name: Test Graph
-# height: 300
-# width: 500
-# graph_items:
-# - item_name: openshift.master.etcd.create.fail
-# color: red
-# line_style: bold
-# - item_name: openshift.master.etcd.create.success
-# color: red
-# line_style: bold
-#
-#
-
-# This is in place because each module looks similar to the others.
-# They need duplicate code because their behavior is very similar
-# but differs for each zabbix class.
-# pylint: disable=duplicate-code
-
-# pylint: disable=import-error
-from openshift_tools.zbxapi import ZabbixAPI, ZabbixConnection
-
-def exists(content, key='result'):
-    ''' Check that key exists in content and that content[key] is non-empty
- '''
- if not content.has_key(key):
- return False
-
- if not content[key]:
- return False
-
- return True
-
-def get_graph_type(graphtype):
- '''
- Possible values:
- 0 - normal;
- 1 - stacked;
- 2 - pie;
- 3 - exploded;
- '''
- gtype = 0
- if 'stacked' in graphtype:
- gtype = 1
- elif 'pie' in graphtype:
- gtype = 2
- elif 'exploded' in graphtype:
- gtype = 3
-
- return gtype
-
-def get_show_legend(show_legend):
- '''Get the value for show_legend
- 0 - hide
- 1 - (default) show
- '''
- rval = 1
- if 'hide' == show_legend:
- rval = 0
-
- return rval
-
-def get_template_id(zapi, template_name):
- '''
- get related templates
- '''
- # Fetch templates by name
- content = zapi.get_content('template',
- 'get',
- {'filter': {'host': template_name},})
-
-    if content.has_key('result') and content['result']:
- return content['result'][0]['templateid']
-
- return None
-
-def get_color(color_in):
- ''' Receive a color and translate it to a hex representation of the color
-
-    A few common colors are set up by default
- '''
- colors = {'black': '000000',
- 'red': 'FF0000',
- 'pink': 'FFC0CB',
- 'purple': '800080',
- 'orange': 'FFA500',
- 'gold': 'FFD700',
- 'yellow': 'FFFF00',
- 'green': '008000',
- 'cyan': '00FFFF',
- 'aqua': '00FFFF',
- 'blue': '0000FF',
- 'brown': 'A52A2A',
- 'gray': '808080',
- 'grey': '808080',
- 'silver': 'C0C0C0',
- }
- if colors.has_key(color_in):
- return colors[color_in]
-
- return color_in
-
-def get_line_style(style):
- '''determine the line style
- '''
- line_style = {'line': 0,
- 'filled': 1,
- 'bold': 2,
- 'dot': 3,
- 'dashed': 4,
- 'gradient': 5,
- }
-
- if line_style.has_key(style):
- return line_style[style]
-
- return 0
-
-def get_calc_function(func):
-    '''Determine the calculation function'''
- rval = 2 # default to avg
- if 'min' in func:
- rval = 1
- elif 'max' in func:
- rval = 4
- elif 'all' in func:
- rval = 7
- elif 'last' in func:
- rval = 9
-
- return rval
-
-def get_graph_item_type(gtype):
- '''Determine the graph item type
- '''
- rval = 0 # simple graph type
- if 'sum' in gtype:
- rval = 2
-
- return rval
-
-def get_graph_items(zapi, gitems):
- '''Get graph items by id'''
-
- r_items = []
- for item in gitems:
- content = zapi.get_content('item',
- 'get',
- {'filter': {'name': item['item_name']}})
- _ = item.pop('item_name')
- color = get_color(item.pop('color'))
- drawtype = get_line_style(item.get('line_style', 'line'))
- func = get_calc_function(item.get('calc_func', 'avg'))
- g_type = get_graph_item_type(item.get('graph_item_type', 'simple'))
-
-        if content.has_key('result') and content['result']:
- tmp = {'itemid': content['result'][0]['itemid'],
- 'color': color,
- 'drawtype': drawtype,
- 'calc_fnc': func,
- 'type': g_type,
- }
- r_items.append(tmp)
-
- return r_items
-
-def compare_gitems(zabbix_items, user_items):
- '''Compare zabbix results with the user's supplied items
- return True if user_items are equal
- return False if any of the values differ
- '''
- if len(zabbix_items) != len(user_items):
- return False
-
- for u_item in user_items:
- for z_item in zabbix_items:
- if u_item['itemid'] == z_item['itemid']:
- if not all([str(value) == z_item[key] for key, value in u_item.items()]):
- return False
-
- return True
-
-# The branches are needed for CRUD and error handling
-# pylint: disable=too-many-branches
-def main():
- '''
- ansible zabbix module for zbx_graphs
- '''
-
- module = AnsibleModule(
- argument_spec=dict(
- zbx_server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'),
- zbx_user=dict(default=os.environ.get('ZABBIX_USER', None), type='str'),
- zbx_password=dict(default=os.environ.get('ZABBIX_PASSWORD', None), type='str'),
- zbx_debug=dict(default=False, type='bool'),
- name=dict(default=None, type='str'),
- height=dict(default=None, type='int'),
- width=dict(default=None, type='int'),
- graph_type=dict(default='normal', type='str'),
- show_legend=dict(default='show', type='str'),
- state=dict(default='present', type='str'),
- graph_items=dict(default=None, type='list'),
- ),
- #supports_check_mode=True
- )
-
- zapi = ZabbixAPI(ZabbixConnection(module.params['zbx_server'],
- module.params['zbx_user'],
- module.params['zbx_password'],
- module.params['zbx_debug']))
-
- #Set the instance and the template for the rest of the calls
- zbx_class_name = 'graph'
- state = module.params['state']
-
- content = zapi.get_content(zbx_class_name,
- 'get',
- {'filter': {'name': module.params['name']},
- #'templateids': templateid,
- 'selectGraphItems': 'extend',
- })
-
- #******#
- # GET
- #******#
- if state == 'list':
- module.exit_json(changed=False, results=content['result'], state="list")
-
- #******#
- # DELETE
- #******#
- if state == 'absent':
- if not exists(content):
- module.exit_json(changed=False, state="absent")
-
- content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0]['graphid']])
- module.exit_json(changed=True, results=content['result'], state="absent")
-
- # Create and Update
- if state == 'present':
-
- params = {'name': module.params['name'],
- 'height': module.params['height'],
- 'width': module.params['width'],
- 'graphtype': get_graph_type(module.params['graph_type']),
- 'show_legend': get_show_legend(module.params['show_legend']),
- 'gitems': get_graph_items(zapi, module.params['graph_items']),
- }
-
- # Remove any None valued params
- _ = [params.pop(key, None) for key in params.keys() if params[key] is None]
-
- #******#
- # CREATE
- #******#
- if not exists(content):
- content = zapi.get_content(zbx_class_name, 'create', params)
-
- if content.has_key('error'):
- module.exit_json(failed=True, changed=True, results=content['error'], state="present")
-
- module.exit_json(changed=True, results=content['result'], state='present')
-
-
- ########
- # UPDATE
- ########
- differences = {}
- zab_results = content['result'][0]
- for key, value in params.items():
-
- if key == 'gitems':
- if not compare_gitems(zab_results[key], value):
- differences[key] = value
-
- elif zab_results[key] != value and zab_results[key] != str(value):
- differences[key] = value
-
- if not differences:
- module.exit_json(changed=False, results=zab_results, state="present")
-
- # We have differences and need to update
- differences['graphid'] = zab_results['graphid']
- content = zapi.get_content(zbx_class_name, 'update', differences)
-
- if content.has_key('error'):
- module.exit_json(failed=True, changed=False, results=content['error'], state="present")
-
- module.exit_json(changed=True, results=content['result'], state="present")
-
- module.exit_json(failed=True,
- changed=False,
- results='Unknown state passed. %s' % state,
- state="unknown")
-
-# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
-# import module snippets. These are required
-from ansible.module_utils.basic import *
-
-main()
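
compare_gitems() above pairs graph items by itemid and treats the lists as equal only if every user-supplied field matches the Zabbix value after string coercion. A standalone sketch; demo_compare is an illustrative name:

    def demo_compare(zabbix_items, user_items):
        if len(zabbix_items) != len(user_items):
            return False
        for u_item in user_items:
            for z_item in zabbix_items:
                if u_item['itemid'] == z_item['itemid']:
                    # Zabbix returns strings, so coerce before comparing.
                    if not all(str(v) == z_item[k] for k, v in u_item.items()):
                        return False
        return True

    zbx = [{'itemid': '100', 'color': 'FF0000', 'drawtype': '2'}]
    usr = [{'itemid': '100', 'color': 'FF0000', 'drawtype': 2}]
    assert demo_compare(zbx, usr)
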
diff --git a/roles/lib_zabbix/library/zbx_graphprototype.py b/roles/lib_zabbix/library/zbx_graphprototype.py
deleted file mode 100644
index d63873b00..000000000
--- a/roles/lib_zabbix/library/zbx_graphprototype.py
+++ /dev/null
@@ -1,331 +0,0 @@
-#!/usr/bin/env python
-'''
- Ansible module for zabbix graphprototypes
-'''
-# vim: expandtab:tabstop=4:shiftwidth=4
-#
-# Zabbix graphprototypes ansible module
-#
-#
-# Copyright 2015 Red Hat Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-#---
-#- hosts: localhost
-# gather_facts: no
-# tasks:
-# - zbx_graphprototype:
-# zbx_server: https://zabbixserver/zabbix/api_jsonrpc.php
-# zbx_user: Admin
-# zbx_password: zabbix
-# name: Test Graph
-# height: 300
-# width: 500
-# graph_items:
-# - item_name: Bytes per second IN on network interface {#OSO_NET_INTERFACE}
-# color: red
-# line_style: bold
-# item_type: prototype
-#      - item_name: 'Template OS Linux: Bytes per second OUT on network interface {#OSO_NET_INTERFACE}'
-# item_type: prototype
-#
-#
-
-# This is in place because each module looks similar to the others.
-# They need duplicate code because their behavior is very similar
-# but differs for each zabbix class.
-# pylint: disable=duplicate-code
-
-# pylint: disable=import-error
-from openshift_tools.zbxapi import ZabbixAPI, ZabbixConnection
-
-def exists(content, key='result'):
-    ''' Check that key exists in content and that content[key] is non-empty
- '''
- if not content.has_key(key):
- return False
-
- if not content[key]:
- return False
-
- return True
-
-def get_graph_type(graphtype):
- '''
- Possible values:
- 0 - normal;
- 1 - stacked;
- 2 - pie;
- 3 - exploded;
- '''
- gtype = 0
- if 'stacked' in graphtype:
- gtype = 1
- elif 'pie' in graphtype:
- gtype = 2
- elif 'exploded' in graphtype:
- gtype = 3
-
- return gtype
-
-def get_show_legend(show_legend):
- '''Get the value for show_legend
- 0 - hide
- 1 - (default) show
- '''
- rval = 1
- if 'hide' == show_legend:
- rval = 0
-
- return rval
-
-def get_template_id(zapi, template_name):
- '''
- get related templates
- '''
- # Fetch templates by name
- content = zapi.get_content('template',
- 'get',
- {'filter': {'host': template_name},})
-
-    if content.has_key('result') and content['result']:
- return content['result'][0]['templateid']
-
- return None
-
-def get_color(color_in='black'):
- ''' Receive a color and translate it to a hex representation of the color
-
-    A few common colors are set up by default
- '''
- colors = {'black': '000000',
- 'red': 'FF0000',
- 'pink': 'FFC0CB',
- 'purple': '800080',
- 'orange': 'FFA500',
- 'gold': 'FFD700',
- 'yellow': 'FFFF00',
- 'green': '008000',
- 'cyan': '00FFFF',
- 'aqua': '00FFFF',
- 'blue': '0000FF',
- 'brown': 'A52A2A',
- 'gray': '808080',
- 'grey': '808080',
- 'silver': 'C0C0C0',
- }
- if colors.has_key(color_in):
- return colors[color_in]
-
- return color_in
-
-def get_line_style(style):
- '''determine the line style
- '''
- line_style = {'line': 0,
- 'filled': 1,
- 'bold': 2,
- 'dot': 3,
- 'dashed': 4,
- 'gradient': 5,
- }
-
- if line_style.has_key(style):
- return line_style[style]
-
- return 0
-
-def get_calc_function(func):
-    '''Determine the calculation function'''
- rval = 2 # default to avg
- if 'min' in func:
- rval = 1
- elif 'max' in func:
- rval = 4
- elif 'all' in func:
- rval = 7
- elif 'last' in func:
- rval = 9
-
- return rval
-
-def get_graph_item_type(gtype):
- '''Determine the graph item type
- '''
- rval = 0 # simple graph type
- if 'sum' in gtype:
- rval = 2
-
- return rval
-
-def get_graph_items(zapi, gitems):
- '''Get graph items by id'''
-
- r_items = []
- for item in gitems:
- content = zapi.get_content('item%s' % item.get('item_type', ''),
- 'get',
- {'filter': {'name': item['item_name']}})
- _ = item.pop('item_name')
- color = get_color(item.pop('color', 'black'))
- drawtype = get_line_style(item.get('line_style', 'line'))
- func = get_calc_function(item.get('calc_func', 'avg'))
- g_type = get_graph_item_type(item.get('graph_item_type', 'simple'))
-
-        if content.has_key('result') and content['result']:
- tmp = {'itemid': content['result'][0]['itemid'],
- 'color': color,
- 'drawtype': drawtype,
- 'calc_fnc': func,
- 'type': g_type,
- }
- r_items.append(tmp)
-
- return r_items
-
-def compare_gitems(zabbix_items, user_items):
- '''Compare zabbix results with the user's supplied items
- return True if user_items are equal
- return False if any of the values differ
- '''
- if len(zabbix_items) != len(user_items):
- return False
-
- for u_item in user_items:
- for z_item in zabbix_items:
- if u_item['itemid'] == z_item['itemid']:
- if not all([str(value) == z_item[key] for key, value in u_item.items()]):
- return False
-
- return True
-
-# The branches are needed for CRUD and error handling
-# pylint: disable=too-many-branches
-def main():
- '''
- ansible zabbix module for zbx_graphprototypes
- '''
-
- module = AnsibleModule(
- argument_spec=dict(
- zbx_server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'),
- zbx_user=dict(default=os.environ.get('ZABBIX_USER', None), type='str'),
- zbx_password=dict(default=os.environ.get('ZABBIX_PASSWORD', None), type='str'),
- zbx_debug=dict(default=False, type='bool'),
- name=dict(default=None, type='str'),
- height=dict(default=None, type='int'),
- width=dict(default=None, type='int'),
- graph_type=dict(default='normal', type='str'),
- show_legend=dict(default='show', type='str'),
- state=dict(default='present', type='str'),
- graph_items=dict(default=None, type='list'),
- ),
- #supports_check_mode=True
- )
-
- zapi = ZabbixAPI(ZabbixConnection(module.params['zbx_server'],
- module.params['zbx_user'],
- module.params['zbx_password'],
- module.params['zbx_debug']))
-
- #Set the instance and the template for the rest of the calls
- zbx_class_name = 'graphprototype'
- state = module.params['state']
-
- content = zapi.get_content(zbx_class_name,
- 'get',
- {'filter': {'name': module.params['name']},
- #'templateids': templateid,
- 'selectGraphItems': 'extend',
- })
-
- #******#
- # GET
- #******#
- if state == 'list':
- module.exit_json(changed=False, results=content['result'], state="list")
-
- #******#
- # DELETE
- #******#
- if state == 'absent':
- if not exists(content):
- module.exit_json(changed=False, state="absent")
-
- content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0]['graphid']])
- module.exit_json(changed=True, results=content['result'], state="absent")
-
- # Create and Update
- if state == 'present':
-
- params = {'name': module.params['name'],
- 'height': module.params['height'],
- 'width': module.params['width'],
- 'graphtype': get_graph_type(module.params['graph_type']),
- 'show_legend': get_show_legend(module.params['show_legend']),
- 'gitems': get_graph_items(zapi, module.params['graph_items']),
- }
-
- # Remove any None valued params
- _ = [params.pop(key, None) for key in params.keys() if params[key] is None]
-
- #******#
- # CREATE
- #******#
- if not exists(content):
- content = zapi.get_content(zbx_class_name, 'create', params)
-
- if content.has_key('error'):
- module.exit_json(failed=True, changed=True, results=content['error'], state="present")
-
- module.exit_json(changed=True, results=content['result'], state='present')
-
-
- ########
- # UPDATE
- ########
- differences = {}
- zab_results = content['result'][0]
- for key, value in params.items():
-
- if key == 'gitems':
- if not compare_gitems(zab_results[key], value):
- differences[key] = value
-
- elif zab_results[key] != value and zab_results[key] != str(value):
- differences[key] = value
-
- if not differences:
- module.exit_json(changed=False, results=zab_results, state="present")
-
- # We have differences and need to update
- differences['graphid'] = zab_results['graphid']
- content = zapi.get_content(zbx_class_name, 'update', differences)
-
- if content.has_key('error'):
- module.exit_json(failed=True, changed=False, results=content['error'], state="present")
-
- module.exit_json(changed=True, results=content['result'], state="present")
-
- module.exit_json(failed=True,
- changed=False,
- results='Unknown state passed. %s' % state,
- state="unknown")
-
-# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
-# import module snippets. These are required
-from ansible.module_utils.basic import *
-
-main()
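
This module is nearly identical to zbx_graph; the substantive difference is in get_graph_items(), where an optional item_type of 'prototype' switches the API class from 'item' to 'itemprototype'. A sketch of that class-name selection; demo_item_class is an illustrative name:

    def demo_item_class(item):
        # item is one user-supplied graph_items entry; 'item_type' is optional.
        return 'item%s' % item.get('item_type', '')

    assert demo_item_class({'item_name': 'cpu'}) == 'item'
    assert demo_item_class({'item_name': 'cpu',
                            'item_type': 'prototype'}) == 'itemprototype'
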
diff --git a/roles/lib_zabbix/library/zbx_host.py b/roles/lib_zabbix/library/zbx_host.py
deleted file mode 100644
index bbeec49ff..000000000
--- a/roles/lib_zabbix/library/zbx_host.py
+++ /dev/null
@@ -1,182 +0,0 @@
-#!/usr/bin/env python
-'''
-Zabbix host ansible module
-'''
-# vim: expandtab:tabstop=4:shiftwidth=4
-#
-# Copyright 2015 Red Hat Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# This is in place because each module looks similar to the others.
-# They need duplicate code because their behavior is very similar
-# but differs for each zabbix class.
-# pylint: disable=duplicate-code
-
-# pylint: disable=import-error
-from openshift_tools.zbxapi import ZabbixAPI, ZabbixConnection
-
-def exists(content, key='result'):
-    ''' Check that key exists in content and that content[key] is non-empty
- '''
- if not content.has_key(key):
- return False
-
- if not content[key]:
- return False
-
- return True
-
-def get_group_ids(zapi, hostgroup_names):
- '''
- get hostgroups
- '''
- # Fetch groups by name
- group_ids = []
- for hgr in hostgroup_names:
- content = zapi.get_content('hostgroup', 'get', {'search': {'name': hgr}})
-        if content.has_key('result') and content['result']:
- group_ids.append({'groupid': content['result'][0]['groupid']})
-
- return group_ids
-
-def get_template_ids(zapi, template_names):
- '''
- get related templates
- '''
- template_ids = []
- # Fetch templates by name
- for template_name in template_names:
- content = zapi.get_content('template', 'get', {'search': {'host': template_name}})
-        if content.has_key('result') and content['result']:
- template_ids.append({'templateid': content['result'][0]['templateid']})
- return template_ids
-
-def interfaces_equal(zbx_interfaces, user_interfaces):
- '''
- compare interfaces from zabbix and interfaces from user
- '''
-
- for u_int in user_interfaces:
- for z_int in zbx_interfaces:
- for u_key, u_val in u_int.items():
- if str(z_int[u_key]) != str(u_val):
- return False
-
- return True
-
-def main():
- '''
- Ansible module for zabbix host
- '''
-
- module = AnsibleModule(
- argument_spec=dict(
- zbx_server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'),
- zbx_user=dict(default=os.environ.get('ZABBIX_USER', None), type='str'),
- zbx_password=dict(default=os.environ.get('ZABBIX_PASSWORD', None), type='str'),
- zbx_debug=dict(default=False, type='bool'),
- name=dict(default=None, type='str'),
- hostgroup_names=dict(default=[], type='list'),
- template_names=dict(default=[], type='list'),
- state=dict(default='present', type='str'),
- interfaces=dict(default=None, type='list'),
- ),
- #supports_check_mode=True
- )
-
- zapi = ZabbixAPI(ZabbixConnection(module.params['zbx_server'],
- module.params['zbx_user'],
- module.params['zbx_password'],
- module.params['zbx_debug']))
-
- #Set the instance and the template for the rest of the calls
- zbx_class_name = 'host'
- idname = "hostid"
- hname = module.params['name']
- state = module.params['state']
-
- # selectInterfaces doesn't appear to be working but is needed.
- content = zapi.get_content(zbx_class_name,
- 'get',
- {'search': {'host': hname},
- 'selectGroups': 'groupid',
- 'selectParentTemplates': 'templateid',
- 'selectInterfaces': 'interfaceid',
- })
- if state == 'list':
- module.exit_json(changed=False, results=content['result'], state="list")
-
- if state == 'absent':
- if not exists(content):
- module.exit_json(changed=False, state="absent")
-
- content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]])
- module.exit_json(changed=True, results=content['result'], state="absent")
-
- if state == 'present':
-        ifs = module.params['interfaces'] or [{'type': 1,         # interface type, 1 = agent
-                                               'main': 1,         # default interface, 1 = true
-                                               'useip': 1,        # connect via IP, 1 = true
-                                               'ip': '127.0.0.1', # IP address for the interface
-                                               'dns': '',         # dns name for the host
-                                               'port': '10050',   # interface port, 10050 = agent default
-                                              }]
- hostgroup_names = list(set(module.params['hostgroup_names']))
- params = {'host': hname,
- 'groups': get_group_ids(zapi, hostgroup_names),
- 'templates': get_template_ids(zapi, module.params['template_names']),
- 'interfaces': ifs,
- }
-
- if not exists(content):
- # if we didn't find it, create it
- content = zapi.get_content(zbx_class_name, 'create', params)
- module.exit_json(changed=True, results=content['result'], state='present')
- # already exists, we need to update it
- # let's compare properties
- differences = {}
- zab_results = content['result'][0]
- for key, value in params.items():
-
- if key == 'templates' and zab_results.has_key('parentTemplates'):
- if zab_results['parentTemplates'] != value:
- differences[key] = value
-
-
- elif key == "interfaces":
- if not interfaces_equal(zab_results[key], value):
- differences[key] = value
-
- elif zab_results[key] != value and zab_results[key] != str(value):
- differences[key] = value
-
- if not differences:
- module.exit_json(changed=False, results=zab_results, state="present")
-
- # We have differences and need to update
- differences[idname] = zab_results[idname]
- content = zapi.get_content(zbx_class_name, 'update', differences)
- module.exit_json(changed=True, results=content['result'], state="present")
-
- module.exit_json(failed=True,
- changed=False,
- results='Unknown state passed. %s' % state,
- state="unknown")
-
-# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
-# import module snippets. These are required
-from ansible.module_utils.basic import *
-
-main()
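
interfaces_equal() above accepts the interface list as unchanged only when every user-supplied field equals the Zabbix value under string comparison. A standalone sketch; demo_interfaces_equal is an illustrative name:

    def demo_interfaces_equal(zbx_interfaces, user_interfaces):
        for u_int in user_interfaces:
            for z_int in zbx_interfaces:
                for u_key, u_val in u_int.items():
                    # Coerce both sides: the API returns '1' where users pass 1.
                    if str(z_int[u_key]) != str(u_val):
                        return False
        return True

    zbx = [{'type': '1', 'main': '1', 'port': '10050'}]
    usr = [{'type': 1, 'main': 1, 'port': '10050'}]
    assert demo_interfaces_equal(zbx, usr)
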
diff --git a/roles/lib_zabbix/library/zbx_hostgroup.py b/roles/lib_zabbix/library/zbx_hostgroup.py
deleted file mode 100644
index 6c57d727e..000000000
--- a/roles/lib_zabbix/library/zbx_hostgroup.py
+++ /dev/null
@@ -1,116 +0,0 @@
-#!/usr/bin/env python
-''' Ansible module for hostgroup
-'''
-# vim: expandtab:tabstop=4:shiftwidth=4
-#
-# Zabbix hostgroup ansible module
-#
-#
-# Copyright 2015 Red Hat Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# This is in place because each module looks similar to the others.
-# They need duplicate code because their behavior is very similar
-# but differs for each zabbix class.
-# pylint: disable=duplicate-code
-
-# pylint: disable=import-error
-from openshift_tools.zbxapi import ZabbixAPI, ZabbixConnection
-
-def exists(content, key='result'):
-    ''' Check that key exists in content and that content[key] is non-empty
- '''
- if not content.has_key(key):
- return False
-
- if not content[key]:
- return False
-
- return True
-
-def main():
- ''' ansible module for hostgroup
- '''
-
- module = AnsibleModule(
- argument_spec=dict(
- zbx_server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'),
- zbx_user=dict(default=os.environ.get('ZABBIX_USER', None), type='str'),
- zbx_password=dict(default=os.environ.get('ZABBIX_PASSWORD', None), type='str'),
- zbx_debug=dict(default=False, type='bool'),
- name=dict(default=None, type='str'),
- state=dict(default='present', type='str'),
- ),
- #supports_check_mode=True
- )
-
- zapi = ZabbixAPI(ZabbixConnection(module.params['zbx_server'],
- module.params['zbx_user'],
- module.params['zbx_password'],
- module.params['zbx_debug']))
-
- #Set the instance and the template for the rest of the calls
- zbx_class_name = 'hostgroup'
- idname = "groupid"
- hname = module.params['name']
- state = module.params['state']
-
- content = zapi.get_content(zbx_class_name,
- 'get',
- {'search': {'name': hname},
- })
- if state == 'list':
- module.exit_json(changed=False, results=content['result'], state="list")
-
- if state == 'absent':
- if not exists(content):
- module.exit_json(changed=False, state="absent")
-
- content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]])
- module.exit_json(changed=True, results=content['result'], state="absent")
-
- if state == 'present':
- params = {'name': hname}
-
- if not exists(content):
- # if we didn't find it, create it
- content = zapi.get_content(zbx_class_name, 'create', params)
- module.exit_json(changed=True, results=content['result'], state='present')
- # already exists, we need to update it
- # let's compare properties
- differences = {}
- zab_results = content['result'][0]
- for key, value in params.items():
- if zab_results[key] != value and zab_results[key] != str(value):
- differences[key] = value
-
- if not differences:
- module.exit_json(changed=False, results=zab_results, state="present")
-
- # We have differences and need to update
- differences[idname] = zab_results[idname]
- content = zapi.get_content(zbx_class_name, 'update', differences)
- module.exit_json(changed=True, results=content['result'], state="present")
-
- module.exit_json(failed=True,
- changed=False,
- results='Unknown state passed. %s' % state,
- state="unknown")
-
-# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
-# import module snippets. These are required
-from ansible.module_utils.basic import *
-
-main()
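
The exists() helper repeated verbatim in each of these modules reduces to "content has a non-empty result". An equivalent sketch without the Python 2 has_key() idiom; demo_exists is an illustrative name:

    def demo_exists(content, key='result'):
        # Missing key, None, and empty list all count as "does not exist".
        return bool(content.get(key))

    assert demo_exists({'result': [{'groupid': '2'}]})
    assert not demo_exists({'result': []})
    assert not demo_exists({'error': 'boom'})
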
diff --git a/roles/lib_zabbix/library/zbx_httptest.py b/roles/lib_zabbix/library/zbx_httptest.py
deleted file mode 100644
index eab45d06e..000000000
--- a/roles/lib_zabbix/library/zbx_httptest.py
+++ /dev/null
@@ -1,290 +0,0 @@
-#!/usr/bin/env python
-'''
- Ansible module for zabbix httpservice
-'''
-# vim: expandtab:tabstop=4:shiftwidth=4
-#
-# Zabbix item ansible module
-#
-#
-# Copyright 2015 Red Hat Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# This is in place because each module looks similar to the others.
-# They need duplicate code because their behavior is very similar
-# but differs for each zabbix class.
-# pylint: disable=duplicate-code
-
-# pylint: disable=import-error
-from openshift_tools.zbxapi import ZabbixAPI, ZabbixConnection
-
-def exists(content, key='result'):
-    ''' Check that key exists in content and that content[key] is non-empty
- '''
- if not content.has_key(key):
- return False
-
- if not content[key]:
- return False
-
- return True
-
-def get_authentication_method(auth):
- ''' determine authentication type'''
- rval = 0
- if 'basic' in auth:
- rval = 1
- elif 'ntlm' in auth:
- rval = 2
-
- return rval
-
-def get_verify_host(verify):
- '''
- get the values for verify_host
- '''
- if verify:
- return 1
-
- return 0
-
-def get_app_id(zapi, application):
- '''
- get related templates
- '''
- # Fetch templates by name
- content = zapi.get_content('application',
- 'get',
- {'search': {'name': application},
- 'selectApplications': ['applicationid', 'name']})
-    if content.has_key('result') and content['result']:
- return content['result'][0]['applicationid']
-
- return None
-
-def get_template_id(zapi, template_name):
- '''
- get related templates
- '''
- # Fetch templates by name
- content = zapi.get_content('template',
- 'get',
- {'search': {'host': template_name},
- 'selectApplications': ['applicationid', 'name']})
-    if content.has_key('result') and content['result']:
- return content['result'][0]['templateid']
-
- return None
-
-def get_host_id_by_name(zapi, host_name):
- '''Get host id by name'''
- content = zapi.get_content('host',
- 'get',
- {'filter': {'name': host_name}})
-
- return content['result'][0]['hostid']
-
-def get_status(status):
- ''' Determine the status of the web scenario '''
- rval = 0
- if 'disabled' in status:
- return 1
-
- return rval
-
-def find_step(idx, step_list):
- ''' find step by index '''
- for step in step_list:
- if str(step['no']) == str(idx):
- return step
-
- return None
-
-def steps_equal(zab_steps, user_steps):
- '''compare steps returned from zabbix
- and steps passed from user
- '''
-
- if len(user_steps) != len(zab_steps):
- return False
-
- for idx in range(1, len(user_steps)+1):
-
- user = find_step(idx, user_steps)
- zab = find_step(idx, zab_steps)
-
- for key, value in user.items():
- if str(value) != str(zab[key]):
- return False
-
- return True
-
-def process_steps(steps):
- '''Preprocess the step parameters'''
- for idx, step in enumerate(steps):
- if not step.has_key('no'):
- step['no'] = idx + 1
-
- return steps
-
-# The branches are needed for CRUD and error handling
-# pylint: disable=too-many-branches
-def main():
- '''
- ansible zabbix module for zbx_item
- '''
-
- module = AnsibleModule(
- argument_spec=dict(
- zbx_server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'),
- zbx_user=dict(default=os.environ.get('ZABBIX_USER', None), type='str'),
- zbx_password=dict(default=os.environ.get('ZABBIX_PASSWORD', None), type='str'),
- zbx_debug=dict(default=False, type='bool'),
-            name=dict(default=None, required=True, type='str'),
- agent=dict(default=None, type='str'),
- template_name=dict(default=None, type='str'),
- host_name=dict(default=None, type='str'),
- interval=dict(default=60, type='int'),
- application=dict(default=None, type='str'),
- authentication=dict(default=None, type='str'),
- http_user=dict(default=None, type='str'),
- http_password=dict(default=None, type='str'),
- state=dict(default='present', type='str'),
- status=dict(default='enabled', type='str'),
-            steps=dict(default=None, type='list'),
- verify_host=dict(default=False, type='bool'),
- retries=dict(default=1, type='int'),
- headers=dict(default=None, type='dict'),
- query_type=dict(default='filter', choices=['filter', 'search'], type='str'),
- ),
- #supports_check_mode=True
- mutually_exclusive=[['template_name', 'host_name']],
- )
-
- zapi = ZabbixAPI(ZabbixConnection(module.params['zbx_server'],
- module.params['zbx_user'],
- module.params['zbx_password'],
- module.params['zbx_debug']))
-
- #Set the instance and the template for the rest of the calls
- zbx_class_name = 'httptest'
- state = module.params['state']
- hostid = None
-
-    # If a template name was passed, look up the template; otherwise look up the host
- if module.params['template_name']:
- hostid = get_template_id(zapi, module.params['template_name'])
- else:
- hostid = get_host_id_by_name(zapi, module.params['host_name'])
-
-    # Fail if no template or host was found matching the name
- if not hostid:
- module.exit_json(failed=True,
- changed=False,
-                         results='Error: Could not find template or host with name [%s].' %
-                         (module.params['template_name'] or module.params['host_name']),
-                         state="unknown")
-
- content = zapi.get_content(zbx_class_name,
- 'get',
- {module.params['query_type']: {'name': module.params['name']},
- 'selectSteps': 'extend',
- })
-
- #******#
- # GET
- #******#
- if state == 'list':
- module.exit_json(changed=False, results=content['result'], state="list")
-
- #******#
- # DELETE
- #******#
- if state == 'absent':
- if not exists(content):
- module.exit_json(changed=False, state="absent")
-
- content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0]['httptestid']])
- module.exit_json(changed=True, results=content['result'], state="absent")
-
- # Create and Update
- if state == 'present':
-
- params = {'name': module.params['name'],
- 'hostid': hostid,
- 'agent': module.params['agent'],
- 'retries': module.params['retries'],
- 'steps': process_steps(module.params['steps']),
- 'applicationid': get_app_id(zapi, module.params['application']),
- 'delay': module.params['interval'],
- 'verify_host': get_verify_host(module.params['verify_host']),
- 'status': get_status(module.params['status']),
- 'headers': module.params['headers'],
- 'http_user': module.params['http_user'],
- 'http_password': module.params['http_password'],
- }
-
-
- # Remove any None valued params
- _ = [params.pop(key, None) for key in params.keys() if params[key] is None]
-
- #******#
- # CREATE
- #******#
- if not exists(content):
- content = zapi.get_content(zbx_class_name, 'create', params)
-
- if content.has_key('error'):
- module.exit_json(failed=True, changed=True, results=content['error'], state="present")
-
- module.exit_json(changed=True, results=content['result'], state='present')
-
-
- ########
- # UPDATE
- ########
- differences = {}
- zab_results = content['result'][0]
- for key, value in params.items():
-
- if key == 'steps':
- if not steps_equal(zab_results[key], value):
- differences[key] = value
-
- elif zab_results[key] != value and zab_results[key] != str(value):
- differences[key] = value
-
- # We have differences and need to update
- if not differences:
- module.exit_json(changed=False, results=zab_results, state="present")
-
- differences['httptestid'] = zab_results['httptestid']
- content = zapi.get_content(zbx_class_name, 'update', differences)
-
- if content.has_key('error'):
- module.exit_json(failed=True, changed=False, results=content['error'], state="present")
-
- module.exit_json(changed=True, results=content['result'], state="present")
-
- module.exit_json(failed=True,
- changed=False,
- results='Unknown state passed. %s' % state,
- state="unknown")
-
-# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
-# import module snippets. These are required
-from ansible.module_utils.basic import *
-
-main()
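
process_steps() above numbers web-scenario steps by position whenever the user omits the 'no' field, which is what find_step() and steps_equal() later key on. A standalone sketch; demo_process_steps is an illustrative name:

    def demo_process_steps(steps):
        for idx, step in enumerate(steps):
            if 'no' not in step:
                step['no'] = idx + 1  # step numbering starts at 1
        return steps

    steps = [{'name': 'login'}, {'name': 'health', 'no': 5}]
    assert demo_process_steps(steps) == [{'name': 'login', 'no': 1},
                                         {'name': 'health', 'no': 5}]
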
diff --git a/roles/lib_zabbix/library/zbx_item.py b/roles/lib_zabbix/library/zbx_item.py
deleted file mode 100644
index 189485fb7..000000000
--- a/roles/lib_zabbix/library/zbx_item.py
+++ /dev/null
@@ -1,303 +0,0 @@
-#!/usr/bin/env python
-'''
- Ansible module for zabbix items
-'''
-# vim: expandtab:tabstop=4:shiftwidth=4
-#
-# Zabbix item ansible module
-#
-#
-# Copyright 2015 Red Hat Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# This is in place because each module looks similar to the others.
-# They need duplicate code because their behavior is very similar
-# but differs for each zabbix class.
-# pylint: disable=duplicate-code
-
-# pylint: disable=import-error
-from openshift_tools.zbxapi import ZabbixAPI, ZabbixConnection
-
-def exists(content, key='result'):
-    ''' Check that key exists in content and that content[key] is non-empty
- '''
- if not content.has_key(key):
- return False
-
- if not content[key]:
- return False
-
- return True
-
-def get_data_type(data_type):
- '''
- Possible values:
- 0 - decimal;
- 1 - octal;
- 2 - hexadecimal;
- 3 - bool;
- '''
- vtype = 0
- if 'octal' in data_type:
- vtype = 1
- elif 'hexadecimal' in data_type:
- vtype = 2
- elif 'bool' in data_type:
- vtype = 3
-
- return vtype
-
-def get_value_type(value_type):
- '''
- Possible values:
- 0 - numeric float;
- 1 - character;
- 2 - log;
- 3 - numeric unsigned;
- 4 - text
- '''
- vtype = 0
- if 'int' in value_type:
- vtype = 3
- elif 'log' in value_type:
- vtype = 2
- elif 'char' in value_type:
- vtype = 1
- elif 'str' in value_type:
- vtype = 4
-
- return vtype
-
-def get_app_ids(application_names, app_name_ids):
- ''' get application ids from names
- '''
- applications = []
- if application_names:
- for app in application_names:
- applications.append(app_name_ids[app])
-
- return applications
-
-def get_template_id(zapi, template_name):
- '''
- get related templates
- '''
- template_ids = []
- app_ids = {}
- # Fetch templates by name
- content = zapi.get_content('template',
- 'get',
- {'search': {'host': template_name},
- 'selectApplications': ['applicationid', 'name']})
-    if content.has_key('result') and content['result']:
- template_ids.append(content['result'][0]['templateid'])
- for app in content['result'][0]['applications']:
- app_ids[app['name']] = app['applicationid']
-
- return template_ids, app_ids
-
-def get_multiplier(inval):
- ''' Determine the multiplier
- '''
- if inval == None or inval == '':
- return None, 0
-
- rval = None
- try:
- rval = int(inval)
- except ValueError:
- pass
-
- if rval:
- return rval, 1
-
- return rval, 0
-
-def get_zabbix_type(ztype):
- '''
-    Determine which Zabbix item type this is
- '''
- _types = {'agent': 0,
- 'SNMPv1': 1,
- 'trapper': 2,
- 'simple': 3,
- 'SNMPv2': 4,
- 'internal': 5,
- 'SNMPv3': 6,
- 'active': 7,
- 'aggregate': 8,
- 'web': 9,
- 'external': 10,
- 'database monitor': 11,
- 'ipmi': 12,
- 'ssh': 13,
- 'telnet': 14,
- 'calculated': 15,
- 'JMX': 16,
- 'SNMP trap': 17,
- }
-
- for typ in _types.keys():
- if ztype in typ or ztype == typ:
- _vtype = _types[typ]
- break
-    else:
-        # no match found; default to trapper (2)
-        _vtype = 2
-
- return _vtype
-
-# The branches are needed for CRUD and error handling
-# pylint: disable=too-many-branches
-def main():
- '''
- ansible zabbix module for zbx_item
- '''
-
- module = AnsibleModule(
- argument_spec=dict(
- zbx_server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'),
- zbx_user=dict(default=os.environ.get('ZABBIX_USER', None), type='str'),
- zbx_password=dict(default=os.environ.get('ZABBIX_PASSWORD', None), type='str'),
- zbx_debug=dict(default=False, type='bool'),
- name=dict(default=None, type='str'),
- key=dict(default=None, type='str'),
- template_name=dict(default=None, type='str'),
- zabbix_type=dict(default='trapper', type='str'),
- value_type=dict(default='int', type='str'),
- data_type=dict(default='decimal', type='str'),
- interval=dict(default=60, type='int'),
- delta=dict(default=0, type='int'),
- multiplier=dict(default=None, type='str'),
- description=dict(default=None, type='str'),
- units=dict(default=None, type='str'),
- applications=dict(default=None, type='list'),
- state=dict(default='present', type='str'),
- ),
- #supports_check_mode=True
- )
-
- zapi = ZabbixAPI(ZabbixConnection(module.params['zbx_server'],
- module.params['zbx_user'],
- module.params['zbx_password'],
- module.params['zbx_debug']))
-
- #Set the instance and the template for the rest of the calls
- zbx_class_name = 'item'
- state = module.params['state']
-
- templateid, app_name_ids = get_template_id(zapi, module.params['template_name'])
-
- # Fail if a template was not found matching the name
- if not templateid:
- module.exit_json(failed=True,
- changed=False,
-                         results='Error: Could not find template with name %s for item.' % module.params['template_name'],
-                         state="unknown")
-
- content = zapi.get_content(zbx_class_name,
- 'get',
- {'search': {'key_': module.params['key']},
- 'selectApplications': 'applicationid',
- 'templateids': templateid,
- })
-
- #******#
- # GET
- #******#
- if state == 'list':
- module.exit_json(changed=False, results=content['result'], state="list")
-
- #******#
- # DELETE
- #******#
- if state == 'absent':
- if not exists(content):
- module.exit_json(changed=False, state="absent")
-
- content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0]['itemid']])
- module.exit_json(changed=True, results=content['result'], state="absent")
-
- # Create and Update
- if state == 'present':
-
- formula, use_multiplier = get_multiplier(module.params['multiplier'])
-        params = {'name': module.params['name'] or module.params['key'],
- 'key_': module.params['key'],
- 'hostid': templateid[0],
- 'type': get_zabbix_type(module.params['zabbix_type']),
- 'value_type': get_value_type(module.params['value_type']),
- 'data_type': get_data_type(module.params['data_type']),
- 'applications': get_app_ids(module.params['applications'], app_name_ids),
- 'formula': formula,
- 'multiplier': use_multiplier,
- 'description': module.params['description'],
- 'units': module.params['units'],
- 'delay': module.params['interval'],
- 'delta': module.params['delta'],
- }
-
- # Remove any None valued params
- _ = [params.pop(key, None) for key in params.keys() if params[key] is None]
-
- #******#
- # CREATE
- #******#
- if not exists(content):
- content = zapi.get_content(zbx_class_name, 'create', params)
-
- if content.has_key('error'):
- module.exit_json(failed=True, changed=True, results=content['error'], state="present")
-
- module.exit_json(changed=True, results=content['result'], state='present')
-
-
- ########
- # UPDATE
- ########
- _ = params.pop('hostid', None)
- differences = {}
- zab_results = content['result'][0]
- for key, value in params.items():
-
- if key == 'applications':
- app_ids = [item['applicationid'] for item in zab_results[key]]
- if set(app_ids) != set(value):
- differences[key] = value
-
- elif zab_results[key] != value and zab_results[key] != str(value):
- differences[key] = value
-
- if not differences:
- module.exit_json(changed=False, results=zab_results, state="present")
-
- # We have differences and need to update
- differences['itemid'] = zab_results['itemid']
- content = zapi.get_content(zbx_class_name, 'update', differences)
-
- if content.has_key('error'):
- module.exit_json(failed=True, changed=False, results=content['error'], state="present")
-
- module.exit_json(changed=True, results=content['result'], state="present")
-
- module.exit_json(failed=True,
- changed=False,
- results='Unknown state passed. %s' % state,
- state="unknown")
-
-# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
-# import module snippets. These are required
-from ansible.module_utils.basic import *
-
-main()
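
get_multiplier above returns a (formula, use_multiplier) pair: empty input disables the multiplier, a non-zero integer enables it, and anything unparseable (including '0', which is falsy) leaves it off. A standalone restatement of that contract with spot checks:

    def multiplier_contract(inval):
        if inval is None or inval == '':
            return None, 0
        try:
            rval = int(inval)
        except ValueError:
            return None, 0
        # 0 parses but is falsy, so it still disables the multiplier
        return (rval, 1) if rval else (rval, 0)

    assert multiplier_contract('') == (None, 0)
    assert multiplier_contract('8') == (8, 1)
    assert multiplier_contract('0') == (0, 0)
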
diff --git a/roles/lib_zabbix/library/zbx_itemprototype.py b/roles/lib_zabbix/library/zbx_itemprototype.py
deleted file mode 100644
index eab2a04ae..000000000
--- a/roles/lib_zabbix/library/zbx_itemprototype.py
+++ /dev/null
@@ -1,327 +0,0 @@
-#!/usr/bin/env python
-'''
-Zabbix discovery rule ansible module
-'''
-# vim: expandtab:tabstop=4:shiftwidth=4
-#
-# Copyright 2015 Red Hat Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# This is in place because these modules all look similar to one another.
-# They need duplicate code because their behavior is very similar
-# but different for each zabbix class.
-# pylint: disable=duplicate-code
-
-# pylint: disable=import-error
-from openshift_tools.zbxapi import ZabbixAPI, ZabbixConnection
-
-def exists(content, key='result'):
-    ''' Check that key exists in content and that content[key] is non-empty
- '''
- if not content.has_key(key):
- return False
-
- if not content[key]:
- return False
-
- return True
-
-def get_rule_id(zapi, discoveryrule_key, templateid):
- '''get a discoveryrule by name
- '''
- content = zapi.get_content('discoveryrule',
- 'get',
- {'search': {'key_': discoveryrule_key},
- 'output': 'extend',
- 'templateids': templateid,
- })
- if not content['result']:
- return None
- return content['result'][0]['itemid']
-
-def get_template(zapi, template_name):
- '''get a template by name
- '''
- if not template_name:
- return None
-
- content = zapi.get_content('template',
- 'get',
- {'search': {'host': template_name},
- 'output': 'extend',
- 'selectInterfaces': 'interfaceid',
- })
- if not content['result']:
- return None
- return content['result'][0]
-
-def get_multiplier(inval):
- ''' Determine the multiplier
- '''
- if inval == None or inval == '':
- return None, 0
-
- rval = None
- try:
- rval = int(inval)
- except ValueError:
- pass
-
- if rval:
- return rval, 1
-
- return rval, 0
-
-def get_zabbix_type(ztype):
- '''
-    Determine which Zabbix type this is
- '''
- _types = {'agent': 0,
- 'SNMPv1': 1,
- 'trapper': 2,
- 'simple': 3,
- 'SNMPv2': 4,
- 'internal': 5,
- 'SNMPv3': 6,
- 'active': 7,
- 'aggregate': 8,
- 'external': 10,
- 'database monitor': 11,
- 'ipmi': 12,
- 'ssh': 13,
- 'telnet': 14,
- 'calculated': 15,
- 'JMX': 16,
- 'SNMP trap': 17,
- }
-
- for typ in _types.keys():
- if ztype in typ or ztype == typ:
- _vtype = _types[typ]
- break
-    else:
-        # no match found; default to trapper (2)
-        _vtype = 2
-
- return _vtype
-
-def get_data_type(data_type):
- '''
- Possible values:
- 0 - decimal;
- 1 - octal;
- 2 - hexadecimal;
- 3 - bool;
- '''
- vtype = 0
- if 'octal' in data_type:
- vtype = 1
- elif 'hexadecimal' in data_type:
- vtype = 2
- elif 'bool' in data_type:
- vtype = 3
-
- return vtype
-
-def get_value_type(value_type):
- '''
- Possible values:
- 0 - numeric float;
- 1 - character;
- 2 - log;
- 3 - numeric unsigned;
- 4 - text
- '''
- vtype = 0
- if 'int' in value_type:
- vtype = 3
- elif 'char' in value_type:
- vtype = 1
- elif 'str' in value_type:
- vtype = 4
-
- return vtype
-
-def get_status(status):
- ''' Determine status
- '''
- _status = 0
- if status == 'disabled':
- _status = 1
- elif status == 'unsupported':
- _status = 3
-
- return _status
-
-def get_app_ids(zapi, application_names, templateid):
- ''' get application ids from names
- '''
- app_ids = []
- for app_name in application_names:
- content = zapi.get_content('application', 'get', {'filter': {'name': app_name}, 'templateids': templateid})
- if content.has_key('result'):
- app_ids.append(content['result'][0]['applicationid'])
- return app_ids
-
-# pylint: disable=too-many-branches
-def main():
- '''
- Ansible module for zabbix discovery rules
- '''
-
- module = AnsibleModule(
- argument_spec=dict(
- zbx_server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'),
- zbx_user=dict(default=os.environ.get('ZABBIX_USER', None), type='str'),
- zbx_password=dict(default=os.environ.get('ZABBIX_PASSWORD', None), type='str'),
- zbx_debug=dict(default=False, type='bool'),
- name=dict(default=None, type='str'),
- key=dict(default=None, type='str'),
- description=dict(default=None, type='str'),
- template_name=dict(default=None, type='str'),
- interfaceid=dict(default=None, type='int'),
- zabbix_type=dict(default='trapper', type='str'),
- value_type=dict(default='float', type='str'),
- data_type=dict(default='decimal', type='str'),
- delay=dict(default=60, type='int'),
- lifetime=dict(default=30, type='int'),
- state=dict(default='present', type='str'),
- status=dict(default='enabled', type='str'),
- applications=dict(default=[], type='list'),
- discoveryrule_key=dict(default=None, type='str'),
- interval=dict(default=60, type='int'),
- delta=dict(default=0, type='int'),
- multiplier=dict(default=None, type='str'),
- units=dict(default=None, type='str'),
-
- ),
- #supports_check_mode=True
- )
-
- zapi = ZabbixAPI(ZabbixConnection(module.params['zbx_server'],
- module.params['zbx_user'],
- module.params['zbx_password'],
- module.params['zbx_debug']))
-
- #Set the instance and the template for the rest of the calls
- zbx_class_name = 'itemprototype'
- idname = "itemid"
- state = module.params['state']
- template = get_template(zapi, module.params['template_name'])
-
- # selectInterfaces doesn't appear to be working but is needed.
- content = zapi.get_content(zbx_class_name,
- 'get',
- {'search': {'key_': module.params['key']},
- 'selectApplications': 'applicationid',
- 'selectDiscoveryRule': 'itemid',
- 'templated': True,
- })
-
- #******#
- # GET
- #******#
- if state == 'list':
- module.exit_json(changed=False, results=content['result'], state="list")
-
- #******#
- # DELETE
- #******#
- if state == 'absent':
- if not exists(content):
- module.exit_json(changed=False, state="absent")
-
- content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]])
- module.exit_json(changed=True, results=content['result'], state="absent")
-
- # Create and Update
- if state == 'present':
-
- formula, use_multiplier = get_multiplier(module.params['multiplier'])
-
- params = {'name': module.params['name'],
- 'key_': module.params['key'],
- 'hostid': template['templateid'],
- 'interfaceid': module.params['interfaceid'],
- 'ruleid': get_rule_id(zapi, module.params['discoveryrule_key'], template['templateid']),
- 'type': get_zabbix_type(module.params['zabbix_type']),
- 'value_type': get_value_type(module.params['value_type']),
- 'data_type': get_data_type(module.params['data_type']),
- 'applications': get_app_ids(zapi, module.params['applications'], template['templateid']),
- 'formula': formula,
- 'multiplier': use_multiplier,
- 'description': module.params['description'],
- 'units': module.params['units'],
- 'delay': module.params['interval'],
- 'delta': module.params['delta'],
- }
-
-        # These types (trapper, internal, active, aggregate, database monitor,
-        # calculated) have no host interface, so drop interfaceid.
-        if params['type'] in [2, 5, 7, 8, 11, 15]:
- params.pop('interfaceid')
-
- # Remove any None valued params
- _ = [params.pop(key, None) for key in params.keys() if params[key] is None]
-
- #******#
- # CREATE
- #******#
- if not exists(content):
- content = zapi.get_content(zbx_class_name, 'create', params)
-
- if content.has_key('error'):
- module.exit_json(failed=True, changed=False, results=content['error'], state="present")
-
- module.exit_json(changed=True, results=content['result'], state='present')
-
- #******#
- # UPDATE
- #******#
- differences = {}
- zab_results = content['result'][0]
- for key, value in params.items():
-
- if key == 'ruleid':
- if value != zab_results['discoveryRule']['itemid']:
- differences[key] = value
-
- elif key == 'applications':
- app_ids = [app['applicationid'] for app in zab_results[key]]
- if set(app_ids) - set(value):
- differences[key] = value
-
- elif zab_results[key] != value and zab_results[key] != str(value):
- differences[key] = value
-
- if not differences:
- module.exit_json(changed=False, results=zab_results, state="present")
-
- # We have differences and need to update
- differences[idname] = zab_results[idname]
- content = zapi.get_content(zbx_class_name, 'update', differences)
-
- if content.has_key('error'):
- module.exit_json(failed=True, changed=False, results=content['error'], state="present")
-
- module.exit_json(changed=True, results=content['result'], state="present")
-
- module.exit_json(failed=True,
- changed=False,
- results='Unknown state passed. %s' % state,
- state="unknown")
-
-# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
-# import module snippets. These are required
-from ansible.module_utils.basic import *
-
-main()
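
The `_ = [params.pop(key, None) for key in params.keys() if params[key] is None]` idiom repeated in these modules is safe only on Python 2, where keys() returns a list; on Python 3 it would raise RuntimeError for mutating the dict mid-iteration. A Python 3-safe equivalent, shown only as an aside:

    params = {'name': 'cpu.load', 'units': None, 'delay': 60, 'description': None}
    # Rebuild the dict rather than popping while iterating over a live view.
    params = {key: value for key, value in params.items() if value is not None}
    assert params == {'name': 'cpu.load', 'delay': 60}
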
diff --git a/roles/lib_zabbix/library/zbx_itservice.py b/roles/lib_zabbix/library/zbx_itservice.py
deleted file mode 100644
index aa37f0a2b..000000000
--- a/roles/lib_zabbix/library/zbx_itservice.py
+++ /dev/null
@@ -1,263 +0,0 @@
-#!/usr/bin/env python
-'''
- Ansible module for zabbix itservices
-'''
-# vim: expandtab:tabstop=4:shiftwidth=4
-#
-# Zabbix itservice ansible module
-#
-#
-# Copyright 2015 Red Hat Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# This is in place because these modules all look similar to one another.
-# They need duplicate code because their behavior is very similar
-# but different for each zabbix class.
-# pylint: disable=duplicate-code
-
-# pylint: disable=import-error
-from openshift_tools.zbxapi import ZabbixAPI, ZabbixConnection
-
-def exists(content, key='result'):
-    ''' Check that key exists in content and that content[key] is non-empty
- '''
- if not content.has_key(key):
- return False
-
- if not content[key]:
- return False
-
- return True
-
-def get_parent(dependencies):
-    '''Return the parent dependency if one exists'''
- rval = None
- for dep in dependencies:
- if dep['relationship'] == 'parent':
- return dep
- return rval
-
-def format_dependencies(dependencies):
- '''Put dependencies into the proper update format'''
- rval = []
- for dep in dependencies:
- rval.append({'dependsOnServiceid': dep['serviceid'],
- 'soft': get_dependency_type(dep['dep_type']),
- })
-
- return rval
-
-def get_dependency_type(dep_type):
- '''Determine the dependency type'''
- rval = 0
- if 'soft' == dep_type:
- rval = 1
-
- return rval
-
-def get_service_id_by_name(zapi, dependencies):
- '''Fetch the service id for an itservice'''
- deps = []
- for dep in dependencies:
- if dep['name'] == 'root':
- deps.append(dep)
- continue
-
- content = zapi.get_content('service',
- 'get',
- {'filter': {'name': dep['name']},
- 'selectDependencies': 'extend',
- })
- if content.has_key('result') and content['result']:
- dep['serviceid'] = content['result'][0]['serviceid']
- deps.append(dep)
-
- return deps
-
-def add_dependencies(zapi, service_name, dependencies):
-    '''Add dependencies to an itservice
-
- Add a dependency on the parent for this current service item.
- '''
-
- results = get_service_id_by_name(zapi, [{'name': service_name}])
-
- content = {}
- for dep in dependencies:
- content = zapi.get_content('service',
- 'adddependencies',
- {'serviceid': results[0]['serviceid'],
- 'dependsOnServiceid': dep['serviceid'],
- 'soft': get_dependency_type(dep['dep_type']),
- })
- if content.has_key('result') and content['result']:
- continue
- else:
- break
-
- return content
-
-def get_show_sla(inc_sla):
-    ''' Determine the showsla parameter
-    '''
-    rval = 1
-    if 'do not calculate' in inc_sla:
- rval = 0
- return rval
-
-def get_algorithm(inc_algorithm_str):
- '''
-    Determine which algorithm type
- '''
- rval = 0
- if 'at least one' in inc_algorithm_str:
- rval = 1
- elif 'all' in inc_algorithm_str:
- rval = 2
-
- return rval
-
-# The branches are needed for CRUD and error handling
-# pylint: disable=too-many-branches
-def main():
- '''
- ansible zabbix module for zbx_itservice
- '''
-
- module = AnsibleModule(
- argument_spec=dict(
- zbx_server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'),
- zbx_user=dict(default=os.environ.get('ZABBIX_USER', None), type='str'),
- zbx_password=dict(default=os.environ.get('ZABBIX_PASSWORD', None), type='str'),
- zbx_debug=dict(default=False, type='bool'),
- name=dict(default=None, type='str'),
- algorithm=dict(default='do not calculate', choices=['do not calculate', 'at least one', 'all'], type='str'),
- show_sla=dict(default='calculate', choices=['do not calculate', 'calculate'], type='str'),
- good_sla=dict(default='99.9', type='float'),
- sort_order=dict(default=1, type='int'),
- state=dict(default='present', type='str'),
- trigger_id=dict(default=None, type='int'),
- dependencies=dict(default=[], type='list'),
- dep_type=dict(default='hard', choices=['hard', 'soft'], type='str'),
- ),
- #supports_check_mode=True
- )
-
- zapi = ZabbixAPI(ZabbixConnection(module.params['zbx_server'],
- module.params['zbx_user'],
- module.params['zbx_password'],
- module.params['zbx_debug']))
-
- #Set the instance and the template for the rest of the calls
- zbx_class_name = 'service'
- state = module.params['state']
-
- content = zapi.get_content(zbx_class_name,
- 'get',
- {'filter': {'name': module.params['name']},
- 'selectDependencies': 'extend',
- })
-
- #******#
- # GET
- #******#
- if state == 'list':
- module.exit_json(changed=False, results=content['result'], state="list")
-
- #******#
- # DELETE
- #******#
- if state == 'absent':
- if not exists(content):
- module.exit_json(changed=False, state="absent")
-
- content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0]['serviceid']])
- module.exit_json(changed=True, results=content['result'], state="absent")
-
- # Create and Update
- if state == 'present':
-
- dependencies = get_service_id_by_name(zapi, module.params['dependencies'])
- params = {'name': module.params['name'],
- 'algorithm': get_algorithm(module.params['algorithm']),
- 'showsla': get_show_sla(module.params['show_sla']),
- 'goodsla': module.params['good_sla'],
- 'sortorder': module.params['sort_order'],
- 'triggerid': module.params['trigger_id']
- }
-
- # Remove any None valued params
- _ = [params.pop(key, None) for key in params.keys() if params[key] is None]
-
- #******#
- # CREATE
- #******#
- if not exists(content):
- content = zapi.get_content(zbx_class_name, 'create', params)
-
- if content.has_key('error'):
- module.exit_json(failed=True, changed=True, results=content['error'], state="present")
-
- if dependencies:
- content = add_dependencies(zapi, module.params['name'], dependencies)
-
- if content.has_key('error'):
- module.exit_json(failed=True, changed=True, results=content['error'], state="present")
-
- module.exit_json(changed=True, results=content['result'], state='present')
-
-
- ########
- # UPDATE
- ########
- params['dependencies'] = dependencies
- differences = {}
- zab_results = content['result'][0]
- for key, value in params.items():
-
- if key == 'goodsla':
- if float(value) != float(zab_results[key]):
- differences[key] = value
-
- elif key == 'dependencies':
- zab_dep_ids = [item['serviceid'] for item in zab_results[key]]
- user_dep_ids = [item['serviceid'] for item in dependencies]
- if set(zab_dep_ids) != set(user_dep_ids):
- differences[key] = format_dependencies(dependencies)
-
- elif zab_results[key] != value and zab_results[key] != str(value):
- differences[key] = value
-
- if not differences:
- module.exit_json(changed=False, results=zab_results, state="present")
-
- differences['serviceid'] = zab_results['serviceid']
- content = zapi.get_content(zbx_class_name, 'update', differences)
-
- if content.has_key('error'):
- module.exit_json(failed=True, changed=False, results=content['error'], state="present")
-
- module.exit_json(changed=True, results=content['result'], state="present")
-
- module.exit_json(failed=True,
- changed=False,
- results='Unknown state passed. %s' % state,
- state="unknown")
-
-# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
-# import module snippets. These are required
-from ansible.module_utils.basic import *
-
-main()
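
format_dependencies above reshapes each resolved dependency into the {'dependsOnServiceid', 'soft'} form that the service update expects, with 'soft' mapping to 1 and 'hard' to 0. An isolated restatement on sample data (the service id is illustrative):

    def to_update_shape(deps):
        return [{'dependsOnServiceid': dep['serviceid'],
                 'soft': 1 if dep['dep_type'] == 'soft' else 0}
                for dep in deps]

    assert to_update_shape([{'serviceid': '12', 'dep_type': 'soft'}]) == \
        [{'dependsOnServiceid': '12', 'soft': 1}]
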
diff --git a/roles/lib_zabbix/library/zbx_mediatype.py b/roles/lib_zabbix/library/zbx_mediatype.py
deleted file mode 100644
index b8def3ca4..000000000
--- a/roles/lib_zabbix/library/zbx_mediatype.py
+++ /dev/null
@@ -1,168 +0,0 @@
-#!/usr/bin/env python
-'''
- Ansible module for mediatype
-'''
-# vim: expandtab:tabstop=4:shiftwidth=4
-#
-# Zabbix mediatype ansible module
-#
-#
-# Copyright 2015 Red Hat Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# This is in place because these modules all look similar to one another.
-# They need duplicate code because their behavior is very similar
-# but different for each zabbix class.
-# pylint: disable=duplicate-code
-
-# pylint: disable=import-error
-from openshift_tools.zbxapi import ZabbixAPI, ZabbixConnection
-
-def exists(content, key='result'):
-    ''' Check that key exists in content and that content[key] is non-empty
- '''
- if not content.has_key(key):
- return False
-
- if not content[key]:
- return False
-
- return True
-
-def get_mtype(mtype):
- '''
- Transport used by the media type.
- Possible values:
- 0 - email;
- 1 - script;
- 2 - SMS;
- 3 - Jabber;
- 100 - Ez Texting.
- '''
- mtype = mtype.lower()
- media_type = None
- if mtype == 'script':
- media_type = 1
- elif mtype == 'sms':
- media_type = 2
- elif mtype == 'jabber':
- media_type = 3
-    elif mtype == 'ez texting':
-        media_type = 100
- else:
- media_type = 0
-
- return media_type
-
-def main():
- '''
- Ansible zabbix module for mediatype
- '''
-
- module = AnsibleModule(
- argument_spec=dict(
- zbx_server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'),
- zbx_user=dict(default=os.environ.get('ZABBIX_USER', None), type='str'),
- zbx_password=dict(default=os.environ.get('ZABBIX_PASSWORD', None), type='str'),
- zbx_debug=dict(default=False, type='bool'),
- description=dict(default=None, type='str'),
- mtype=dict(default=None, type='str'),
- smtp_server=dict(default=None, type='str'),
- smtp_helo=dict(default=None, type='str'),
- smtp_email=dict(default=None, type='str'),
- passwd=dict(default=None, type='str'),
- path=dict(default=None, type='str'),
- username=dict(default=None, type='str'),
- status=dict(default='enabled', type='str'),
- state=dict(default='present', type='str'),
- ),
- #supports_check_mode=True
- )
-
- zapi = ZabbixAPI(ZabbixConnection(module.params['zbx_server'],
- module.params['zbx_user'],
- module.params['zbx_password'],
- module.params['zbx_debug']))
-
- #Set the instance and the template for the rest of the calls
- zbx_class_name = 'mediatype'
- idname = "mediatypeid"
- description = module.params['description']
- state = module.params['state']
-
- content = zapi.get_content(zbx_class_name, 'get', {'search': {'description': description}})
- if state == 'list':
- module.exit_json(changed=False, results=content['result'], state="list")
-
- if state == 'absent':
- if not exists(content):
- module.exit_json(changed=False, state="absent")
-
- content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]])
- module.exit_json(changed=True, results=content['result'], state="absent")
-
- if state == 'present':
-        # only 'enabled' maps to status 0 (on); any other value disables the media type
-        status = 0 if module.params['status'] == 'enabled' else 1
- params = {'description': description,
- 'type': get_mtype(module.params['mtype']),
- 'smtp_server': module.params['smtp_server'],
- 'smtp_helo': module.params['smtp_helo'],
- 'smtp_email': module.params['smtp_email'],
- 'passwd': module.params['passwd'],
- 'exec_path': module.params['path'],
- 'username': module.params['username'],
- 'status': status,
- }
-
- # Remove any None valued params
- _ = [params.pop(key, None) for key in params.keys() if params[key] is None]
-
- if not exists(content):
- # if we didn't find it, create it
- content = zapi.get_content(zbx_class_name, 'create', params)
-
- if content.has_key('error'):
- module.exit_json(failed=True, changed=False, results=content['error'], state="present")
-
- module.exit_json(changed=True, results=content['result'], state='present')
- # already exists, we need to update it
- # let's compare properties
- differences = {}
- zab_results = content['result'][0]
- for key, value in params.items():
- if zab_results[key] != value and \
- zab_results[key] != str(value):
- differences[key] = value
-
- if not differences:
- module.exit_json(changed=False, results=zab_results, state="present")
-
- # We have differences and need to update
- differences[idname] = zab_results[idname]
- content = zapi.get_content(zbx_class_name, 'update', differences)
- module.exit_json(changed=True, results=content['result'], state="present")
-
- module.exit_json(failed=True,
- changed=False,
- results='Unknown state passed. %s' % state,
- state="unknown")
-
-# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
-# import module snippets. These are required
-from ansible.module_utils.basic import *
-
-main()
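
get_mtype above maps a transport name onto Zabbix's numeric media type codes, falling back to email (0) for anything unrecognized (the duplicated 'script' branch is corrected to 'ez texting' per its docstring). The same mapping, table-driven, as a sketch:

    MEDIA_TYPES = {'email': 0, 'script': 1, 'sms': 2, 'jabber': 3, 'ez texting': 100}

    def media_type_code(mtype):
        # Unrecognized transports fall back to email (0), as in the module.
        return MEDIA_TYPES.get(mtype.lower(), 0)

    assert media_type_code('SMS') == 2
    assert media_type_code('carrier pigeon') == 0
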
diff --git a/roles/lib_zabbix/library/zbx_template.py b/roles/lib_zabbix/library/zbx_template.py
deleted file mode 100644
index cc713b998..000000000
--- a/roles/lib_zabbix/library/zbx_template.py
+++ /dev/null
@@ -1,132 +0,0 @@
-#!/usr/bin/env python
-'''
-Ansible module for template
-'''
-# vim: expandtab:tabstop=4:shiftwidth=4
-#
-# Zabbix template ansible module
-#
-#
-# Copyright 2015 Red Hat Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# This is in place because these modules all look similar to one another.
-# They need duplicate code because their behavior is very similar
-# but different for each zabbix class.
-# pylint: disable=duplicate-code
-
-# pylint: disable=import-error
-from openshift_tools.zbxapi import ZabbixAPI, ZabbixConnection
-
-def exists(content, key='result'):
-    ''' Check that key exists in content and that content[key] is non-empty
- '''
- if not content.has_key(key):
- return False
-
- if not content[key]:
- return False
-
- return True
-
-def main():
- ''' Ansible module for template
- '''
-
- module = AnsibleModule(
- argument_spec=dict(
- zbx_server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'),
- zbx_user=dict(default=os.environ.get('ZABBIX_USER', None), type='str'),
- zbx_password=dict(default=os.environ.get('ZABBIX_PASSWORD', None), type='str'),
- zbx_debug=dict(default=False, type='bool'),
- name=dict(default=None, type='str'),
- state=dict(default='present', type='str'),
- ),
- #supports_check_mode=True
- )
-
- zapi = ZabbixAPI(ZabbixConnection(module.params['zbx_server'],
- module.params['zbx_user'],
- module.params['zbx_password'],
- module.params['zbx_debug']))
-
- #Set the instance and the template for the rest of the calls
- zbx_class_name = 'template'
- idname = 'templateid'
- tname = module.params['name']
- state = module.params['state']
- # get a template, see if it exists
- content = zapi.get_content(zbx_class_name,
- 'get',
- {'search': {'host': tname},
- 'selectParentTemplates': 'templateid',
- 'selectGroups': 'groupid',
- 'selectApplications': 'applicationid',
- 'selectDiscoveries': 'extend',
- })
- if state == 'list':
- module.exit_json(changed=False, results=content['result'], state="list")
-
- if state == 'absent':
- if not exists(content):
- module.exit_json(changed=False, state="absent")
-
- if not tname:
- module.exit_json(failed=True,
- changed=False,
-                              results='Must specify a template name.',
- state="absent")
-
- content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]])
- module.exit_json(changed=True, results=content['result'], state="absent")
-
- if state == 'present':
- params = {'groups': module.params.get('groups', [{'groupid': '1'}]),
- 'host': tname,
- }
-
- if not exists(content):
- # if we didn't find it, create it
- content = zapi.get_content(zbx_class_name, 'create', params)
- module.exit_json(changed=True, results=content['result'], state='present')
- # already exists, we need to update it
- # let's compare properties
- differences = {}
- zab_results = content['result'][0]
- for key, value in params.items():
- if key == 'templates' and zab_results.has_key('parentTemplates'):
- if zab_results['parentTemplates'] != value:
- differences[key] = value
- elif zab_results[key] != str(value) and zab_results[key] != value:
- differences[key] = value
-
- if not differences:
- module.exit_json(changed=False, results=content['result'], state="present")
-
- # We have differences and need to update
- differences[idname] = zab_results[idname]
- content = zapi.get_content(zbx_class_name, 'update', differences)
- module.exit_json(changed=True, results=content['result'], state="present")
-
- module.exit_json(failed=True,
- changed=False,
- results='Unknown state passed. %s' % state,
- state="unknown")
-
-# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
-# import module snippets. These are required
-from ansible.module_utils.basic import *
-
-main()
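
The update paths in these modules compare the desired params against what Zabbix returns, tolerating the API's habit of returning numbers as strings: a key counts as changed only if it matches neither the raw value nor its str() form. A compact standalone restatement of that comparison (using .get rather than the modules' direct indexing):

    def diff_params(zab_results, params):
        return {key: value for key, value in params.items()
                if zab_results.get(key) != value and zab_results.get(key) != str(value)}

    assert diff_params({'delay': '60'}, {'delay': 60}) == {}
    assert diff_params({'delay': '60'}, {'delay': 30}) == {'delay': 30}
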
diff --git a/roles/lib_zabbix/library/zbx_trigger.py b/roles/lib_zabbix/library/zbx_trigger.py
deleted file mode 100644
index 323defbd9..000000000
--- a/roles/lib_zabbix/library/zbx_trigger.py
+++ /dev/null
@@ -1,234 +0,0 @@
-#!/usr/bin/env python
-'''
-ansible module for zabbix triggers
-'''
-# vim: expandtab:tabstop=4:shiftwidth=4
-#
-# Zabbix trigger ansible module
-#
-#
-# Copyright 2015 Red Hat Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# This is in place because these modules all look similar to one another.
-# They need duplicate code because their behavior is very similar
-# but different for each zabbix class.
-# pylint: disable=duplicate-code
-
-# pylint: disable=import-error
-from openshift_tools.zbxapi import ZabbixAPI, ZabbixConnection
-
-def exists(content, key='result'):
-    ''' Check that key exists in content and that content[key] is non-empty
- '''
- if not content.has_key(key):
- return False
-
- if not content[key]:
- return False
-
- return True
-
-def get_priority(priority):
- ''' determine priority
- '''
- prior = 0
- if 'info' in priority:
- prior = 1
- elif 'warn' in priority:
- prior = 2
- elif 'avg' == priority or 'ave' in priority:
- prior = 3
- elif 'high' in priority:
- prior = 4
- elif 'dis' in priority:
- prior = 5
-
- return prior
-
-def get_deps(zapi, deps):
- ''' get trigger dependencies
- '''
- results = []
- for desc in deps:
- content = zapi.get_content('trigger',
- 'get',
- {'filter': {'description': desc},
- 'expandExpression': True,
- 'selectDependencies': 'triggerid',
- })
- if content.has_key('result'):
- results.append({'triggerid': content['result'][0]['triggerid']})
-
- return results
-
-
-def get_trigger_status(inc_status):
- ''' Determine the trigger's status
- 0 is enabled
- 1 is disabled
- '''
- r_status = 0
- if inc_status == 'disabled':
- r_status = 1
-
- return r_status
-
-def get_template_id(zapi, template_name):
- '''
- get related templates
- '''
- template_ids = []
- app_ids = {}
- # Fetch templates by name
- content = zapi.get_content('template',
- 'get',
- {'search': {'host': template_name},
- 'selectApplications': ['applicationid', 'name']})
- if content.has_key('result'):
- template_ids.append(content['result'][0]['templateid'])
- for app in content['result'][0]['applications']:
- app_ids[app['name']] = app['applicationid']
-
- return template_ids, app_ids
-
-def main():
- '''
- Create a trigger in zabbix
-
- Example:
- "params": {
- "description": "Processor load is too high on {HOST.NAME}",
- "expression": "{Linux server:system.cpu.load[percpu,avg1].last()}>5",
- "dependencies": [
- {
- "triggerid": "14062"
- }
- ]
- },
-
- '''
-
- module = AnsibleModule(
- argument_spec=dict(
- zbx_server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'),
- zbx_user=dict(default=os.environ.get('ZABBIX_USER', None), type='str'),
- zbx_password=dict(default=os.environ.get('ZABBIX_PASSWORD', None), type='str'),
- zbx_debug=dict(default=False, type='bool'),
- expression=dict(default=None, type='str'),
- name=dict(default=None, type='str'),
- description=dict(default=None, type='str'),
- dependencies=dict(default=[], type='list'),
- priority=dict(default='avg', type='str'),
- url=dict(default=None, type='str'),
- status=dict(default=None, type='str'),
- state=dict(default='present', type='str'),
- template_name=dict(default=None, type='str'),
- hostgroup_name=dict(default=None, type='str'),
- query_type=dict(default='filter', choices=['filter', 'search'], type='str'),
- ),
- #supports_check_mode=True
- )
-
- zapi = ZabbixAPI(ZabbixConnection(module.params['zbx_server'],
- module.params['zbx_user'],
- module.params['zbx_password'],
- module.params['zbx_debug']))
-
- #Set the instance and the template for the rest of the calls
- zbx_class_name = 'trigger'
- idname = "triggerid"
- state = module.params['state']
- tname = module.params['name']
-
- templateid = None
- if module.params['template_name']:
- templateid, _ = get_template_id(zapi, module.params['template_name'])
-
- content = zapi.get_content(zbx_class_name,
- 'get',
- {module.params['query_type']: {'description': tname},
- 'expandExpression': True,
- 'selectDependencies': 'triggerid',
- 'templateids': templateid,
- 'group': module.params['hostgroup_name'],
- })
-
- # Get
- if state == 'list':
- module.exit_json(changed=False, results=content['result'], state="list")
-
- # Delete
- if state == 'absent':
- if not exists(content):
- module.exit_json(changed=False, state="absent")
- content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]])
- module.exit_json(changed=True, results=content['result'], state="absent")
-
- # Create and Update
- if state == 'present':
- params = {'description': tname,
- 'comments': module.params['description'],
- 'expression': module.params['expression'],
- 'dependencies': get_deps(zapi, module.params['dependencies']),
- 'priority': get_priority(module.params['priority']),
- 'url': module.params['url'],
- 'status': get_trigger_status(module.params['status']),
- }
-
- # Remove any None valued params
- _ = [params.pop(key, None) for key in params.keys() if params[key] is None]
-
- #******#
- # CREATE
- #******#
- if not exists(content):
- # if we didn't find it, create it
- content = zapi.get_content(zbx_class_name, 'create', params)
-
- if content.has_key('error'):
- module.exit_json(failed=True, changed=True, results=content['error'], state="present")
-
- module.exit_json(changed=True, results=content['result'], state='present')
-
- ########
- # UPDATE
- ########
- differences = {}
- zab_results = content['result'][0]
- for key, value in params.items():
-
- if zab_results[key] != value and zab_results[key] != str(value):
- differences[key] = value
-
- if not differences:
- module.exit_json(changed=False, results=zab_results, state="present")
-
- # We have differences and need to update
- differences[idname] = zab_results[idname]
- content = zapi.get_content(zbx_class_name, 'update', differences)
- module.exit_json(changed=True, results=content['result'], state="present")
-
-
- module.exit_json(failed=True,
- changed=False,
- results='Unknown state passed. %s' % state,
- state="unknown")
-
-# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
-# import module snippets. These are required
-from ansible.module_utils.basic import *
-
-main()
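
get_priority folds loose severity spellings onto Zabbix's 0-5 trigger priority scale (0 'not classified' through 5 'disaster'). An approximate standalone restatement with spot checks:

    def priority_code(priority):
        for token, code in (('info', 1), ('warn', 2), ('avg', 3), ('ave', 3),
                            ('high', 4), ('dis', 5)):
            if token in priority:
                return code
        return 0  # not classified

    assert priority_code('warning') == 2
    assert priority_code('disaster') == 5
    assert priority_code('unknown') == 0
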
diff --git a/roles/lib_zabbix/library/zbx_triggerprototype.py b/roles/lib_zabbix/library/zbx_triggerprototype.py
deleted file mode 100644
index 34a7396a7..000000000
--- a/roles/lib_zabbix/library/zbx_triggerprototype.py
+++ /dev/null
@@ -1,177 +0,0 @@
-#!/usr/bin/env python
-'''
-ansible module for zabbix triggerprototypes
-'''
-# vim: expandtab:tabstop=4:shiftwidth=4
-#
-# Zabbix triggerprototypes ansible module
-#
-#
-# Copyright 2015 Red Hat Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# This is in place because these modules all look similar to one another.
-# They need duplicate code because their behavior is very similar
-# but different for each zabbix class.
-# pylint: disable=duplicate-code
-
-# pylint: disable=import-error
-from openshift_tools.zbxapi import ZabbixAPI, ZabbixConnection
-
-def exists(content, key='result'):
-    ''' Check that key exists in content and that content[key] is non-empty
- '''
- if not content.has_key(key):
- return False
-
- if not content[key]:
- return False
-
- return True
-
-def get_priority(priority):
- ''' determine priority
- '''
- prior = 0
- if 'info' in priority:
- prior = 1
- elif 'warn' in priority:
- prior = 2
- elif 'avg' == priority or 'ave' in priority:
- prior = 3
- elif 'high' in priority:
- prior = 4
- elif 'dis' in priority:
- prior = 5
-
- return prior
-
-def get_trigger_status(inc_status):
- ''' Determine the trigger's status
- 0 is enabled
- 1 is disabled
- '''
- r_status = 0
- if inc_status == 'disabled':
- r_status = 1
-
- return r_status
-
-
-def main():
- '''
- Create a triggerprototype in zabbix
- '''
-
- module = AnsibleModule(
- argument_spec=dict(
- zbx_server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'),
- zbx_user=dict(default=os.environ.get('ZABBIX_USER', None), type='str'),
- zbx_password=dict(default=os.environ.get('ZABBIX_PASSWORD', None), type='str'),
- zbx_debug=dict(default=False, type='bool'),
- name=dict(default=None, type='str'),
- expression=dict(default=None, type='str'),
- description=dict(default=None, type='str'),
- priority=dict(default='avg', type='str'),
- url=dict(default=None, type='str'),
- status=dict(default=None, type='str'),
- state=dict(default='present', type='str'),
- ),
- #supports_check_mode=True
- )
-
- zapi = ZabbixAPI(ZabbixConnection(module.params['zbx_server'],
- module.params['zbx_user'],
- module.params['zbx_password'],
- module.params['zbx_debug']))
-
- #Set the instance and the template for the rest of the calls
- zbx_class_name = 'triggerprototype'
- idname = "triggerid"
- state = module.params['state']
- tname = module.params['name']
-
- content = zapi.get_content(zbx_class_name,
- 'get',
- {'filter': {'description': tname},
- 'expandExpression': True,
- 'selectDependencies': 'triggerid',
- })
-
- # Get
- if state == 'list':
- module.exit_json(changed=False, results=content['result'], state="list")
-
- # Delete
- if state == 'absent':
- if not exists(content):
- module.exit_json(changed=False, state="absent")
- content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]])
- module.exit_json(changed=True, results=content['result'], state="absent")
-
- # Create and Update
- if state == 'present':
- params = {'description': tname,
- 'comments': module.params['description'],
- 'expression': module.params['expression'],
- 'priority': get_priority(module.params['priority']),
- 'url': module.params['url'],
- 'status': get_trigger_status(module.params['status']),
- }
-
- # Remove any None valued params
- _ = [params.pop(key, None) for key in params.keys() if params[key] is None]
-
- #******#
- # CREATE
- #******#
- if not exists(content):
- # if we didn't find it, create it
- content = zapi.get_content(zbx_class_name, 'create', params)
-
- if content.has_key('error'):
- module.exit_json(failed=True, changed=True, results=content['error'], state="present")
-
- module.exit_json(changed=True, results=content['result'], state='present')
-
- ########
- # UPDATE
- ########
- differences = {}
- zab_results = content['result'][0]
- for key, value in params.items():
-
- if zab_results[key] != value and zab_results[key] != str(value):
- differences[key] = value
-
- if not differences:
- module.exit_json(changed=False, results=zab_results, state="present")
-
- # We have differences and need to update
- differences[idname] = zab_results[idname]
- content = zapi.get_content(zbx_class_name, 'update', differences)
- module.exit_json(changed=True, results=content['result'], state="present")
-
-
- module.exit_json(failed=True,
- changed=False,
- results='Unknown state passed. %s' % state,
- state="unknown")
-
-# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
-# import module snippets. These are required
-from ansible.module_utils.basic import *
-
-main()
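
exists(), repeated verbatim at the top of every module here, is just a presence-plus-truthiness check on content['result']. A Python 3-friendly one-line equivalent, for reference (has_key() is Python 2-only):

    def exists(content, key='result'):
        # True only when the key is present and its value is non-empty.
        return bool(content.get(key))

    assert exists({'result': [{'triggerid': '1'}]})
    assert not exists({'result': []})
    assert not exists({'error': 'boom'})
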
diff --git a/roles/lib_zabbix/library/zbx_user.py b/roles/lib_zabbix/library/zbx_user.py
deleted file mode 100644
index 68c5cfbfe..000000000
--- a/roles/lib_zabbix/library/zbx_user.py
+++ /dev/null
@@ -1,192 +0,0 @@
-#!/usr/bin/env python
-'''
-ansible module for zabbix users
-'''
-# vim: expandtab:tabstop=4:shiftwidth=4
-#
-# Zabbix user ansible module
-#
-#
-# Copyright 2015 Red Hat Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# This is in place because these modules all look similar to one another.
-# They need duplicate code because their behavior is very similar
-# but different for each zabbix class.
-# pylint: disable=duplicate-code
-
-# pylint: disable=import-error
-from openshift_tools.zbxapi import ZabbixAPI, ZabbixConnection
-
-def exists(content, key='result'):
-    ''' Check that key exists in content and that content[key] is non-empty
- '''
- if not content.has_key(key):
- return False
-
- if not content[key]:
- return False
-
- return True
-
-def get_usergroups(zapi, usergroups):
- ''' Get usergroups
- '''
- ugroups = []
- for ugr in usergroups:
- content = zapi.get_content('usergroup',
- 'get',
- {'search': {'name': ugr},
- #'selectUsers': 'userid',
- #'getRights': 'extend'
- })
- if content['result']:
- ugroups.append({'usrgrpid': content['result'][0]['usrgrpid']})
-
- return ugroups or None
-
-def get_passwd(passwd):
- '''Determine if password is set, if not, return 'zabbix'
- '''
- if passwd:
- return passwd
-
- return 'zabbix'
-
-def get_usertype(user_type):
- '''
- Determine zabbix user account type
- '''
- if not user_type:
- return None
-
- utype = 1
- if 'super' in user_type:
- utype = 3
- elif 'admin' in user_type or user_type == 'admin':
- utype = 2
-
- return utype
-
-def main():
- '''
- ansible zabbix module for users
- '''
-
- module = AnsibleModule(
- argument_spec=dict(
- zbx_server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'),
- zbx_user=dict(default=os.environ.get('ZABBIX_USER', None), type='str'),
- zbx_password=dict(default=os.environ.get('ZABBIX_PASSWORD', None), type='str'),
- zbx_debug=dict(default=False, type='bool'),
- login=dict(default=None, type='str'),
- first_name=dict(default=None, type='str'),
- last_name=dict(default=None, type='str'),
- user_type=dict(default=None, type='str'),
- password=dict(default=None, type='str'),
- refresh=dict(default=None, type='int'),
- update_password=dict(default=False, type='bool'),
- user_groups=dict(default=[], type='list'),
- state=dict(default='present', type='str'),
- ),
- #supports_check_mode=True
- )
-
- zapi = ZabbixAPI(ZabbixConnection(module.params['zbx_server'],
- module.params['zbx_user'],
- module.params['zbx_password'],
- module.params['zbx_debug']))
-
-    ## media types must exist before user media can be created for users
- zbx_class_name = 'user'
- idname = "userid"
- state = module.params['state']
-
- content = zapi.get_content(zbx_class_name,
- 'get',
- {'output': 'extend',
- 'search': {'alias': module.params['login']},
- "selectUsrgrps": 'usergrpid',
- })
- if state == 'list':
- module.exit_json(changed=False, results=content['result'], state="list")
-
- if state == 'absent':
- if not exists(content) or len(content['result']) == 0:
- module.exit_json(changed=False, state="absent")
-
- content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]])
- module.exit_json(changed=True, results=content['result'], state="absent")
-
- if state == 'present':
-
- params = {'alias': module.params['login'],
- 'passwd': get_passwd(module.params['password']),
- 'usrgrps': get_usergroups(zapi, module.params['user_groups']),
- 'name': module.params['first_name'],
- 'surname': module.params['last_name'],
- 'refresh': module.params['refresh'],
- 'type': get_usertype(module.params['user_type']),
- }
-
- # Remove any None valued params
- _ = [params.pop(key, None) for key in params.keys() if params[key] is None]
-
- if not exists(content):
- # if we didn't find it, create it
- content = zapi.get_content(zbx_class_name, 'create', params)
-
- if content.has_key('Error'):
- module.exit_json(failed=True, changed=False, results=content, state='present')
-
- module.exit_json(changed=True, results=content['result'], state='present')
- # already exists, we need to update it
- # let's compare properties
- differences = {}
-
- # Update password
- if not module.params['update_password']:
- params.pop('passwd', None)
-
- zab_results = content['result'][0]
- for key, value in params.items():
-
- if key == 'usrgrps':
-            # compare by membership: a direct == on lists of dicts fails when ordering differs
- if not all([_ in value for _ in zab_results[key]]):
- differences[key] = value
-
- elif zab_results[key] != value and zab_results[key] != str(value):
- differences[key] = value
-
- if not differences:
- module.exit_json(changed=False, results=zab_results, state="present")
-
- # We have differences and need to update
- differences[idname] = zab_results[idname]
- content = zapi.get_content(zbx_class_name, 'update', differences)
- module.exit_json(changed=True, results=content['result'], state="present")
-
- module.exit_json(failed=True,
- changed=False,
- results='Unknown state passed. %s' % state,
- state="unknown")
-
-# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
-# import module snippets. These are required
-from ansible.module_utils.basic import *
-
-main()
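
get_usertype maps a loose role string onto Zabbix user type codes: 1 for a regular user, 2 for admin, 3 for super admin, and None when unset. A standalone restatement:

    def usertype_code(user_type):
        if not user_type:
            return None
        if 'super' in user_type:
            return 3
        if 'admin' in user_type:
            return 2
        return 1

    assert usertype_code('super admin') == 3
    assert usertype_code('admin') == 2
    assert usertype_code('user') == 1
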
diff --git a/roles/lib_zabbix/library/zbx_user_media.py b/roles/lib_zabbix/library/zbx_user_media.py
deleted file mode 100644
index fc5624346..000000000
--- a/roles/lib_zabbix/library/zbx_user_media.py
+++ /dev/null
@@ -1,283 +0,0 @@
-#!/usr/bin/env python
-'''
- Ansible module for user media
-'''
-# vim: expandtab:tabstop=4:shiftwidth=4
-#
-# Zabbix user media ansible module
-#
-#
-# Copyright 2015 Red Hat Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# This is in place because these modules all look similar to one another.
-# They need duplicate code because their behavior is very similar
-# but different for each zabbix class.
-# pylint: disable=duplicate-code
-
-# pylint: disable=import-error
-from openshift_tools.zbxapi import ZabbixAPI, ZabbixConnection
-
-def exists(content, key='result'):
-    ''' Check that key exists in content and that content[key] is non-empty
- '''
- if not content.has_key(key):
- return False
-
- if not content[key]:
- return False
-
- return True
-
-def get_mtype(zapi, mtype):
- '''Get mediatype
-
- If passed an int, return it as the mediatypeid
- if its a string, then try to fetch through a description
- '''
- if isinstance(mtype, int):
- return mtype
- try:
- return int(mtype)
- except ValueError:
- pass
-
- content = zapi.get_content('mediatype', 'get', {'filter': {'description': mtype}})
- if content.has_key('result') and content['result']:
- return content['result'][0]['mediatypeid']
-
- return None
-
-def get_user(zapi, user):
- ''' Get userids from user aliases
- '''
- content = zapi.get_content('user', 'get', {'filter': {'alias': user}})
- if content['result']:
- return content['result'][0]
-
- return None
-
-def get_severity(severity):
- ''' determine severity
- '''
- if isinstance(severity, int) or \
- isinstance(severity, str):
- return severity
-
- val = 0
- sev_map = {
- 'not': 2**0,
- 'inf': 2**1,
- 'war': 2**2,
- 'ave': 2**3,
- 'avg': 2**3,
- 'hig': 2**4,
- 'dis': 2**5,
- }
- for level in severity:
- val |= sev_map[level[:3].lower()]
- return val
-
-def get_zbx_user_query_data(zapi, user_name):
- ''' If name exists, retrieve it, and build query params.
- '''
- query = {}
- if user_name:
- zbx_user = get_user(zapi, user_name)
- query = {'userid': zbx_user['userid']}
-
- return query
-
-def find_media(medias, user_media):
- ''' Find the user media in the list of medias
- '''
- for media in medias:
- if all([media[key] == str(user_media[key]) for key in user_media.keys()]):
- return media
- return None
-
-def get_active(is_active):
- '''Determine active value
- 0 - enabled
- 1 - disabled
- '''
- active = 1
- if is_active:
- active = 0
-
- return active
-
-def get_mediatype(zapi, mediatype, mediatype_desc):
- ''' Determine mediatypeid
- '''
- mtypeid = None
- if mediatype:
- mtypeid = get_mtype(zapi, mediatype)
- elif mediatype_desc:
- mtypeid = get_mtype(zapi, mediatype_desc)
-
- return mtypeid
-
-def preprocess_medias(zapi, medias):
- ''' Insert the correct information when processing medias '''
- for media in medias:
- # Fetch the mediatypeid from the media desc (name)
- if media.has_key('mediatype'):
- media['mediatypeid'] = get_mediatype(zapi, mediatype=None, mediatype_desc=media.pop('mediatype'))
-
- media['active'] = get_active(media.get('active'))
- media['severity'] = int(get_severity(media['severity']))
-
- return medias
-
-# Disabling branching as the logic requires branches.
-# I've also added a few safeguards which required more branches.
-# pylint: disable=too-many-branches
-def main():
- '''
- Ansible zabbix module for mediatype
- '''
-
- module = AnsibleModule(
- argument_spec=dict(
- zbx_server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'),
- zbx_user=dict(default=os.environ.get('ZABBIX_USER', None), type='str'),
- zbx_password=dict(default=os.environ.get('ZABBIX_PASSWORD', None), type='str'),
- zbx_debug=dict(default=False, type='bool'),
- login=dict(default=None, type='str'),
- active=dict(default=False, type='bool'),
- medias=dict(default=None, type='list'),
- mediaid=dict(default=None, type='int'),
- mediatype=dict(default=None, type='str'),
- mediatype_desc=dict(default=None, type='str'),
- #d-d,hh:mm-hh:mm;d-d,hh:mm-hh:mm...
- period=dict(default=None, type='str'),
- sendto=dict(default=None, type='str'),
- severity=dict(default=None, type='str'),
- state=dict(default='present', type='str'),
- ),
- #supports_check_mode=True
- )
-
- zapi = ZabbixAPI(ZabbixConnection(module.params['zbx_server'],
- module.params['zbx_user'],
- module.params['zbx_password'],
- module.params['zbx_debug']))
-
- #Set the instance and the template for the rest of the calls
- zbx_class_name = 'user'
- idname = "mediaid"
- state = module.params['state']
-
- # User media is fetched through the usermedia.get
- zbx_user_query = get_zbx_user_query_data(zapi, module.params['login'])
- content = zapi.get_content('usermedia', 'get',
- {'userids': [uid for user, uid in zbx_user_query.items()]})
- #####
- # Get
- #####
- if state == 'list':
- module.exit_json(changed=False, results=content['result'], state="list")
-
- ########
- # Delete
- ########
- if state == 'absent':
- if not exists(content) or len(content['result']) == 0:
- module.exit_json(changed=False, state="absent")
-
- if not module.params['login']:
-            module.exit_json(failed=True, changed=False, results='Must specify a user login.', state="absent")
-
- content = zapi.get_content(zbx_class_name, 'deletemedia', [res[idname] for res in content['result']])
-
- if content.has_key('error'):
- module.exit_json(changed=False, results=content['error'], state="absent")
-
- module.exit_json(changed=True, results=content['result'], state="absent")
-
- # Create and Update
- if state == 'present':
- active = get_active(module.params['active'])
- mtypeid = get_mediatype(zapi, module.params['mediatype'], module.params['mediatype_desc'])
-
- medias = module.params['medias']
- if medias == None:
- medias = [{'mediatypeid': mtypeid,
- 'sendto': module.params['sendto'],
- 'active': active,
- 'severity': int(get_severity(module.params['severity'])),
- 'period': module.params['period'],
- }]
- else:
- medias = preprocess_medias(zapi, medias)
-
- params = {'users': [zbx_user_query],
- 'medias': medias,
- 'output': 'extend',
- }
-
- ########
- # Create
- ########
- if not exists(content):
- if not params['medias']:
- module.exit_json(changed=False, results=content['result'], state='present')
-
- # if we didn't find it, create it
- content = zapi.get_content(zbx_class_name, 'addmedia', params)
-
- if content.has_key('error'):
- module.exit_json(failed=True, changed=False, results=content['error'], state="present")
-
- module.exit_json(changed=True, results=content['result'], state='present')
-
- # mediaid signifies an update
- # If user params exists, check to see if they already exist in zabbix
- # if they exist, then return as no update
- # elif they do not exist, then take user params only
- ########
- # Update
- ########
- diff = {'medias': [], 'users': {}}
- _ = [diff['medias'].append(media) for media in params['medias'] if not find_media(content['result'], media)]
-
- if not diff['medias']:
- module.exit_json(changed=False, results=content['result'], state="present")
-
- for user in params['users']:
- diff['users']['userid'] = user['userid']
-
-        # Medias have no real unique key, so we mirror the incoming user's request
- diff['medias'] = medias
-
- # We have differences and need to update
- content = zapi.get_content(zbx_class_name, 'updatemedia', diff)
-
- if content.has_key('error'):
- module.exit_json(failed=True, changed=False, results=content['error'], state="present")
-
- module.exit_json(changed=True, results=content['result'], state="present")
-
- module.exit_json(failed=True,
- changed=False,
- results='Unknown state passed. %s' % state,
- state="unknown")
-
-# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
-# import module snippets. These are required
-from ansible.module_utils.basic import *
-
-main()
diff --git a/roles/lib_zabbix/library/zbx_usergroup.py b/roles/lib_zabbix/library/zbx_usergroup.py
deleted file mode 100644
index e30ce6678..000000000
--- a/roles/lib_zabbix/library/zbx_usergroup.py
+++ /dev/null
@@ -1,228 +0,0 @@
-#!/usr/bin/env python
-'''
-zabbix ansible module for usergroups
-'''
-# vim: expandtab:tabstop=4:shiftwidth=4
-#
-# Zabbix usergroup ansible module
-#
-#
-# Copyright 2015 Red Hat Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# This is in place because the modules look very similar to one another.
-# They need duplicate code because their behavior is very similar
-# but differs for each zabbix class.
-# pylint: disable=duplicate-code
-
-# Disabling too-many-branches as we need the error checking and the if-statements
-# to determine the proper state
-# pylint: disable=too-many-branches
-
-# pylint: disable=import-error
-from openshift_tools.zbxapi import ZabbixAPI, ZabbixConnection
-
-def exists(content, key='result'):
- ''' Check if key exists in content or the size of content[key] > 0
- '''
- if not content.has_key(key):
- return False
-
- if not content[key]:
- return False
-
- return True
-
-def get_rights(zapi, rights):
- '''Get rights
- '''
- if rights == None:
- return None
-
- perms = []
- for right in rights:
- hstgrp = right.keys()[0]
- perm = right.values()[0]
- content = zapi.get_content('hostgroup', 'get', {'search': {'name': hstgrp}})
- if content['result']:
- permission = 0
- if perm == 'ro':
- permission = 2
- elif perm == 'rw':
- permission = 3
- perms.append({'id': content['result'][0]['groupid'],
- 'permission': permission})
- return perms
-
-def get_gui_access(access):
- ''' Return the gui_access for a usergroup
- '''
- access = access.lower()
- if access == 'internal':
- return 1
- elif access == 'disabled':
- return 2
-
- return 0
-
-def get_debug_mode(mode):
- ''' Return the debug_mode for a usergroup
- '''
- mode = mode.lower()
- if mode == 'enabled':
- return 1
-
- return 0
-
-def get_user_status(status):
- ''' Return the user_status for a usergroup
- '''
- status = status.lower()
- if status == 'enabled':
- return 0
-
- return 1
-
-
-def get_userids(zapi, users):
- ''' Get userids from user aliases
- '''
- if not users:
- return None
-
- userids = []
- for alias in users:
- content = zapi.get_content('user', 'get', {'search': {'alias': alias}})
- if content['result']:
- userids.append(content['result'][0]['userid'])
-
- return userids
-
-def main():
- ''' Ansible module for usergroup
- '''
-
- module = AnsibleModule(
- argument_spec=dict(
- zbx_server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'),
- zbx_user=dict(default=os.environ.get('ZABBIX_USER', None), type='str'),
- zbx_password=dict(default=os.environ.get('ZABBIX_PASSWORD', None), type='str'),
- zbx_debug=dict(default=False, type='bool'),
- debug_mode=dict(default='disabled', type='str'),
- gui_access=dict(default='default', type='str'),
- status=dict(default='enabled', type='str'),
- name=dict(default=None, type='str', required=True),
- rights=dict(default=None, type='list'),
- users=dict(default=None, type='list'),
- state=dict(default='present', type='str'),
- ),
- #supports_check_mode=True
- )
-
- zapi = ZabbixAPI(ZabbixConnection(module.params['zbx_server'],
- module.params['zbx_user'],
- module.params['zbx_password'],
- module.params['zbx_debug']))
-
- zbx_class_name = 'usergroup'
- idname = "usrgrpid"
- uname = module.params['name']
- state = module.params['state']
-
- content = zapi.get_content(zbx_class_name,
- 'get',
- {'search': {'name': uname},
- 'selectUsers': 'userid',
- })
- #******#
- # GET
- #******#
- if state == 'list':
- module.exit_json(changed=False, results=content['result'], state="list")
-
- #******#
- # DELETE
- #******#
- if state == 'absent':
- if not exists(content):
- module.exit_json(changed=False, state="absent")
-
- if not uname:
-            module.exit_json(failed=True, changed=False, results='Need to pass in a usergroup name.', state="error")
-
- content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]])
- module.exit_json(changed=True, results=content['result'], state="absent")
-
- # Create and Update
- if state == 'present':
-
- params = {'name': uname,
- 'rights': get_rights(zapi, module.params['rights']),
- 'users_status': get_user_status(module.params['status']),
- 'gui_access': get_gui_access(module.params['gui_access']),
- 'debug_mode': get_debug_mode(module.params['debug_mode']),
- 'userids': get_userids(zapi, module.params['users']),
- }
-
- # Remove any None valued params
- _ = [params.pop(key, None) for key in params.keys() if params[key] == None]
-
- #******#
- # CREATE
- #******#
- if not exists(content):
- # if we didn't find it, create it
- content = zapi.get_content(zbx_class_name, 'create', params)
-
- if content.has_key('error'):
- module.exit_json(failed=True, changed=True, results=content['error'], state="present")
-
- module.exit_json(changed=True, results=content['result'], state='present')
-
-
- ########
- # UPDATE
- ########
- differences = {}
- zab_results = content['result'][0]
- for key, value in params.items():
- if key == 'rights':
- differences['rights'] = value
-
- elif key == 'userids' and zab_results.has_key('users'):
- if zab_results['users'] != value:
- differences['userids'] = value
-
- elif zab_results[key] != value and zab_results[key] != str(value):
- differences[key] = value
-
- if not differences:
- module.exit_json(changed=False, results=zab_results, state="present")
-
- # We have differences and need to update
- differences[idname] = zab_results[idname]
- content = zapi.get_content(zbx_class_name, 'update', differences)
- module.exit_json(changed=True, results=content['result'], state="present")
-
- module.exit_json(failed=True,
- changed=False,
- results='Unknown state passed. %s' % state,
- state="unknown")
-
-# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
-# import module snippets. These are required
-from ansible.module_utils.basic import *
-
-main()
diff --git a/roles/lib_zabbix/tasks/create_template.yml b/roles/lib_zabbix/tasks/create_template.yml
deleted file mode 100644
index 783249c3a..000000000
--- a/roles/lib_zabbix/tasks/create_template.yml
+++ /dev/null
@@ -1,146 +0,0 @@
----
-- name: Template Create Template
- zbx_template:
- zbx_server: "{{ server }}"
- zbx_user: "{{ user }}"
- zbx_password: "{{ password }}"
- name: "{{ template.name }}"
- register: created_template
-
-
-- set_fact:
- lzbx_item_applications: "{{ template.zitems | default([], True) | oo_select_keys_from_list(['applications']) | oo_flatten | unique }}"
- lzbx_itemprototype_applications: "{{ template.zitemprototypes | default([], True) | oo_select_keys_from_list(['applications']) | oo_flatten | unique }}"
-
-- name: Create Application
- zbx_application:
- zbx_server: "{{ server }}"
- zbx_user: "{{ user }}"
- zbx_password: "{{ password }}"
- name: "{{ item }}"
- template_name: "{{ template.name }}"
- with_items:
- - "{{ lzbx_item_applications }}"
- - "{{ lzbx_itemprototype_applications }}"
- register: created_application
- when: template.zitems is defined or template.zitemprototypes is defined
-
-- name: Create Items
- zbx_item:
- zbx_server: "{{ server }}"
- zbx_user: "{{ user }}"
- zbx_password: "{{ password }}"
- key: "{{ item.key }}"
- name: "{{ item.name | default(item.key, true) }}"
- value_type: "{{ item.value_type | default('int') }}"
- data_type: "{{ item.data_type | default('decimal') }}"
- description: "{{ item.description | default('', True) }}"
- multiplier: "{{ item.multiplier | default('', True) }}"
- units: "{{ item.units | default('', True) }}"
- template_name: "{{ template.name }}"
- applications: "{{ item.applications }}"
- zabbix_type: "{{ item.zabbix_type | default('trapper') }}"
- interval: "{{ item.interval | default(60, True) }}"
- delta: "{{ item.delta | default(0, True) }}"
- with_items: template.zitems
- register: created_items
- when: template.zitems is defined
-
-- name: Create Triggers
- zbx_trigger:
- zbx_server: "{{ server }}"
- zbx_user: "{{ user }}"
- zbx_password: "{{ password }}"
- name: "{{ item.name }}"
- description: "{{ item.description | default('', True) }}"
- dependencies: "{{ item.dependencies | default([], True) }}"
- expression: "{{ item.expression }}"
- priority: "{{ item.priority }}"
- url: "{{ item.url | default(None, True) }}"
- status: "{{ item.status | default('', True) }}"
- with_items: template.ztriggers
- when: template.ztriggers is defined
-
-- name: Create Actions
- zbx_action:
- zbx_server: "{{ server }}"
- zbx_user: "{{ user }}"
- zbx_password: "{{ password }}"
- state: "{{ item.state | default('present', True) }}"
- name: "{{ item.name }}"
- status: "{{ item.status | default('enabled', True) }}"
- escalation_time: "{{ item.escalation_time }}"
- conditions_filter: "{{ item.conditions_filter }}"
- operations: "{{ item.operations }}"
- with_items: template.zactions
- when: template.zactions is defined
-
-- name: Create Discoveryrules
- zbx_discoveryrule:
- zbx_server: "{{ server }}"
- zbx_user: "{{ user }}"
- zbx_password: "{{ password }}"
- name: "{{ item.name }}"
- key: "{{ item.key }}"
- lifetime: "{{ item.lifetime }}"
- template_name: "{{ template.name }}"
- description: "{{ item.description | default('', True) }}"
- with_items: template.zdiscoveryrules
- when: template.zdiscoveryrules is defined
-
-- name: Create Item Prototypes
- zbx_itemprototype:
- zbx_server: "{{ server }}"
- zbx_user: "{{ user }}"
- zbx_password: "{{ password }}"
- name: "{{ item.name }}"
- key: "{{ item.key }}"
- discoveryrule_key: "{{ item.discoveryrule_key }}"
- value_type: "{{ item.value_type }}"
- data_type: "{{ item.data_type | default('decimal') }}"
- template_name: "{{ template.name }}"
- applications: "{{ item.applications }}"
- description: "{{ item.description | default('', True) }}"
- multiplier: "{{ item.multiplier | default('', True) }}"
- units: "{{ item.units | default('', True) }}"
- interval: "{{ item.interval | default(60, True) }}"
- delta: "{{ item.delta | default(0, True) }}"
- with_items: template.zitemprototypes
- when: template.zitemprototypes is defined
-
-- name: Create Trigger Prototypes
- zbx_triggerprototype:
- zbx_server: "{{ server }}"
- zbx_user: "{{ user }}"
- zbx_password: "{{ password }}"
- name: "{{ item.name }}"
- expression: "{{ item.expression }}"
- url: "{{ item.url | default('', True) }}"
- priority: "{{ item.priority | default('average', True) }}"
- description: "{{ item.description | default('', True) }}"
- with_items: template.ztriggerprototypes
- when: template.ztriggerprototypes is defined
-
-- name: Create Graphs
- zbx_graph:
- zbx_server: "{{ server }}"
- zbx_user: "{{ user }}"
- zbx_password: "{{ password }}"
- name: "{{ item.name }}"
- height: "{{ item.height }}"
- width: "{{ item.width }}"
- graph_items: "{{ item.graph_items }}"
- with_items: template.zgraphs
- when: template.zgraphs is defined
-
-- name: Create Graph Prototypes
- zbx_graphprototype:
- zbx_server: "{{ server }}"
- zbx_user: "{{ user }}"
- zbx_password: "{{ password }}"
- name: "{{ item.name }}"
- height: "{{ item.height }}"
- width: "{{ item.width }}"
- graph_items: "{{ item.graph_items }}"
- with_items: template.zgraphprototypes
- when: template.zgraphprototypes is defined
diff --git a/roles/lib_zabbix/tasks/create_user.yml b/roles/lib_zabbix/tasks/create_user.yml
deleted file mode 100644
index 1f752a9e1..000000000
--- a/roles/lib_zabbix/tasks/create_user.yml
+++ /dev/null
@@ -1,11 +0,0 @@
----
-- name: Update zabbix credentials for a user
- zbx_user:
- server: "{{ ozb_server }}"
- user: "{{ ozb_user }}"
- password: "{{ ozb_password }}"
- alias: "{{ ozb_username }}"
- passwd: "{{ ozb_new_password | default(ozb_password, true) }}"
- register: user
-
-- debug: var=user.results
diff --git a/roles/nuage_master/files/serviceaccount.sh b/roles/nuage_master/files/serviceaccount.sh
deleted file mode 100644
index f6fdb8a8d..000000000
--- a/roles/nuage_master/files/serviceaccount.sh
+++ /dev/null
@@ -1,63 +0,0 @@
-#!/bin/bash
-# Parse CLI options
-for i in "$@"; do
- case $i in
- --master-cert-dir=*)
- MASTER_DIR="${i#*=}"
- CA_CERT=${MASTER_DIR}/ca.crt
- CA_KEY=${MASTER_DIR}/ca.key
- CA_SERIAL=${MASTER_DIR}/ca.serial.txt
- ADMIN_FILE=${MASTER_DIR}/admin.kubeconfig
- ;;
- --server=*)
- SERVER="${i#*=}"
- ;;
- --output-cert-dir=*)
- OUTDIR="${i#*=}"
- CONFIG_FILE=${OUTDIR}/nuage.kubeconfig
- ;;
- esac
-done
-
-# If any are missing, print the usage and exit
-if [ -z $SERVER ] || [ -z $OUTDIR ] || [ -z $MASTER_DIR ]; then
- echo "Invalid syntax: $@"
- echo "Usage:"
- echo " $0 --server=<address>:<port> --output-cert-dir=/path/to/output/dir/ --master-cert-dir=/path/to/master/"
- echo "--master-cert-dir: Directory where the master's configuration is held"
- echo "--server: Address of Kubernetes API server (default port is 8443)"
- echo "--output-cert-dir: Directory to put artifacts in"
- echo ""
- echo "All options are required"
- exit 1
-fi
-
-# Login as admin so that we can create the service account
-oc login -u system:admin --config=$ADMIN_FILE || exit 1
-oc project default --config=$ADMIN_FILE
-
-ACCOUNT_CONFIG='
-{
- "apiVersion": "v1",
- "kind": "ServiceAccount",
- "metadata": {
- "name": "nuage"
- }
-}
-'
-
-# Create the account with the included info
-echo $ACCOUNT_CONFIG|oc create --config=$ADMIN_FILE -f -
-
-# Add the cluster-reader role, which allows this service account read access to
-# everything in the cluster except secrets
-oadm policy add-cluster-role-to-user cluster-reader system:serviceaccounts:default:nuage --config=$ADMIN_FILE
-
-# Generate certificates and a kubeconfig for the service account
-oadm create-api-client-config --certificate-authority=${CA_CERT} --client-dir=${OUTDIR} --signer-cert=${CA_CERT} --signer-key=${CA_KEY} --signer-serial=${CA_SERIAL} --user=system:serviceaccounts:default:nuage --master=${SERVER} --public-master=${SERVER} --basename='nuage'
-
-# Verify the finalized kubeconfig
-if ! [ $(oc whoami --config=$CONFIG_FILE) == 'system:serviceaccounts:default:nuage' ]; then
- echo "Service account creation failed!"
- exit 1
-fi
diff --git a/roles/nuage_master/handlers/main.yaml b/roles/nuage_master/handlers/main.yaml
index 5d133cf16..56224cf82 100644
--- a/roles/nuage_master/handlers/main.yaml
+++ b/roles/nuage_master/handlers/main.yaml
@@ -1,6 +1,6 @@
---
- name: restart nuage-openshift-monitor
- sudo: true
+ become: yes
service: name=nuage-openshift-monitor state=restarted
- name: restart master
diff --git a/roles/nuage_master/tasks/main.yaml b/roles/nuage_master/tasks/main.yaml
index abeee3d71..b8eaede3b 100644
--- a/roles/nuage_master/tasks/main.yaml
+++ b/roles/nuage_master/tasks/main.yaml
@@ -1,22 +1,20 @@
---
- name: Create directory /usr/share/nuage-openshift-monitor
- sudo: true
+ become: yes
file: path=/usr/share/nuage-openshift-monitor state=directory
- name: Create the log directory
- sudo: true
+ become: yes
file: path={{ nuage_mon_rest_server_logdir }} state=directory
- name: Install Nuage Openshift Monitor
- sudo: true
+ become: yes
yum: name={{ nuage_openshift_rpm }} state=present
-- name: Run the service account creation script
- sudo: true
- script: serviceaccount.sh --server={{ openshift.master.api_url }} --output-cert-dir={{ cert_output_dir }} --master-cert-dir={{ openshift_master_config_dir }}
+- include: serviceaccount.yml
- name: Download the certs and keys
- sudo: true
+ become: yes
fetch: src={{ cert_output_dir }}/{{ item }} dest=/tmp/{{ item }} flat=yes
with_items:
- ca.crt
@@ -27,7 +25,7 @@
- include: certificates.yml
- name: Create nuage-openshift-monitor.yaml
- sudo: true
+ become: yes
template: src=nuage-openshift-monitor.j2 dest=/usr/share/nuage-openshift-monitor/nuage-openshift-monitor.yaml owner=root mode=0644
notify:
- restart master
diff --git a/roles/nuage_master/tasks/serviceaccount.yml b/roles/nuage_master/tasks/serviceaccount.yml
new file mode 100644
index 000000000..5b4af5824
--- /dev/null
+++ b/roles/nuage_master/tasks/serviceaccount.yml
@@ -0,0 +1,51 @@
+---
+- name: Create temporary directory for admin kubeconfig
+ command: mktemp -u /tmp/openshift-ansible-XXXXXXX.kubeconfig
+ register: nuage_tmp_conf_mktemp
+ changed_when: False
+
+- set_fact:
+ nuage_tmp_conf: "{{ nuage_tmp_conf_mktemp.stdout }}"
+
+- name: Copy Configuration to temporary conf
+ command: >
+ cp {{ openshift.common.config_base }}/master/admin.kubeconfig {{nuage_tmp_conf}}
+ changed_when: false
+
+- name: Create Admin Service Account
+ shell: >
+ echo {{ nuage_service_account_config | to_json | quote }} |
+ {{ openshift.common.client_binary }} create
+ -n default
+ --config={{nuage_tmp_conf}}
+ -f -
+ register: osnuage_create_service_account
+ failed_when: "'already exists' not in osnuage_create_service_account.stderr and osnuage_create_service_account.rc != 0"
+ changed_when: osnuage_create_service_account.rc == 0
+
+- name: Configure role/user permissions
+ command: >
+ {{ openshift.common.admin_binary }} {{item}}
+ --config={{nuage_tmp_conf}}
+ with_items: "{{nuage_tasks}}"
+ register: osnuage_perm_task
+ failed_when: "'already exists' not in osnuage_perm_task.stderr and osnuage_perm_task.rc != 0"
+ changed_when: osnuage_perm_task.rc == 0
+
+- name: Generate the node client config
+ command: >
+ {{ openshift.common.admin_binary }} create-api-client-config
+ --certificate-authority={{ openshift_master_ca_cert }}
+ --client-dir={{ cert_output_dir }}
+ --master={{ openshift.master.api_url }}
+ --public-master={{ openshift.master.api_url }}
+ --signer-cert={{ openshift_master_ca_cert }}
+ --signer-key={{ openshift_master_ca_key }}
+ --signer-serial={{ openshift_master_ca_serial }}
+ --basename='nuage'
+ --user={{ nuage_service_account }}
+
+- name: Clean temporary configuration file
+ command: >
+ rm -f {{nuage_tmp_conf}}
+ changed_when: false
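A hedged spot-check, not part of the role, that could run just before the cleanup task above (the binary and account names follow this role's facts and vars; purely illustrative):

```yaml
- name: Verify the nuage service account exists (illustrative)
  command: >
    {{ openshift.common.client_binary }} get serviceaccount nuage
    -n default
    --config={{ nuage_tmp_conf }}
  changed_when: false
```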
diff --git a/roles/nuage_master/templates/nuage-openshift-monitor.j2 b/roles/nuage_master/templates/nuage-openshift-monitor.j2
index 7228e646b..075de9d9e 100644
--- a/roles/nuage_master/templates/nuage-openshift-monitor.j2
+++ b/roles/nuage_master/templates/nuage-openshift-monitor.j2
@@ -15,6 +15,12 @@ vspVersion: {{ vsp_version }}
enterpriseName: {{ enterprise }}
# Name of the domain in which pods will reside
domainName: {{ domain }}
+# CSP admin user's password
+cspAdminPassword: {{ nuage_master_cspadminpasswd }}
+# Enterprise admin user name
+enterpriseAdminUser: {{ nuage_master_adminusername }}
+# Enterprise admin password
+enterpriseAdminPassword: {{ nuage_master_adminuserpasswd }}
# Location where logs should be saved
log_dir: {{ nuage_mon_rest_server_logdir }}
# Monitor rest server parameters
diff --git a/roles/nuage_master/vars/main.yaml b/roles/nuage_master/vars/main.yaml
index ec4562c77..d3536eb33 100644
--- a/roles/nuage_master/vars/main.yaml
+++ b/roles/nuage_master/vars/main.yaml
@@ -1,4 +1,7 @@
openshift_master_config_dir: "{{ openshift.common.config_base }}/master"
+openshift_master_ca_cert: "{{ openshift_master_config_dir }}/ca.crt"
+openshift_master_ca_key: "{{ openshift_master_config_dir }}/ca.key"
+openshift_master_ca_serial: "{{ openshift_master_config_dir }}/ca.serial.txt"
ca_cert: "{{ openshift_master_config_dir }}/ca.crt"
admin_config: "{{ openshift.common.config_base }}/master/admin.kubeconfig"
cert_output_dir: /usr/share/nuage-openshift-monitor
@@ -15,3 +18,17 @@ nuage_ca_master_rest_server_key: "{{ nuage_mon_rest_server_crt_dir }}/nuageMonSe
nuage_ca_master_rest_server_crt: "{{ nuage_mon_rest_server_crt_dir }}/nuageMonServer.crt"
nuage_master_crt_dir : /usr/share/nuage-openshift-monitor
+nuage_service_account: system:serviceaccount:default:nuage
+
+nuage_service_account_config:
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: nuage
+
+nuage_tasks:
+ - policy add-cluster-role-to-user cluster-reader {{ nuage_service_account }}
+
+nuage_master_cspadminpasswd: ''
+nuage_master_adminusername: 'admin'
+nuage_master_adminuserpasswd: 'admin'
diff --git a/roles/nuage_node/handlers/main.yaml b/roles/nuage_node/handlers/main.yaml
index 25482a845..5f2b97ae2 100644
--- a/roles/nuage_node/handlers/main.yaml
+++ b/roles/nuage_node/handlers/main.yaml
@@ -1,8 +1,8 @@
---
- name: restart vrs
- sudo: true
+ become: yes
service: name=openvswitch state=restarted
- name: restart node
- sudo: true
+ become: yes
service: name={{ openshift.common.service_type }}-node state=restarted
diff --git a/roles/nuage_node/tasks/main.yaml b/roles/nuage_node/tasks/main.yaml
index d7dd53802..1146573d3 100644
--- a/roles/nuage_node/tasks/main.yaml
+++ b/roles/nuage_node/tasks/main.yaml
@@ -1,27 +1,27 @@
---
- name: Install Nuage VRS
- sudo: true
+ become: yes
yum: name={{ vrs_rpm }} state=present
- name: Set the uplink interface
- sudo: true
+ become: yes
lineinfile: dest={{ vrs_config }} regexp=^NETWORK_UPLINK_INTF line='NETWORK_UPLINK_INTF={{ uplink_interface }}'
- name: Set the Active Controller
- sudo: true
+ become: yes
lineinfile: dest={{ vrs_config }} regexp=^ACTIVE_CONTROLLER line='ACTIVE_CONTROLLER={{ vsc_active_ip }}'
- name: Set the Standby Controller
- sudo: true
+ become: yes
lineinfile: dest={{ vrs_config }} regexp=^STANDBY_CONTROLLER line='STANDBY_CONTROLLER={{ vsc_standby_ip }}'
when: vsc_standby_ip is defined
- name: Install plugin rpm
- sudo: true
+ become: yes
yum: name={{ plugin_rpm }} state=present
- name: Copy the certificates and keys
- sudo: true
+ become: yes
copy: src="/tmp/{{ item }}" dest="{{ vsp_openshift_dir }}/{{ item }}"
with_items:
- ca.crt
@@ -32,7 +32,7 @@
- include: certificates.yml
- name: Set the vsp-openshift.yaml
- sudo: true
+ become: yes
template: src=vsp-openshift.j2 dest={{ vsp_openshift_yaml }} owner=root mode=0644
notify:
- restart vrs
diff --git a/roles/nuage_node/vars/main.yaml b/roles/nuage_node/vars/main.yaml
index a4d7052a7..86486259f 100644
--- a/roles/nuage_node/vars/main.yaml
+++ b/roles/nuage_node/vars/main.yaml
@@ -8,7 +8,7 @@ ca_cert: "{{ vsp_openshift_dir }}/ca.crt"
api_server: "{{ openshift_node_master_api_url }}"
nuage_mon_rest_server_port: "{{ nuage_openshift_monitor_rest_server_port | default('9443') }}"
nuage_mon_rest_server_url: "https://{{ openshift_master_cluster_hostname }}:{{ nuage_mon_rest_server_port }}"
-docker_bridge: "docker0"
+docker_bridge: "{{ nuage_docker_bridge | default('docker0') }}"
rest_client_cert: "{{ vsp_openshift_dir }}/nuageMonClient.crt"
rest_client_key: "{{ vsp_openshift_dir }}/nuageMonClient.key"
rest_server_ca_cert: "{{ vsp_openshift_dir }}/nuageMonCA.crt"
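The docker_bridge change above makes the bridge name overridable per host; a hedged sketch of the override (host file and bridge value are illustrative):

```yaml
# host_vars/node1.example.com.yml -- illustrative
nuage_docker_bridge: docker1
```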
diff --git a/roles/openshift_builddefaults/meta/main.yml b/roles/openshift_builddefaults/meta/main.yml
new file mode 100644
index 000000000..422d08400
--- /dev/null
+++ b/roles/openshift_builddefaults/meta/main.yml
@@ -0,0 +1,15 @@
+---
+galaxy_info:
+ author: Scott Dodson
+ description: OpenShift Build Defaults configuration
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 1.9
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+dependencies:
+- role: openshift_facts
diff --git a/roles/openshift_builddefaults/tasks/main.yml b/roles/openshift_builddefaults/tasks/main.yml
new file mode 100644
index 000000000..c82aebe72
--- /dev/null
+++ b/roles/openshift_builddefaults/tasks/main.yml
@@ -0,0 +1,24 @@
+---
+- name: Set builddefaults
+ openshift_facts:
+ role: builddefaults
+ # TODO: add ability to define builddefaults env vars sort of like this
+ # may need to move the config generation to a filter however.
+ # openshift_env: "{{ hostvars[inventory_hostname]
+ # | oo_merge_dicts(hostvars)
+ # | oo_openshift_env }}"
+ # openshift_env_structures:
+ # - 'openshift.builddefaults.env.*'
+ local_facts:
+ http_proxy: "{{ openshift_builddefaults_http_proxy | default(None) }}"
+ https_proxy: "{{ openshift_builddefaults_https_proxy | default(None) }}"
+ no_proxy: "{{ openshift_builddefaults_no_proxy | default(None) }}"
+ git_http_proxy: "{{ openshift_builddefaults_git_http_proxy | default(None) }}"
+ git_https_proxy: "{{ openshift_builddefaults_git_https_proxy | default(None) }}"
+
+- name: Set builddefaults config structure
+ openshift_facts:
+ role: builddefaults
+ local_facts:
+ config: "{{ openshift_builddefaults_json | default(builddefaults_yaml) }}"
+
diff --git a/roles/openshift_builddefaults/vars/main.yml b/roles/openshift_builddefaults/vars/main.yml
new file mode 100644
index 000000000..9727c73a5
--- /dev/null
+++ b/roles/openshift_builddefaults/vars/main.yml
@@ -0,0 +1,15 @@
+---
+builddefaults_yaml:
+ BuildDefaults:
+ configuration:
+ apiVersion: v1
+ kind: BuildDefaultsConfig
+ gitHTTPProxy: "{{ openshift.builddefaults.git_http_proxy | default('', true) }}"
+ gitHTTPSProxy: "{{ openshift.builddefaults.git_https_proxy | default('', true) }}"
+ env:
+ - name: HTTP_PROXY
+ value: "{{ openshift.builddefaults.http_proxy | default('', true) }}"
+ - name: HTTPS_PROXY
+ value: "{{ openshift.builddefaults.https_proxy | default('', true) }}"
+ - name: NO_PROXY
+ value: "{{ openshift.builddefaults.no_proxy | default('', true) | join(',') }}"
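These vars render facts that tasks/main.yml seeds from the openshift_builddefaults_* inventory variables; a hedged group_vars sketch (group name and proxy endpoints are illustrative):

```yaml
# group_vars/masters.yml -- illustrative values only
openshift_builddefaults_http_proxy: http://proxy.example.com:3128
openshift_builddefaults_https_proxy: https://proxy.example.com:3128
openshift_builddefaults_no_proxy:
- .cluster.local
- .svc
```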
diff --git a/roles/openshift_cluster_metrics/tasks/main.yml b/roles/openshift_cluster_metrics/tasks/main.yml
index d45f62eca..1fc8a074a 100644
--- a/roles/openshift_cluster_metrics/tasks/main.yml
+++ b/roles/openshift_cluster_metrics/tasks/main.yml
@@ -28,7 +28,6 @@
cluster-reader
system:serviceaccount:default:heapster
register: oex_cluster_header_role
- register: oex_cluster_header_role
failed_when: "'already exists' not in oex_cluster_header_role.stderr and oex_cluster_header_role.rc != 0"
changed_when: false
diff --git a/roles/openshift_common/README.md b/roles/openshift_common/README.md
index 87306d4a6..2a271854b 100644
--- a/roles/openshift_common/README.md
+++ b/roles/openshift_common/README.md
@@ -20,6 +20,7 @@ Role Variables
| openshift_ip | UNDEF | Internal IP address to use for this host |
| openshift_public_hostname | UNDEF | Public hostname to use for this host |
| openshift_public_ip | UNDEF | Public IP address to use for this host |
+| openshift_portal_net | UNDEF | Service IP CIDR |
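A hedged usage example (the CIDR shown is the usual OpenShift default, given for illustration only):

```yaml
openshift_portal_net: 172.30.0.0/16
```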
Dependencies
------------
diff --git a/roles/openshift_common/tasks/main.yml b/roles/openshift_common/tasks/main.yml
index eda43b9f8..d5166b52e 100644
--- a/roles/openshift_common/tasks/main.yml
+++ b/roles/openshift_common/tasks/main.yml
@@ -27,6 +27,8 @@
use_nuage: "{{ openshift_use_nuage | default(None) }}"
use_manageiq: "{{ openshift_use_manageiq | default(None) }}"
data_dir: "{{ openshift_data_dir | default(None) }}"
+ portal_net: "{{ openshift_portal_net | default(openshift_master_portal_net) | default(None) }}"
+ use_dnsmasq: "{{ openshift_use_dnsmasq | default(None) }}"
# Using oo_image_tag_to_rpm_version here is a workaround for how
# openshift_version is set. That value is computed based on either RPM
diff --git a/roles/openshift_docker/tasks/main.yml b/roles/openshift_docker/tasks/main.yml
index 23613b762..873229b34 100644
--- a/roles/openshift_docker/tasks/main.yml
+++ b/roles/openshift_docker/tasks/main.yml
@@ -4,12 +4,13 @@
# openshift_image_tag correctly for upgrades.
- name: Set version when containerized
command: >
- docker run --rm {{ openshift.common.cli_image }}:latest version
+ docker run --rm {{ openshift.common.cli_image }} version
register: cli_image_version
when: openshift.common.is_containerized is defined and openshift.common.is_containerized | bool and openshift_image_tag is not defined
- set_fact:
- l_image_tag: "{{ cli_image_version.stdout_lines[0].split(' ')[1].split('-')[0] }}"
+ l_image_tag: "{{ cli_image_version.stdout_lines[0].split(' ')[1].split('-')[0:2] | join('-') if openshift.common.deployment_type == 'origin' else
+ cli_image_version.stdout_lines[0].split(' ')[1].split('-')[0] }}"
when: openshift.common.is_containerized is defined and openshift.common.is_containerized | bool and openshift_image_tag is not defined
- set_fact:
@@ -24,5 +25,5 @@
- role: docker
local_facts:
openshift_image_tag: "{{ l_image_tag }}"
- openshift_version: "{{ l_image_tag if l_image_tag is defined else '' | oo_image_tag_to_rpm_version }}"
+ openshift_version: "{{ l_image_tag.split('-')[0] if l_image_tag is defined else '' | oo_image_tag_to_rpm_version }}"
when: openshift.common.is_containerized is defined and openshift.common.is_containerized | bool
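The tag-splitting expression above is dense; a hedged standalone play to exercise it (the sample `version` output line is assumed, not taken from this repo):

```yaml
- hosts: localhost
  gather_facts: no
  tasks:
  - set_fact:
      sample_line: "oc v1.2.0-rc1-13-g4a5e9b6"   # assumed sample first line
  - debug:
      # origin: keep the first two dash-separated fields -> "v1.2.0-rc1"
      msg: "{{ sample_line.split(' ')[1].split('-')[0:2] | join('-') }}"
  - debug:
      # enterprise: keep only the leading field -> "v1.2.0"
      msg: "{{ sample_line.split(' ')[1].split('-')[0] }}"
```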
diff --git a/roles/openshift_docker_facts/tasks/main.yml b/roles/openshift_docker_facts/tasks/main.yml
index 89393168b..3acd2bba8 100644
--- a/roles/openshift_docker_facts/tasks/main.yml
+++ b/roles/openshift_docker_facts/tasks/main.yml
@@ -27,6 +27,9 @@
docker_log_options: "{{ openshift.docker.log_options | default(omit) }}"
docker_push_dockerhub: "{{ openshift.docker.disable_push_dockerhub
| default(omit) }}"
+ docker_http_proxy: "{{ openshift.common.http_proxy | default(omit) }}"
+ docker_https_proxy: "{{ openshift.common.https_proxy | default(omit) }}"
+ docker_no_proxy: "{{ openshift.common.no_proxy | default(omit) }}"
- set_fact:
docker_options: >
diff --git a/roles/openshift_etcd/meta/main.yml b/roles/openshift_etcd/meta/main.yml
index 5e5f96d44..7cc548f69 100644
--- a/roles/openshift_etcd/meta/main.yml
+++ b/roles/openshift_etcd/meta/main.yml
@@ -12,7 +12,7 @@ galaxy_info:
categories:
- cloud
dependencies:
-- role: openshift_facts
+- role: openshift_etcd_facts
- role: openshift_docker
when: openshift.common.is_containerized | bool
- role: etcd
diff --git a/roles/openshift_etcd_certificates/meta/main.yml b/roles/openshift_etcd_certificates/meta/main.yml
new file mode 100644
index 000000000..2725fdb51
--- /dev/null
+++ b/roles/openshift_etcd_certificates/meta/main.yml
@@ -0,0 +1,16 @@
+---
+galaxy_info:
+ author: Andrew Butcher
+ description: OpenShift etcd Certificates
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 1.9
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+dependencies:
+- role: openshift_etcd_facts
+- role: etcd_certificates
diff --git a/roles/fluentd_master/meta/main.yml b/roles/openshift_etcd_facts/meta/main.yml
index 148bc377e..925aa9f92 100644
--- a/roles/fluentd_master/meta/main.yml
+++ b/roles/openshift_etcd_facts/meta/main.yml
@@ -1,7 +1,7 @@
---
galaxy_info:
- author: OpenShift Red Hat
- description: Fluentd Master
+ author: Andrew Butcher
+ description: OpenShift etcd Facts
company: Red Hat, Inc.
license: Apache License, Version 2.0
min_ansible_version: 1.9
@@ -10,6 +10,6 @@ galaxy_info:
versions:
- 7
categories:
- - monitoring
- dependencies:
- - openshift_facts
+ - cloud
+dependencies:
+- role: openshift_facts
diff --git a/roles/openshift_etcd_facts/vars/main.yml b/roles/openshift_etcd_facts/vars/main.yml
new file mode 100644
index 000000000..6f3894565
--- /dev/null
+++ b/roles/openshift_etcd_facts/vars/main.yml
@@ -0,0 +1,5 @@
+---
+etcd_is_containerized: "{{ openshift.common.is_containerized }}"
+etcd_is_atomic: "{{ openshift.common.is_atomic }}"
+etcd_hostname: "{{ openshift.common.hostname }}"
+etcd_ip: "{{ openshift.common.ip }}"
diff --git a/roles/openshift_examples/examples-sync.sh b/roles/openshift_examples/examples-sync.sh
index af388f6cf..7d81ac927 100755
--- a/roles/openshift_examples/examples-sync.sh
+++ b/roles/openshift_examples/examples-sync.sh
@@ -5,7 +5,7 @@
#
# This script should be run from openshift-ansible/roles/openshift_examples
-XPAAS_VERSION=ose-v1.3.0
+XPAAS_VERSION=ose-v1.3.0-1
ORIGIN_VERSION=${1:-v1.2}
EXAMPLES_BASE=$(pwd)/files/examples/${ORIGIN_VERSION}
find ${EXAMPLES_BASE} -name '*.json' -delete
@@ -40,7 +40,7 @@ find application-templates-${XPAAS_VERSION}/ -name '*.json' ! -wholename '*secre
wget https://raw.githubusercontent.com/jboss-fuse/application-templates/master/fis-image-streams.json -O ${EXAMPLES_BASE}/xpaas-streams/fis-image-streams.json
wget https://raw.githubusercontent.com/openshift/origin-metrics/master/metrics.yaml -O ${EXAMPLES_BASE}/infrastructure-templates/origin/metrics-deployer.yaml
-cp ${EXAMPLES_BASE}/infrastructure-templates/origin/metrics-*.yaml ${EXAMPLES_BASE}/infrastructure-templates/enterprise/
+wget https://raw.githubusercontent.com/openshift/origin-metrics/enterprise/metrics.yaml -O ${EXAMPLES_BASE}/infrastructure-templates/enterprise/metrics-deployer.yaml
wget https://raw.githubusercontent.com/openshift/origin-aggregated-logging/master/deployment/deployer.yaml -O ${EXAMPLES_BASE}/infrastructure-templates/origin/logging-deployer.yaml
wget https://raw.githubusercontent.com/openshift/origin-aggregated-logging/enterprise/deployment/deployer.yaml -O ${EXAMPLES_BASE}/infrastructure-templates/enterprise/logging-deployer.yaml
diff --git a/roles/openshift_examples/files/examples/v1.2/db-templates/mongodb-persistent-template.json b/roles/openshift_examples/files/examples/v1.2/db-templates/mongodb-persistent-template.json
index 672eaaa09..70c906f8e 100644
--- a/roles/openshift_examples/files/examples/v1.2/db-templates/mongodb-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v1.2/db-templates/mongodb-persistent-template.json
@@ -5,7 +5,7 @@
"name": "mongodb-persistent",
"creationTimestamp": null,
"annotations": {
- "description": "MongoDB database service, with persistent storage. Scaling to more than one replica is not supported",
+ "description": "MongoDB database service, with persistent storage. Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.",
"iconClass": "icon-mongodb",
"tags": "database,mongodb"
}
@@ -232,7 +232,7 @@
"name": "VOLUME_CAPACITY",
"displayName": "Volume Capacity",
"description": "Volume space available for data, e.g. 512Mi, 2Gi.",
- "value": "512Mi",
+ "value": "1Gi",
"required": true
}
],
diff --git a/roles/openshift_examples/files/examples/v1.2/db-templates/mysql-persistent-template.json b/roles/openshift_examples/files/examples/v1.2/db-templates/mysql-persistent-template.json
index d94262dde..e39ee57c8 100644
--- a/roles/openshift_examples/files/examples/v1.2/db-templates/mysql-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v1.2/db-templates/mysql-persistent-template.json
@@ -5,7 +5,7 @@
"name": "mysql-persistent",
"creationTimestamp": null,
"annotations": {
- "description": "MySQL database service, with persistent storage. Scaling to more than one replica is not supported",
+ "description": "MySQL database service, with persistent storage. Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.",
"iconClass": "icon-mysql-database",
"tags": "database,mysql"
}
@@ -221,7 +221,7 @@
"name": "VOLUME_CAPACITY",
"displayName": "Volume Capacity",
"description": "Volume space available for data, e.g. 512Mi, 2Gi.",
- "value": "512Mi",
+ "value": "1Gi",
"required": true
}
],
diff --git a/roles/openshift_examples/files/examples/v1.2/db-templates/postgresql-persistent-template.json b/roles/openshift_examples/files/examples/v1.2/db-templates/postgresql-persistent-template.json
index 5713411ad..347e01de3 100644
--- a/roles/openshift_examples/files/examples/v1.2/db-templates/postgresql-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v1.2/db-templates/postgresql-persistent-template.json
@@ -5,7 +5,7 @@
"name": "postgresql-persistent",
"creationTimestamp": null,
"annotations": {
- "description": "PostgreSQL database service, with persistent storage. Scaling to more than one replica is not supported",
+ "description": "PostgreSQL database service, with persistent storage. Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.",
"iconClass": "icon-postgresql",
"tags": "database,postgresql"
}
@@ -220,7 +220,7 @@
"name": "VOLUME_CAPACITY",
"displayName": "Volume Capacity",
"description": "Volume space available for data, e.g. 512Mi, 2Gi.",
- "value": "512Mi",
+ "value": "1Gi",
"required": true
}
],
diff --git a/roles/openshift_examples/files/examples/v1.2/infrastructure-templates/enterprise/logging-deployer.yaml b/roles/openshift_examples/files/examples/v1.2/infrastructure-templates/enterprise/logging-deployer.yaml
index c6cc98ce3..848e93c5f 100644
--- a/roles/openshift_examples/files/examples/v1.2/infrastructure-templates/enterprise/logging-deployer.yaml
+++ b/roles/openshift_examples/files/examples/v1.2/infrastructure-templates/enterprise/logging-deployer.yaml
@@ -69,6 +69,8 @@ objects:
value: ${ES_OPS_RECOVER_EXPECTED_NODES}
- name: ES_OPS_RECOVER_AFTER_TIME
value: ${ES_OPS_RECOVER_AFTER_TIME}
+ - name: MODE
+ value: ${MODE}
dnsPolicy: ClusterFirst
restartPolicy: Never
serviceAccount: logging-deployer
@@ -80,11 +82,11 @@ objects:
secretName: logging-deployer
parameters:
-
- description: 'Specify prefix for logging components; e.g. for "openshift/origin-logging-deployer:v1.1", set prefix "openshift/origin-"'
+ description: 'Specify image prefix for logging components; e.g. for "registry.access.redhat.com/openshift3/logging-deployment:3.2.0", set prefix "registry.access.redhat.com/openshift3/"'
name: IMAGE_PREFIX
- value: "registry.access.redhat.com/openshift3/"
+ value: registry.access.redhat.com/openshift3/
-
- description: 'Specify version for logging components; e.g. for "openshift/origin-logging-deployer:v1.1", set version "v1.1"'
+ description: 'Specify version for logging components; e.g. for "registry.access.redhat.com/openshift3/logging-deployment:3.2.0", set version "3.2.0"'
name: IMAGE_VERSION
value: "3.2.0"
-
@@ -148,4 +150,7 @@ parameters:
description: "Timeout for *expected* ops nodes to be present when cluster is recovering from a full restart."
name: ES_OPS_RECOVER_AFTER_TIME
value: "5m"
-
+-
+ description: "The mode that the deployer runs in."
+ name: MODE
+ value: "install"
diff --git a/roles/openshift_examples/files/examples/v1.2/infrastructure-templates/enterprise/metrics-deployer.yaml b/roles/openshift_examples/files/examples/v1.2/infrastructure-templates/enterprise/metrics-deployer.yaml
index e8e8713be..67e49f327 100644
--- a/roles/openshift_examples/files/examples/v1.2/infrastructure-templates/enterprise/metrics-deployer.yaml
+++ b/roles/openshift_examples/files/examples/v1.2/infrastructure-templates/enterprise/metrics-deployer.yaml
@@ -79,7 +79,7 @@ parameters:
-
description: 'Specify prefix for metrics components; e.g. for "openshift/origin-metrics-deployer:latest", set prefix "openshift/origin-"'
name: IMAGE_PREFIX
- value: "openshift/origin-"
+ value: "registry.access.redhat.com/openshift3/"
-
description: 'Specify version for metrics components; e.g. for "openshift/origin-metrics-deployer:latest", set version "latest"'
name: IMAGE_VERSION
diff --git a/roles/openshift_examples/files/examples/v1.2/infrastructure-templates/origin/logging-deployer.yaml b/roles/openshift_examples/files/examples/v1.2/infrastructure-templates/origin/logging-deployer.yaml
index 9257b1f28..cc33f77d8 100644
--- a/roles/openshift_examples/files/examples/v1.2/infrastructure-templates/origin/logging-deployer.yaml
+++ b/roles/openshift_examples/files/examples/v1.2/infrastructure-templates/origin/logging-deployer.yaml
@@ -1,156 +1,290 @@
apiVersion: "v1"
-kind: "Template"
-metadata:
- name: logging-deployer-template
- annotations:
- description: "Template for deploying everything needed for aggregated logging. Requires cluster-admin 'logging-deployer' service account and 'logging-deployer' secret."
- tags: "infrastructure"
-labels:
- logging-infra: deployer
- provider: openshift
- component: deployer
-objects:
+kind: "List"
+items:
-
- apiVersion: v1
- kind: Pod
+ apiVersion: "v1"
+ kind: "Template"
metadata:
- generateName: logging-deployer-
- spec:
- containers:
- - image: ${IMAGE_PREFIX}logging-deployment:${IMAGE_VERSION}
- imagePullPolicy: Always
- name: deployer
- volumeMounts:
- - name: secret
- mountPath: /secret
- readOnly: true
- - name: empty
- mountPath: /etc/deploy
- env:
- - name: PROJECT
- valueFrom:
- fieldRef:
- fieldPath: metadata.namespace
- - name: IMAGE_PREFIX
- value: ${IMAGE_PREFIX}
- - name: IMAGE_VERSION
- value: ${IMAGE_VERSION}
- - name: ENABLE_OPS_CLUSTER
- value: ${ENABLE_OPS_CLUSTER}
- - name: KIBANA_HOSTNAME
- value: ${KIBANA_HOSTNAME}
- - name: KIBANA_OPS_HOSTNAME
- value: ${KIBANA_OPS_HOSTNAME}
- - name: PUBLIC_MASTER_URL
- value: ${PUBLIC_MASTER_URL}
- - name: MASTER_URL
- value: ${MASTER_URL}
- - name: ES_INSTANCE_RAM
- value: ${ES_INSTANCE_RAM}
- - name: ES_CLUSTER_SIZE
- value: ${ES_CLUSTER_SIZE}
- - name: ES_NODE_QUORUM
- value: ${ES_NODE_QUORUM}
- - name: ES_RECOVER_AFTER_NODES
- value: ${ES_RECOVER_AFTER_NODES}
- - name: ES_RECOVER_EXPECTED_NODES
- value: ${ES_RECOVER_EXPECTED_NODES}
- - name: ES_RECOVER_AFTER_TIME
- value: ${ES_RECOVER_AFTER_TIME}
- - name: ES_OPS_INSTANCE_RAM
- value: ${ES_OPS_INSTANCE_RAM}
- - name: ES_OPS_CLUSTER_SIZE
- value: ${ES_OPS_CLUSTER_SIZE}
- - name: ES_OPS_NODE_QUORUM
- value: ${ES_OPS_NODE_QUORUM}
- - name: ES_OPS_RECOVER_AFTER_NODES
- value: ${ES_OPS_RECOVER_AFTER_NODES}
- - name: ES_OPS_RECOVER_EXPECTED_NODES
- value: ${ES_OPS_RECOVER_EXPECTED_NODES}
- - name: ES_OPS_RECOVER_AFTER_TIME
- value: ${ES_OPS_RECOVER_AFTER_TIME}
- - name: FLUENTD_NODESELECTOR
- value: ${FLUENTD_NODESELECTOR}
- dnsPolicy: ClusterFirst
- restartPolicy: Never
- serviceAccount: logging-deployer
- volumes:
- - name: empty
- emptyDir: {}
- - name: secret
- secret:
- secretName: logging-deployer
-parameters:
--
- description: 'Specify prefix for logging components; e.g. for "openshift/origin-logging-deployer:v1.1", set prefix "openshift/origin-"'
- name: IMAGE_PREFIX
- value: "docker.io/openshift/origin-"
--
- description: 'Specify version for logging components; e.g. for "openshift/origin-logging-deployer:v1.1", set version "v1.1"'
- name: IMAGE_VERSION
- value: "latest"
--
- description: "If true, set up to use a second ES cluster for ops logs."
- name: ENABLE_OPS_CLUSTER
- value: "false"
--
- description: "External hostname where clients will reach kibana"
- name: KIBANA_HOSTNAME
- required: true
--
- description: "External hostname at which admins will visit the ops Kibana."
- name: KIBANA_OPS_HOSTNAME
- value: kibana-ops.example.com
--
- description: "External URL for the master, for OAuth purposes"
- name: PUBLIC_MASTER_URL
- required: true
--
- description: "Internal URL for the master, for authentication retrieval"
- name: MASTER_URL
- value: "https://kubernetes.default.svc.cluster.local"
--
- description: "Amount of RAM to reserve per ElasticSearch instance."
- name: ES_INSTANCE_RAM
- value: "8G"
--
- description: "How many instances of ElasticSearch to deploy."
- name: ES_CLUSTER_SIZE
- required: true
--
- description: "Number of nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1."
- name: ES_NODE_QUORUM
+ name: logging-deployer-account-template
+ annotations:
+ description: "Template for creating the deployer account and roles needed for the aggregated logging deployer. Create as cluster-admin."
+ tags: "infrastructure"
+ objects:
+ - apiVersion: v1
+ kind: ServiceAccount
+ name: logging-deployer
+ metadata:
+ name: logging-deployer
+ labels:
+ logging-infra: deployer
+ provider: openshift
+ component: deployer
+ secrets:
+ - name: logging-deployer
+ -
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: aggregated-logging-kibana
+ -
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: aggregated-logging-elasticsearch
+ -
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: aggregated-logging-fluentd
+ -
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: aggregated-logging-curator
+ - apiVersion: v1
+ kind: ClusterRole
+ metadata:
+ name: oauth-editor
+ rules:
+ - resources:
+ - oauthclients
+ verbs:
+ - create
+ - delete
+ - apiVersion: v1
+ kind: ClusterRole
+ metadata:
+ name: daemonset-admin
+ rules:
+ - resources:
+ - daemonsets
+ apiGroups:
+ - extensions
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - delete
+ - update
-
- description: "Number of nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_CLUSTER_SIZE."
- name: ES_RECOVER_AFTER_NODES
--
- description: "Number of nodes desired to be present before the cluster will recover from a full restart. By default, ES_CLUSTER_SIZE."
- name: ES_RECOVER_EXPECTED_NODES
--
- description: "Timeout for *expected* nodes to be present when cluster is recovering from a full restart."
- name: ES_RECOVER_AFTER_TIME
- value: "5m"
--
- description: "Amount of RAM to reserve per ops ElasticSearch instance."
- name: ES_OPS_INSTANCE_RAM
- value: "8G"
--
- description: "How many ops instances of ElasticSearch to deploy. By default, ES_CLUSTER_SIZE."
- name: ES_OPS_CLUSTER_SIZE
--
- description: "Number of ops nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1."
- name: ES_OPS_NODE_QUORUM
--
- description: "Number of ops nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_OPS_CLUSTER_SIZE."
- name: ES_OPS_RECOVER_AFTER_NODES
--
- description: "Number of ops nodes desired to be present before the cluster will recover from a full restart. By default, ES_OPS_CLUSTER_SIZE."
- name: ES_OPS_RECOVER_EXPECTED_NODES
--
- description: "Timeout for *expected* ops nodes to be present when cluster is recovering from a full restart."
- name: ES_OPS_RECOVER_AFTER_TIME
- value: "5m"
--
- description: "The nodeSelector used for the Fluentd DaemonSet."
- name: FLUENTD_NODESELECTOR
- value: "logging-infra-fluentd=true"
+ apiVersion: "v1"
+ kind: "Template"
+ metadata:
+ name: logging-deployer-template
+ annotations:
+ description: "Template for running the aggregated logging deployer in a pod. Requires empowered 'logging-deployer' service account and 'logging-deployer' secret."
+ tags: "infrastructure"
+ labels:
+ logging-infra: deployer
+ provider: openshift
+ component: deployer
+ objects:
+ -
+ apiVersion: v1
+ kind: Pod
+ metadata:
+ generateName: logging-deployer-
+ spec:
+ containers:
+ - image: ${IMAGE_PREFIX}logging-deployment:${IMAGE_VERSION}
+ imagePullPolicy: Always
+ name: deployer
+ volumeMounts:
+ - name: secret
+ mountPath: /secret
+ readOnly: true
+ - name: empty
+ mountPath: /etc/deploy
+ env:
+ - name: PROJECT
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: IMAGE_PREFIX
+ value: ${IMAGE_PREFIX}
+ - name: IMAGE_VERSION
+ value: ${IMAGE_VERSION}
+ - name: ENABLE_OPS_CLUSTER
+ value: ${ENABLE_OPS_CLUSTER}
+ - name: KIBANA_HOSTNAME
+ value: ${KIBANA_HOSTNAME}
+ - name: KIBANA_OPS_HOSTNAME
+ value: ${KIBANA_OPS_HOSTNAME}
+ - name: PUBLIC_MASTER_URL
+ value: ${PUBLIC_MASTER_URL}
+ - name: MASTER_URL
+ value: ${MASTER_URL}
+ - name: ES_INSTANCE_RAM
+ value: ${ES_INSTANCE_RAM}
+ - name: ES_PVC_SIZE
+ value: ${ES_PVC_SIZE}
+ - name: ES_PVC_PREFIX
+ value: ${ES_PVC_PREFIX}
+ - name: ES_CLUSTER_SIZE
+ value: ${ES_CLUSTER_SIZE}
+ - name: ES_NODE_QUORUM
+ value: ${ES_NODE_QUORUM}
+ - name: ES_RECOVER_AFTER_NODES
+ value: ${ES_RECOVER_AFTER_NODES}
+ - name: ES_RECOVER_EXPECTED_NODES
+ value: ${ES_RECOVER_EXPECTED_NODES}
+ - name: ES_RECOVER_AFTER_TIME
+ value: ${ES_RECOVER_AFTER_TIME}
+ - name: ES_OPS_INSTANCE_RAM
+ value: ${ES_OPS_INSTANCE_RAM}
+ - name: ES_OPS_PVC_SIZE
+ value: ${ES_OPS_PVC_SIZE}
+ - name: ES_OPS_PVC_PREFIX
+ value: ${ES_OPS_PVC_PREFIX}
+ - name: ES_OPS_CLUSTER_SIZE
+ value: ${ES_OPS_CLUSTER_SIZE}
+ - name: ES_OPS_NODE_QUORUM
+ value: ${ES_OPS_NODE_QUORUM}
+ - name: ES_OPS_RECOVER_AFTER_NODES
+ value: ${ES_OPS_RECOVER_AFTER_NODES}
+ - name: ES_OPS_RECOVER_EXPECTED_NODES
+ value: ${ES_OPS_RECOVER_EXPECTED_NODES}
+ - name: ES_OPS_RECOVER_AFTER_TIME
+ value: ${ES_OPS_RECOVER_AFTER_TIME}
+ - name: FLUENTD_NODESELECTOR
+ value: ${FLUENTD_NODESELECTOR}
+ - name: ES_NODESELECTOR
+ value: ${ES_NODESELECTOR}
+ - name: ES_OPS_NODESELECTOR
+ value: ${ES_OPS_NODESELECTOR}
+ - name: KIBANA_NODESELECTOR
+ value: ${KIBANA_NODESELECTOR}
+ - name: KIBANA_OPS_NODESELECTOR
+ value: ${KIBANA_OPS_NODESELECTOR}
+ - name: CURATOR_NODESELECTOR
+ value: ${CURATOR_NODESELECTOR}
+ - name: CURATOR_OPS_NODESELECTOR
+ value: ${CURATOR_OPS_NODESELECTOR}
+ - name: MODE
+ value: ${MODE}
+ dnsPolicy: ClusterFirst
+ restartPolicy: Never
+ serviceAccount: logging-deployer
+ volumes:
+ - name: empty
+ emptyDir: {}
+ - name: secret
+ secret:
+ secretName: logging-deployer
+ parameters:
+ -
+ description: 'Specify prefix for logging components; e.g. for "openshift/origin-logging-deployer:v1.1", set prefix "openshift/origin-"'
+ name: IMAGE_PREFIX
+ value: "docker.io/openshift/origin-"
+ -
+ description: 'Specify version for logging components; e.g. for "openshift/origin-logging-deployer:v1.1", set version "v1.1"'
+ name: IMAGE_VERSION
+ value: "latest"
+ -
+ description: "If true, set up to use a second ES cluster for ops logs."
+ name: ENABLE_OPS_CLUSTER
+ value: "false"
+ -
+ description: "External hostname where clients will reach kibana"
+ name: KIBANA_HOSTNAME
+ required: true
+ -
+ description: "External hostname at which admins will visit the ops Kibana."
+ name: KIBANA_OPS_HOSTNAME
+ value: kibana-ops.example.com
+ -
+ description: "External URL for the master, for OAuth purposes"
+ name: PUBLIC_MASTER_URL
+ required: true
+ -
+ description: "Internal URL for the master, for authentication retrieval"
+ name: MASTER_URL
+ value: "https://kubernetes.default.svc.cluster.local"
+ -
+ description: "How many instances of ElasticSearch to deploy."
+ name: ES_CLUSTER_SIZE
+ required: true
+ -
+ description: "Amount of RAM to reserve per ElasticSearch instance."
+ name: ES_INSTANCE_RAM
+ value: "8G"
+ -
+ description: "Size of the PersistentVolumeClaim to create per ElasticSearch instance, e.g. 100G. If empty, no PVCs will be created and emptyDir volumes are used instead."
+ name: ES_PVC_SIZE
+ -
+ description: "Prefix for the names of PersistentVolumeClaims to be created; a number will be appended per instance. If they don't already exist, they will be created with size ES_PVC_SIZE."
+ name: ES_PVC_PREFIX
+ value: "logging-es-"
+ -
+ description: "Number of nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1."
+ name: ES_NODE_QUORUM
+ -
+ description: "Number of nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_CLUSTER_SIZE."
+ name: ES_RECOVER_AFTER_NODES
+ -
+ description: "Number of nodes desired to be present before the cluster will recover from a full restart. By default, ES_CLUSTER_SIZE."
+ name: ES_RECOVER_EXPECTED_NODES
+ -
+ description: "Timeout for *expected* nodes to be present when cluster is recovering from a full restart."
+ name: ES_RECOVER_AFTER_TIME
+ value: "5m"
+ -
+ description: "How many ops instances of ElasticSearch to deploy. By default, ES_CLUSTER_SIZE."
+ name: ES_OPS_CLUSTER_SIZE
+ -
+ description: "Amount of RAM to reserve per ops ElasticSearch instance."
+ name: ES_OPS_INSTANCE_RAM
+ value: "8G"
+ -
+ description: "Size of the PersistentVolumeClaim to create per ElasticSearch ops instance, e.g. 100G. If empty, no PVCs will be created and emptyDir volumes are used instead."
+ name: ES_OPS_PVC_SIZE
+ -
+ description: "Prefix for the names of PersistentVolumeClaims to be created; a number will be appended per instance. If they don't already exist, they will be created with size ES_OPS_PVC_SIZE."
+ name: ES_OPS_PVC_PREFIX
+ value: "logging-es-ops-"
+ -
+ description: "Number of ops nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1."
+ name: ES_OPS_NODE_QUORUM
+ -
+ description: "Number of ops nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_OPS_CLUSTER_SIZE."
+ name: ES_OPS_RECOVER_AFTER_NODES
+ -
+ description: "Number of ops nodes desired to be present before the cluster will recover from a full restart. By default, ES_OPS_CLUSTER_SIZE."
+ name: ES_OPS_RECOVER_EXPECTED_NODES
+ -
+ description: "Timeout for *expected* ops nodes to be present when cluster is recovering from a full restart."
+ name: ES_OPS_RECOVER_AFTER_TIME
+ value: "5m"
+ -
+ description: "The nodeSelector used for the Fluentd DaemonSet."
+ name: FLUENTD_NODESELECTOR
+ value: "logging-infra-fluentd=true"
+ -
+    description: "Node selector for the Elasticsearch cluster (label=value)."
+ name: ES_NODESELECTOR
+ value: ""
+ -
+    description: "Node selector for the Elasticsearch operations cluster (label=value)."
+ name: ES_OPS_NODESELECTOR
+ value: ""
+ -
+    description: "Node selector for the Kibana cluster (label=value)."
+ name: KIBANA_NODESELECTOR
+ value: ""
+ -
+    description: "Node selector for the Kibana operations cluster (label=value)."
+ name: KIBANA_OPS_NODESELECTOR
+ value: ""
+ -
+    description: "Node selector for Curator (label=value)."
+ name: CURATOR_NODESELECTOR
+ value: ""
+ -
+    description: "Node selector for the operations Curator (label=value)."
+ name: CURATOR_OPS_NODESELECTOR
+ value: ""
+ -
+ description: "The mode that the deployer runs in."
+ name: MODE
+ value: "install"
diff --git a/roles/openshift_examples/files/examples/v1.2/infrastructure-templates/origin/metrics-deployer.yaml b/roles/openshift_examples/files/examples/v1.2/infrastructure-templates/origin/metrics-deployer.yaml
index 30d79acee..c620c46ec 100644
--- a/roles/openshift_examples/files/examples/v1.2/infrastructure-templates/origin/metrics-deployer.yaml
+++ b/roles/openshift_examples/files/examples/v1.2/infrastructure-templates/origin/metrics-deployer.yaml
@@ -54,8 +54,12 @@ objects:
value: ${IMAGE_VERSION}
- name: MASTER_URL
value: ${MASTER_URL}
+ - name: MODE
+ value: ${MODE}
- name: REDEPLOY
value: ${REDEPLOY}
+ - name: IGNORE_PREFLIGHT
+ value: ${IGNORE_PREFLIGHT}
- name: USE_PERSISTENT_STORAGE
value: ${USE_PERSISTENT_STORAGE}
- name: HAWKULAR_METRICS_HOSTNAME
@@ -66,6 +70,10 @@ objects:
value: ${CASSANDRA_PV_SIZE}
- name: METRIC_DURATION
value: ${METRIC_DURATION}
+ - name: HEAPSTER_NODE_ID
+ value: ${HEAPSTER_NODE_ID}
+ - name: METRIC_RESOLUTION
+ value: ${METRIC_RESOLUTION}
dnsPolicy: ClusterFirst
restartPolicy: Never
serviceAccount: metrics-deployer
@@ -83,7 +91,7 @@ parameters:
-
description: 'Specify version for metrics components; e.g. for "openshift/origin-metrics-deployer:latest", set version "latest"'
name: IMAGE_VERSION
- value: "latest"
+ value: "v0.1.0"
-
description: "Internal URL for the master, for authentication retrieval"
name: MASTER_URL
@@ -93,10 +101,18 @@ parameters:
name: HAWKULAR_METRICS_HOSTNAME
required: true
-
- description: "If set to true the deployer will try and delete all the existing components before trying to redeploy."
+ description: "Can be set to: 'preflight' to perform validation before a deployment; 'deploy' to perform an initial deployment; 'refresh' to delete and redeploy all components but to keep persisted data and routes; 'redeploy' to delete and redeploy everything (losing all data in the process); 'validate' to re-run validations after a deployment"
+ name: MODE
+ value: "deploy"
+-
+ description: "(Deprecated) Turns 'deploy' mode into 'redeploy' mode, deleting and redeploying everything (losing all data in the process)"
name: REDEPLOY
value: "false"
-
+ description: "If preflight validation is blocking deployment and you're sure you don't care about it, this will ignore the results and proceed to deploy."
+ name: IGNORE_PREFLIGHT
+ value: "false"
+-
description: "Set to true for persistent storage, set to false to use non persistent storage"
name: USE_PERSISTENT_STORAGE
value: "true"
@@ -112,3 +128,11 @@ parameters:
description: "How many days metrics should be stored for."
name: METRIC_DURATION
value: "7"
+-
+ description: "The identifier used when generating metric ids in Hawkular"
+ name: HEAPSTER_NODE_ID
+ value: "nodename"
+-
+ description: "How often metrics should be gathered. Defaults value of '10s' for 10 seconds"
+ name: METRIC_RESOLUTION
+ value: "10s"
diff --git a/roles/openshift_examples/files/examples/v1.2/quickstart-templates/jenkins-persistent-template.json b/roles/openshift_examples/files/examples/v1.2/quickstart-templates/jenkins-persistent-template.json
index 6c143fc70..0d8dcffa1 100644
--- a/roles/openshift_examples/files/examples/v1.2/quickstart-templates/jenkins-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v1.2/quickstart-templates/jenkins-persistent-template.json
@@ -5,7 +5,7 @@
"name": "jenkins-persistent",
"creationTimestamp": null,
"annotations": {
- "description": "Jenkins service, with persistent storage.",
+ "description": "Jenkins service, with persistent storage. You must have persistent volumes available in your cluster to use this template.",
"iconClass": "icon-jenkins",
"tags": "instant-app,jenkins"
}
@@ -207,7 +207,7 @@
"name": "VOLUME_CAPACITY",
"displayName": "Volume Capacity",
"description": "Volume space available for data, e.g. 512Mi, 2Gi.",
- "value": "512Mi",
+ "value": "1Gi",
"required": true
}
],
diff --git a/roles/openshift_examples/files/examples/v1.2/quickstart-templates/nodejs-mongodb.json b/roles/openshift_examples/files/examples/v1.2/quickstart-templates/nodejs-mongodb.json
index 3298ef40c..661bcbb69 100644
--- a/roles/openshift_examples/files/examples/v1.2/quickstart-templates/nodejs-mongodb.json
+++ b/roles/openshift_examples/files/examples/v1.2/quickstart-templates/nodejs-mongodb.json
@@ -112,7 +112,10 @@
"secret": "${GENERIC_WEBHOOK_SECRET}"
}
}
- ]
+ ],
+ "postCommit": {
+ "script": "npm test"
+ }
}
},
{
diff --git a/roles/openshift_examples/files/examples/v1.2/quickstart-templates/nodejs.json b/roles/openshift_examples/files/examples/v1.2/quickstart-templates/nodejs.json
index 82df67c4e..0518dfac7 100644
--- a/roles/openshift_examples/files/examples/v1.2/quickstart-templates/nodejs.json
+++ b/roles/openshift_examples/files/examples/v1.2/quickstart-templates/nodejs.json
@@ -112,7 +112,10 @@
"secret": "${GENERIC_WEBHOOK_SECRET}"
}
}
- ]
+ ],
+ "postCommit": {
+ "script": "npm test"
+ }
}
},
{
diff --git a/roles/openshift_expand_partition/README.md b/roles/openshift_expand_partition/README.md
index aed4ec871..c9c7b378c 100644
--- a/roles/openshift_expand_partition/README.md
+++ b/roles/openshift_expand_partition/README.md
@@ -45,7 +45,7 @@ space on /dev/xvda, and the file system will be expanded to fill the new
partition space.
- hosts: mynodes
- sudo: no
+ become: no
remote_user: root
gather_facts: no
roles:
@@ -68,7 +68,7 @@ partition space.
* Create an ansible playbook, say `expandvar.yaml`:
```
- hosts: mynodes
- sudo: no
+ become: no
remote_user: root
gather_facts: no
roles:
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index 9218e12ae..f733fd5a8 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -56,12 +56,54 @@ def migrate_docker_facts(facts):
if 'node' in facts and 'portal_net' in facts['node']:
facts['docker']['hosted_registry_insecure'] = True
facts['docker']['hosted_registry_network'] = facts['node'].pop('portal_net')
+
+ # log_options was originally meant to be a comma separated string, but
+ # we now prefer an actual list, with backward compatibility:
+ if 'log_options' in facts['docker'] and \
+ isinstance(facts['docker']['log_options'], basestring):
+ facts['docker']['log_options'] = facts['docker']['log_options'].split(",")
+
+ return facts
+
+# TODO: We should add a generic migration function that takes source and destination
+# paths and does the right thing rather than one function for common, one for node, etc.
+def migrate_common_facts(facts):
+ """ Migrate facts from various roles into common """
+ params = {
+ 'node': ('portal_net',),
+ 'master': ('portal_net',)
+ }
+ if 'common' not in facts:
+ facts['common'] = {}
+ for role in params.keys():
+ if role in facts:
+ for param in params[role]:
+ if param in facts[role]:
+ facts['common'][param] = facts[role].pop(param)
+ return facts
+
+def migrate_node_facts(facts):
+ """ Migrate facts from various roles into node """
+ params = {
+ 'common': ('dns_ip',),
+ }
+ if 'node' not in facts:
+ facts['node'] = {}
+ for role in params.keys():
+ if role in facts:
+ for param in params[role]:
+ if param in facts[role]:
+ facts['node'][param] = facts[role].pop(param)
return facts
def migrate_local_facts(facts):
""" Apply migrations of local facts """
migrated_facts = copy.deepcopy(facts)
- return migrate_docker_facts(migrated_facts)
+ migrated_facts = migrate_docker_facts(migrated_facts)
+ migrated_facts = migrate_common_facts(migrated_facts)
+ migrated_facts = migrate_node_facts(migrated_facts)
+ migrated_facts = migrate_hosted_facts(migrated_facts)
+ return migrated_facts
def migrate_hosted_facts(facts):
""" Apply migrations for master facts """
@@ -448,6 +490,27 @@ def set_metrics_facts_if_unset(facts):
facts['common']['use_cluster_metrics'] = use_cluster_metrics
return facts
+def set_dnsmasq_facts_if_unset(facts):
+ """ Set dnsmasq facts if not already present in facts
+ Args:
+ facts (dict) existing facts
+ Returns:
+ facts (dict) updated facts with values set if not previously set
+ """
+
+ if 'common' in facts:
+ if 'use_dnsmasq' not in facts['common']:
+ facts['common']['use_dnsmasq'] = bool(facts['common']['version_gte_3_2_or_1_2'])
+ if 'master' in facts and 'dns_port' not in facts['master']:
+ if facts['common']['use_dnsmasq']:
+ facts['master']['dns_port'] = 8053
+ else:
+ facts['master']['dns_port'] = 53
+
+ return facts
+
def set_project_cfg_facts_if_unset(facts):
""" Set Project Configuration facts if not already present in facts dict
dict:
@@ -586,11 +649,13 @@ def set_aggregate_facts(facts):
"""
all_hostnames = set()
internal_hostnames = set()
+ kube_svc_ip = first_ip(facts['common']['portal_net'])
if 'common' in facts:
all_hostnames.add(facts['common']['hostname'])
all_hostnames.add(facts['common']['public_hostname'])
all_hostnames.add(facts['common']['ip'])
all_hostnames.add(facts['common']['public_ip'])
+ facts['common']['kube_svc_ip'] = kube_svc_ip
internal_hostnames.add(facts['common']['hostname'])
internal_hostnames.add(facts['common']['ip'])
@@ -607,9 +672,8 @@ def set_aggregate_facts(facts):
'kubernetes.default.svc', 'kubernetes.default.svc.' + cluster_domain]
all_hostnames.update(svc_names)
internal_hostnames.update(svc_names)
- first_svc_ip = first_ip(facts['master']['portal_net'])
- all_hostnames.add(first_svc_ip)
- internal_hostnames.add(first_svc_ip)
+ all_hostnames.add(kube_svc_ip)
+ internal_hostnames.add(kube_svc_ip)
facts['common']['all_hostnames'] = list(all_hostnames)
facts['common']['internal_hostnames'] = list(internal_hostnames)
@@ -1154,7 +1218,7 @@ def merge_facts(orig, new, additive_facts_to_overwrite, protected_facts_to_overw
if key in inventory_json_facts:
# Watchout for JSON facts that sometimes load as strings.
# (can happen if the JSON contains a boolean)
- if isinstance(new[key], str):
+ if isinstance(new[key], basestring):
facts[key] = yaml.safe_load(new[key])
else:
facts[key] = copy.deepcopy(new[key])
@@ -1263,6 +1327,23 @@ def get_local_facts_from_file(filename):
return local_facts
+def sort_unique(alist):
+ """ Sorts and de-dupes a list
+
+ Args:
+ alist (list): the list to sort and de-dupe
+ Returns:
+ list: a sorted de-duped list
+ """
+
+ alist.sort()
+ out = list()
+ for i in alist:
+ if i not in out:
+ out.append(i)
+
+ return out
+
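+# NOTE: for hashable, mutually comparable items this is equivalent to
+# sorted(set(alist)); the explicit membership loop is kept so unhashable
+# items also work. Note that sort_unique() sorts its argument in place.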
def safe_get_bool(fact):
""" Get a boolean fact safely.
@@ -1273,6 +1354,58 @@ def safe_get_bool(fact):
"""
return bool(strtobool(str(fact)))
+def set_proxy_facts(facts):
+ """ Set global proxy facts and promote defaults from http_proxy, https_proxy,
+ no_proxy to the more specific builddefaults and builddefaults_git vars.
+ 1. http_proxy, https_proxy, no_proxy
+ 2. builddefaults_*
+ 3. builddefaults_git_*
+
+ Args:
+ facts(dict): existing facts
+ Returns:
+ facts(dict): Updated facts with missing values
+ """
+ if 'common' in facts:
+ common = facts['common']
+ if 'http_proxy' in common or 'https_proxy' in common:
+ if 'generate_no_proxy_hosts' in common and \
+ common['generate_no_proxy_hosts']:
+ if 'no_proxy' in common and \
+ isinstance(common['no_proxy'], basestring):
+ common['no_proxy'] = common['no_proxy'].split(",")
+ else:
+ common['no_proxy'] = []
+ if 'no_proxy_internal_hostnames' in common:
+ common['no_proxy'].extend(common['no_proxy_internal_hostnames'].split(','))
+ common['no_proxy'].append('.' + common['dns_domain'])
+ common['no_proxy'].append(common['hostname'])
+ common['no_proxy'] = sort_unique(common['no_proxy'])
+ facts['common'] = common
+
+ if 'builddefaults' in facts:
+ facts['master']['admission_plugin_config'] = dict()
+ builddefaults = facts['builddefaults']
+ common = facts['common']
+ if 'http_proxy' not in builddefaults and 'http_proxy' in common:
+ builddefaults['http_proxy'] = common['http_proxy']
+ if 'https_proxy' not in builddefaults and 'https_proxy' in common:
+ builddefaults['https_proxy'] = common['https_proxy']
+ if 'no_proxy' not in builddefaults and 'no_proxy' in common:
+ builddefaults['no_proxy'] = common['no_proxy']
+ if 'git_http_proxy' not in builddefaults and 'http_proxy' in builddefaults:
+ builddefaults['git_http_proxy'] = builddefaults['http_proxy']
+ if 'git_https_proxy' not in builddefaults and 'https_proxy' in builddefaults:
+ builddefaults['git_https_proxy'] = builddefaults['https_proxy']
+ if 'admission_plugin_config' not in builddefaults:
+ builddefaults['admission_plugin_config'] = dict()
+ if 'config' in builddefaults and ('http_proxy' in builddefaults or \
+ 'https_proxy' in builddefaults):
+ facts['master']['admission_plugin_config'].update(builddefaults['config'])
+ facts['builddefaults'] = builddefaults
+
+ return facts
+
# pylint: disable=too-many-statements
def set_container_facts_if_unset(facts):
""" Set containerized facts.
@@ -1406,7 +1539,8 @@ class OpenShiftFacts(object):
Raises:
OpenShiftFactsUnsupportedRoleError:
"""
- known_roles = ['cloudprovider',
+ known_roles = ['builddefaults',
+ 'cloudprovider',
'common',
'docker',
'etcd',
@@ -1490,9 +1624,11 @@ class OpenShiftFacts(object):
facts = build_controller_args(facts)
facts = build_api_server_args(facts)
facts = set_version_facts_if_unset(facts)
+ facts = set_dnsmasq_facts_if_unset(facts)
facts = set_manageiq_facts_if_unset(facts)
facts = set_aggregate_facts(facts)
facts = set_etcd_facts_if_unset(facts)
+ facts = set_proxy_facts(facts)
if not safe_get_bool(facts['common']['is_containerized']):
facts = set_installed_variant_rpm_facts(facts)
return dict(openshift=facts)
@@ -1519,6 +1655,7 @@ class OpenShiftFacts(object):
deployment_type=deployment_type,
hostname=hostname,
public_hostname=hostname,
+ portal_net='172.30.0.0/16',
client_binary='oc', admin_binary='oadm',
dns_domain='cluster.local',
install_examples=True,
@@ -1546,7 +1683,7 @@ class OpenShiftFacts(object):
etcd_hosts='', etcd_port='4001',
portal_net='172.30.0.0/16',
embedded_etcd=True, embedded_kube=True,
- embedded_dns=True, dns_port='53',
+ embedded_dns=True,
bind_addr='0.0.0.0',
session_max_seconds=3600,
session_name='ssn',
@@ -1576,6 +1713,24 @@ class OpenShiftFacts(object):
if 'hosted' in roles or self.role == 'hosted':
defaults['hosted'] = dict(
+ metrics=dict(
+ deploy=False,
+ duration=7,
+ resolution=10,
+ storage=dict(
+ kind=None,
+ volume=dict(
+ name='metrics',
+ size='10Gi'
+ ),
+ nfs=dict(
+ directory='/exports',
+ options='*(rw,root_squash)'),
+ host=None,
+ access_modes=['ReadWriteMany'],
+ create_pv=True
+ )
+ ),
registry=dict(
storage=dict(
kind=None,
@@ -1777,15 +1932,12 @@ class OpenShiftFacts(object):
if isinstance(val, basestring):
val = [x.strip() for x in val.split(',')]
new_local_facts['docker'][key] = list(set(val) - set(['']))
+ # Convert legacy log_options comma sep string to a list if present:
+ if 'log_options' in new_local_facts['docker'] and \
+ isinstance(new_local_facts['docker']['log_options'], basestring):
+ new_local_facts['docker']['log_options'] = new_local_facts['docker']['log_options'].split(',')
- for facts in new_local_facts.values():
- keys_to_delete = []
- if isinstance(facts, dict):
- for fact, value in facts.iteritems():
- if value == "" or value is None:
- keys_to_delete.append(fact)
- for key in keys_to_delete:
- del facts[key]
+ new_local_facts = self.remove_empty_facts(new_local_facts)
if new_local_facts != local_facts:
self.validate_local_facts(new_local_facts)
@@ -1796,6 +1948,23 @@ class OpenShiftFacts(object):
self.changed = changed
return new_local_facts
+ def remove_empty_facts(self, facts=None):
+ """ Remove empty facts
+
+ Args:
+ facts (dict): facts to clean
+ Returns:
+ facts (dict): facts with empty values removed
+ """
+ facts_to_remove = []
+ for fact, value in facts.iteritems():
+ if isinstance(facts[fact], dict):
+ facts[fact] = self.remove_empty_facts(facts[fact])
+ else:
+ if value == "" or value == [""] or value is None:
+ facts_to_remove.append(fact)
+ for fact in facts_to_remove:
+ del facts[fact]
+ return facts
+
def validate_local_facts(self, facts=None):
""" Validate local facts
diff --git a/roles/openshift_facts/tasks/main.yml b/roles/openshift_facts/tasks/main.yml
index 36def57c8..7510e4e39 100644
--- a/roles/openshift_facts/tasks/main.yml
+++ b/roles/openshift_facts/tasks/main.yml
@@ -33,3 +33,18 @@
is_containerized: "{{ l_is_containerized | default(None) }}"
public_hostname: "{{ openshift_public_hostname | default(None) }}"
public_ip: "{{ openshift_public_ip | default(None) }}"
+
+# had to be done outside of the above because hostname isn't yet set
+- name: Gather hostnames for proxy configuration
+ openshift_facts:
+ role: common
+ local_facts:
+ http_proxy: "{{ openshift_http_proxy | default(None) }}"
+ https_proxy: "{{ openshift_https_proxy | default(None) }}"
+ no_proxy: "{{ openshift_no_proxy | default(None) }}"
+ generate_no_proxy_hosts: "{{ openshift_generate_no_proxy_hosts | default(True) }}"
+ no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
diff --git a/roles/openshift_hosted/tasks/router.yml b/roles/openshift_hosted/tasks/router.yml
index 6a36f74b2..4ccbf4430 100644
--- a/roles/openshift_hosted/tasks/router.yml
+++ b/roles/openshift_hosted/tasks/router.yml
@@ -32,6 +32,7 @@
{{ openshift.common.client_binary }} --api-version='v1' -o json
get nodes -n default --config={{ openshift.common.config_base }}/master/admin.kubeconfig
register: openshift_hosted_router_nodes_json
+ changed_when: false
when: openshift.hosted.router.replicas | default(None) == None
- name: Collect nodes matching router selector
diff --git a/roles/openshift_master/defaults/main.yml b/roles/openshift_master/defaults/main.yml
index 6bf28ff2b..09bde6002 100644
--- a/roles/openshift_master/defaults/main.yml
+++ b/roles/openshift_master/defaults/main.yml
@@ -9,10 +9,10 @@ os_firewall_allow:
port: "{{ openshift.master.api_port }}/tcp"
- service: api controllers https
port: "{{ openshift.master.controllers_port }}/tcp"
-- service: dns tcp
- port: 53/tcp
-- service: dns udp
- port: 53/udp
+- service: skydns tcp
+ port: "{{ openshift.master.dns_port }}/tcp"
+- service: skydns udp
+ port: "{{ openshift.master.dns_port }}/udp"
- service: Fluentd td-agent tcp
port: 24224/tcp
- service: Fluentd td-agent udp
diff --git a/roles/openshift_master/meta/main.yml b/roles/openshift_master/meta/main.yml
index 0d4241e2c..e882e0b8b 100644
--- a/roles/openshift_master/meta/main.yml
+++ b/roles/openshift_master/meta/main.yml
@@ -15,5 +15,6 @@ dependencies:
- role: openshift_docker
- role: openshift_cli
- role: openshift_cloud_provider
+- role: openshift_builddefaults
- role: openshift_master_facts
- role: openshift_hosted_facts
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index 18a42bf93..fee6d3924 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -88,14 +88,41 @@
with_items: openshift.master.identity_providers
- name: Create the htpasswd file if needed
- copy:
+ template:
dest: "{{ item.filename }}"
- content: ""
+ src: htpasswd.j2
mode: 0600
- force: no
+ backup: yes
when: item.kind == 'HTPasswdPasswordIdentityProvider'
with_items: openshift.master.identity_providers
+- name: Create the ldap ca file if needed
+ copy:
+ dest: "{{ item.ca if 'ca' in item and '/' in item.ca else openshift_master_config_dir ~ '/' ~ item.ca | default('ldap_ca.crt') }}"
+ content: "{{ openshift.master.ldap_ca }}"
+ mode: 0600
+ backup: yes
+ when: openshift.master.ldap_ca is defined and item.kind == 'LDAPPasswordIdentityProvider'
+ with_items: openshift.master.identity_providers
+
+- name: Create the openid ca file if needed
+ copy:
+ dest: "{{ item.ca if 'ca' in item and '/' in item.ca else openshift_master_config_dir ~ '/' ~ item.ca | default('openid_ca.crt') }}"
+ content: "{{ openshift.master.openid_ca }}"
+ mode: 0600
+ backup: yes
+ when: openshift.master.openid_ca is defined and item.kind == 'OpenIDIdentityProvider' and item.ca | default('') != ''
+ with_items: openshift.master.identity_providers
+
+- name: Create the request header ca file if needed
+ copy:
+ dest: "{{ item.clientCA if 'clientCA' in item and '/' in item.clientCA else openshift_master_config_dir ~ '/' ~ item.clientCA | default('request_header_ca.crt') }}"
+ content: "{{ openshift.master.request_header_ca }}"
+ mode: 0600
+ backup: yes
+ when: openshift.master.request_header_ca is defined and item.kind == 'RequestHeaderIdentityProvider' and item.clientCA | default('') != ''
+ with_items: openshift.master.identity_providers
+
- name: Install the systemd units
include: systemd_units.yml
diff --git a/roles/openshift_master/templates/atomic-openshift-master.j2 b/roles/openshift_master/templates/atomic-openshift-master.j2
index 862cfa8f1..c70f3ec57 100644
--- a/roles/openshift_master/templates/atomic-openshift-master.j2
+++ b/roles/openshift_master/templates/atomic-openshift-master.j2
@@ -10,8 +10,14 @@ AWS_SECRET_ACCESS_KEY={{ openshift.cloudprovider.aws.secret_key }}
{% endif %}
# Proxy configuration
-# Origin uses standard HTTP_PROXY environment variables. Be sure to set
-# NO_PROXY for your master
-#NO_PROXY=master.example.com
-#HTTP_PROXY=http://USER:PASSWORD@IPADDR:PORT
-#HTTPS_PROXY=https://USER:PASSWORD@IPADDR:PORT
+# See https://docs.openshift.com/enterprise/latest/install_config/install/advanced_install.html#configuring-global-proxy
+{% if 'http_proxy' in openshift.common or 'https_proxy' in openshift.common %}
+HTTP_PROXY='{{ openshift.common.http_proxy | default('') }}'
+HTTPS_PROXY='{{ openshift.common.https_proxy | default('')}}'
+NO_PROXY='{{ openshift.common.no_proxy | default('') | join(',') }},{{ openshift.master.portal_net }},{{ openshift.master.sdn_cluster_network_cidr }}'
+{% else %}
+#HTTP_PROXY=http://user:pass@proxy.example.com
+#HTTPS_PROXY=http://user:pass@proxy.example.com
+#NO_PROXY='.hosts.example.com'
+{% endif %}
+
diff --git a/roles/openshift_master/templates/htpasswd.j2 b/roles/openshift_master/templates/htpasswd.j2
new file mode 100644
index 000000000..ba2c02e20
--- /dev/null
+++ b/roles/openshift_master/templates/htpasswd.j2
@@ -0,0 +1,5 @@
+{% if 'htpasswd_users' in openshift.master %}
+{% for user,pass in openshift.master.htpasswd_users.iteritems() %}
+{{ user ~ ':' ~ pass }}
+{% endfor %}
+{% endif %}
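A quick way to see what the new htpasswd.j2 emits is to render a copy of it directly with Jinja2 (Python 2, matching the module's basestring/iteritems usage, with jinja2 installed); the user and hash below are placeholders:

```python
# Renders a copy of htpasswd.j2 with a sample htpasswd_users dict.
# Placeholder credentials only.
import jinja2

TEMPLATE = (
    "{% if 'htpasswd_users' in openshift.master %}"
    "{% for user,pass in openshift.master.htpasswd_users.iteritems() %}"
    "{{ user ~ ':' ~ pass }}\n"
    "{% endfor %}"
    "{% endif %}"
)

openshift = {'master': {'htpasswd_users': {'alice': '<apr1-hash>'}}}
print(jinja2.Template(TEMPLATE).render(openshift=openshift))
# -> alice:<apr1-hash>
```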
diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2
index 1009aa318..979b428bf 100644
--- a/roles/openshift_master/templates/master.yaml.v1.j2
+++ b/roles/openshift_master/templates/master.yaml.v1.j2
@@ -123,7 +123,7 @@ kubernetesMasterConfig:
keyFile: master.proxy-client.key
schedulerConfigFile: {{ openshift_master_scheduler_conf }}
servicesNodePortRange: ""
- servicesSubnet: {{ openshift.master.portal_net }}
+ servicesSubnet: {{ openshift.common.portal_net }}
staticNodeNames: {{ openshift_node_ips | default([], true) }}
{% endif %}
masterClients:
@@ -138,7 +138,7 @@ networkConfig:
networkPluginName: {{ openshift.common.sdn_network_plugin_name }}
{% endif %}
# serviceNetworkCIDR must match kubernetesMasterConfig.servicesSubnet
- serviceNetworkCIDR: {{ openshift.master.portal_net }}
+ serviceNetworkCIDR: {{ openshift.common.portal_net }}
oauthConfig:
{% if 'oauth_always_show_provider_selection' in openshift.master %}
alwaysShowProviderSelection: {{ openshift.master.oauth_always_show_provider_selection }}
diff --git a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2
index 69754ee10..549ebe5ab 100644
--- a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2
+++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2
@@ -10,8 +10,13 @@ AWS_SECRET_ACCESS_KEY={{ openshift.cloudprovider.aws.secret_key }}
{% endif %}
# Proxy configuration
-# Origin uses standard HTTP_PROXY environment variables. Be sure to set
-# NO_PROXY for your master
-#NO_PROXY=master.example.com
-#HTTP_PROXY=http://USER:PASSWORD@IPADDR:PORT
-#HTTPS_PROXY=https://USER:PASSWORD@IPADDR:PORT
+# See https://docs.openshift.com/enterprise/latest/install_config/install/advanced_install.html#configuring-global-proxy
+{% if 'http_proxy' in openshift.common or 'https_proxy' in openshift.common %}
+HTTP_PROXY='{{ openshift.common.http_proxy | default('') }}'
+HTTPS_PROXY='{{ openshift.common.https_proxy | default('')}}'
+NO_PROXY='{{ openshift.common.no_proxy | default('') | join(',') }},{{ openshift.master.portal_net }},{{ openshift.master.sdn_cluster_network_cidr }}'
+{% else %}
+#HTTP_PROXY=http://user:pass@proxy.example.com
+#HTTPS_PROXY=http://user:pass@proxy.example.com
+#NO_PROXY='.hosts.example.com'
+{% endif %}
diff --git a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2
index 048a4305a..08dc87d2e 100644
--- a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2
+++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2
@@ -10,8 +10,13 @@ AWS_SECRET_ACCESS_KEY={{ openshift.cloudprovider.aws.secret_key }}
{% endif %}
# Proxy configuration
-# Origin uses standard HTTP_PROXY environment variables. Be sure to set
-# NO_PROXY for your master
-#NO_PROXY=master.example.com
-#HTTP_PROXY=http://USER:PASSWORD@IPADDR:PORT
-#HTTPS_PROXY=https://USER:PASSWORD@IPADDR:PORT
+# See https://docs.openshift.com/enterprise/latest/install_config/install/advanced_install.html#configuring-global-proxy
+{% if 'http_proxy' in openshift.common or 'https_proxy' in openshift.common %}
+HTTP_PROXY='{{ openshift.common.http_proxy | default('') }}'
+HTTPS_PROXY='{{ openshift.common.https_proxy | default('')}}'
+NO_PROXY='{{ openshift.common.no_proxy | default('') | join(',') }},{{ openshift.master.portal_net }},{{ openshift.master.sdn_cluster_network_cidr }}'
+{% else %}
+#HTTP_PROXY=http://user:pass@proxy.example.com
+#HTTPS_PROXY=http://user:pass@proxy.example.com
+#NO_PROXY='.hosts.example.com'
+{% endif %}
diff --git a/roles/openshift_master_facts/tasks/main.yml b/roles/openshift_master_facts/tasks/main.yml
index 4d7c04065..090511864 100644
--- a/roles/openshift_master_facts/tasks/main.yml
+++ b/roles/openshift_master_facts/tasks/main.yml
@@ -30,10 +30,10 @@
embedded_etcd: "{{ openshift_master_embedded_etcd | default(None) }}"
embedded_kube: "{{ openshift_master_embedded_kube | default(None) }}"
embedded_dns: "{{ openshift_master_embedded_dns | default(None) }}"
+ # defaults to 8053 when using dnsmasq in 1.2/3.2
dns_port: "{{ openshift_master_dns_port | default(None) }}"
bind_addr: "{{ openshift_master_bind_addr | default(None) }}"
pod_eviction_timeout: "{{ openshift_master_pod_eviction_timeout | default(None) }}"
- portal_net: "{{ openshift_master_portal_net | default(None) }}"
session_max_seconds: "{{ openshift_master_session_max_seconds | default(None) }}"
session_name: "{{ openshift_master_session_name | default(None) }}"
session_secrets_file: "{{ openshift_master_session_secrets_file | default(None) }}"
@@ -42,11 +42,15 @@
access_token_max_seconds: "{{ openshift_master_access_token_max_seconds | default(None) }}"
auth_token_max_seconds: "{{ openshift_master_auth_token_max_seconds | default(None) }}"
identity_providers: "{{ openshift_master_identity_providers | default(None) }}"
+ htpasswd_users: "{{ openshift_master_htpasswd_users | default(lookup('file', openshift_master_htpasswd_file) | oo_htpasswd_users_from_file if openshift_master_htpasswd_file is defined else None) }}"
+ ldap_ca: "{{ openshift_master_ldap_ca | default(lookup('file', openshift_master_ldap_ca_file) if openshift_master_ldap_ca_file is defined else None) }}"
+ openid_ca: "{{ openshift_master_openid_ca | default(lookup('file', openshift_master_openid_ca_file) if openshift_master_openid_ca_file is defined else None) }}"
+ request_header_ca: "{{ openshift_master_request_header_ca | default(lookup('file', openshift_master_request_header_ca_file) if openshift_master_request_header_ca_file is defined else None) }}"
registry_url: "{{ oreg_url | default(None) }}"
oauth_grant_method: "{{ openshift_master_oauth_grant_method | default(None) }}"
sdn_cluster_network_cidr: "{{ osm_cluster_network_cidr | default(None) }}"
sdn_host_subnet_length: "{{ osm_host_subnet_length | default(None) }}"
- default_subdomain: "{{ openshift_master_default_subdomain | default(osm_default_subdomain) | default(None) }}"
+ default_subdomain: "{{ openshift_master_default_subdomain | default(osm_default_subdomain | default(None), true) }}"
custom_cors_origins: "{{ osm_custom_cors_origins | default(None) }}"
default_node_selector: "{{ osm_default_node_selector | default(None) }}"
project_request_message: "{{ osm_project_request_message | default(None) }}"
@@ -54,7 +58,6 @@
mcs_allocator_range: "{{ osm_mcs_allocator_range | default(None) }}"
mcs_labels_per_project: "{{ osm_mcs_labels_per_project | default(None) }}"
uid_allocator_range: "{{ osm_uid_allocator_range | default(None) }}"
- router_selector: "{{ openshift_router_selector | default(None) }}"
registry_selector: "{{ openshift_registry_selector | default(None) }}"
api_server_args: "{{ osm_api_server_args | default(None) }}"
controller_args: "{{ osm_controller_args | default(None) }}"
diff --git a/roles/openshift_master_facts/vars/main.yml b/roles/openshift_master_facts/vars/main.yml
new file mode 100644
index 000000000..3b0ee2761
--- /dev/null
+++ b/roles/openshift_master_facts/vars/main.yml
@@ -0,0 +1,14 @@
+builddefaults_yaml:
+ BuildDefaults:
+ configuration:
+ apiVersion: v1
+ kind: BuildDefaultsConfig
+ gitHTTPProxy: "{{ openshift.master.builddefaults_git_http_proxy | default(omit, true) }}"
+ gitHTTPSProxy: "{{ openshift.master.builddefaults_git_https_proxy | default(omit, true) }}"
+ env:
+ - name: HTTP_PROXY
+ value: "{{ openshift.master.builddefaults_http_proxy | default(omit, true) }}"
+ - name: HTTPS_PROXY
+ value: "{{ openshift.master.builddefaults_https_proxy | default(omit, true) }}"
+ - name: NO_PROXY
+ value: "{{ openshift.master.builddefaults_no_proxy | default(omit, true) | join(',') }}" \ No newline at end of file
diff --git a/roles/openshift_metrics/README.md b/roles/openshift_metrics/README.md
new file mode 100644
index 000000000..ec13d61d2
--- /dev/null
+++ b/roles/openshift_metrics/README.md
@@ -0,0 +1,53 @@
+OpenShift Metrics with Hawkular
+====================
+
+OpenShift Metrics Installation
+
+Requirements
+------------
+This role requires the default subdomain FQDN to be set (see openshift_master_default_subdomain below).
+If persistence is enabled, it also requires NFS.
+
+Role Variables
+--------------
+
+From this role:
+| Name | Default value | Description |
+|-------------------------------------------------|-----------------------|-------------------------------------------------------------|
+| openshift_hosted_metrics_deploy | False | Whether to deploy the metrics stack |
+| openshift_hosted_metrics_storage_nfs_directory | /exports | Root export directory |
+| openshift_hosted_metrics_storage_volume_name | metrics | Metrics volume within openshift_hosted_metrics_volume_dir |
+| openshift_hosted_metrics_storage_volume_size | 10Gi | Metrics volume size |
+| openshift_hosted_metrics_storage_nfs_options | *(rw,root_squash) | NFS options for configured exports |
+| openshift_hosted_metrics_duration | 7 | How many days metrics should be stored for |
+| openshift_hosted_metrics_resolution | 10 | How often metrics are gathered |
+
+
+From openshift_common:
+| Name | Default Value | Description |
+|---------------------------------------|----------------|----------------------------------------|
+| openshift_master_default_subdomain | null | Subdomain FQDN (Mandatory) |
+
+
+Dependencies
+------------
+openshift_facts
+openshift_examples
+
+Example Playbook
+----------------
+
+- name: Configure openshift-metrics
+ hosts: oo_first_master
+ roles:
+ - role: openshift_metrics
+
+License
+-------
+
+Apache License, Version 2.0
+
+Author Information
+------------------
+
+Jose David Martín (j.david.nieto@gmail.com)
diff --git a/roles/openshift_metrics/meta/main.yaml b/roles/openshift_metrics/meta/main.yaml
new file mode 100644
index 000000000..5f8d4f5c5
--- /dev/null
+++ b/roles/openshift_metrics/meta/main.yaml
@@ -0,0 +1,3 @@
+dependencies:
+- { role: openshift_examples }
+- { role: openshift_facts } \ No newline at end of file
diff --git a/roles/openshift_metrics/tasks/main.yaml b/roles/openshift_metrics/tasks/main.yaml
new file mode 100644
index 000000000..ca29ad6e1
--- /dev/null
+++ b/roles/openshift_metrics/tasks/main.yaml
@@ -0,0 +1,57 @@
+---
+- name: Copy Configuration to temporary conf
+ command: >
+ cp {{ openshift.common.config_base }}/master/admin.kubeconfig {{hawkular_tmp_conf}}
+ changed_when: false
+
+- name: Create metrics-deployer Service Account
+ shell: >
+ echo {{ deployer_service_account | to_json | quote }} |
+ {{ openshift.common.client_binary }} create
+ -n openshift-infra
+ --config={{hawkular_tmp_conf}}
+ -f -
+ register: deployer_create_service_account
+ failed_when: "'already exists' not in deployer_create_service_account.stderr and deployer_create_service_account.rc != 0"
+ changed_when: deployer_create_service_account.rc == 0
+
+- name: Create metrics-deployer Secret
+ command: >
+ {{ openshift.common.client_binary }}
+ secrets new metrics-deployer
+ nothing=/dev/null
+ --config={{hawkular_tmp_conf}}
+ -n openshift-infra
+ register: deployer_create_secret
+ failed_when: "'already exists' not in deployer_create_secret.stderr and deployer_create_secret.rc !=0"
+ changed_when: deployer_create_secret.rc == 0
+
+- name: Configure role/user permissions
+ command: >
+ {{ openshift.common.admin_binary }} {{item}}
+ --config={{hawkular_tmp_conf}}
+ with_items: "{{hawkular_permission_oc_commands}}"
+ register: hawkular_perm_task
+ failed_when: "'already exists' not in hawkular_perm_task.stderr and hawkular_perm_task.rc != 0"
+ changed_when: hawkular_perm_task.rc == 0
+
+- name: Check openshift_master_default_subdomain
+ fail:
+ msg: "Default subdomain should be defined"
+ when: openshift.master.default_subdomain is not defined
+
+- name: Create Heapster and Hawkular/Cassandra Services
+ shell: >
+ {{ openshift.common.client_binary }} process -f \
+ /usr/share/openshift/examples/infrastructure-templates/{{ hawkular_type }}/metrics-deployer.yaml -v \
+ HAWKULAR_METRICS_HOSTNAME=hawkular-metrics.{{ openshift.master.default_subdomain }} USE_PERSISTENT_STORAGE={{ hawkular_persistence }} \
+ METRIC_DURATION={{ openshift.hosted.metrics.duration }} METRIC_RESOLUTION={{ openshift.hosted.metrics.resolution }} |
+ {{ openshift.common.client_binary }} create -n openshift-infra --config={{hawkular_tmp_conf}} -f -
+ register: oex_heapster_services
+ failed_when: "'already exists' not in oex_heapster_services.stderr and oex_heapster_services.rc != 0"
+ changed_when: false
+
+- name: Clean temporary config file
+ command: >
+ rm -rf {{hawkular_tmp_conf}}
+ changed_when: false
diff --git a/roles/openshift_metrics/vars/main.yaml b/roles/openshift_metrics/vars/main.yaml
new file mode 100644
index 000000000..82d9d29f7
--- /dev/null
+++ b/roles/openshift_metrics/vars/main.yaml
@@ -0,0 +1,19 @@
+hawkular_permission_oc_commands:
+ - policy add-role-to-user edit system:serviceaccount:openshift-infra:metrics-deployer -n openshift-infra
+ - policy add-cluster-role-to-user cluster-admin system:serviceaccount:openshift-infra:heapster
+
+deployer_service_account:
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: metrics-deployer
+ secrets:
+ - name: metrics-deployer
+
+
+hawkular_tmp_conf: /tmp/hawkular_admin.kubeconfig
+
+hawkular_persistence: "{% if openshift.hosted.metrics.storage.kind != None %}true{% else %}false{% endif %}"
+
+hawkular_type: "{{ 'origin' if deployment_type == 'origin' else 'enterprise' }}"
+
diff --git a/roles/openshift_node/meta/main.yml b/roles/openshift_node/meta/main.yml
index 84ba9ac2e..ca0c332ea 100644
--- a/roles/openshift_node/meta/main.yml
+++ b/roles/openshift_node/meta/main.yml
@@ -15,3 +15,6 @@ dependencies:
- role: openshift_docker
- role: openshift_cloud_provider
- role: openshift_common
+- role: openshift_node_dnsmasq
+ when: openshift.common.use_dnsmasq
+
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index 8987e0191..06fde88af 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -9,13 +9,6 @@
role: "{{ item.role }}"
local_facts: "{{ item.local_facts }}"
with_items:
- - role: common
- local_facts:
- # TODO: Replace this with a lookup or filter plugin.
- # TODO: Move this to the node role
- dns_ip: "{{ openshift_dns_ip
- | default(openshift_master_cluster_vip
- | default(None if openshift.common.version_gte_3_1_or_1_1 | bool else openshift_node_first_master_ip | default(None, true), true), true) }}"
- role: node
local_facts:
annotations: "{{ openshift_node_annotations | default(none) }}"
@@ -32,6 +25,7 @@
ovs_image: "{{ osn_ovs_image | default(None) }}"
proxy_mode: "{{ openshift_node_proxy_mode | default('iptables') }}"
local_quota_per_fsgroup: "{{ openshift_node_local_quota_per_fsgroup | default(None) }}"
+ dns_ip: "{{ openshift_dns_ip | default(none) | get_dns_ip(hostvars[inventory_hostname])}}"
# We have to add tuned-profiles in the same transaction otherwise we run into depsolving
# problems because the rpms don't pin the version properly. This was fixed in 3.1 packaging.
diff --git a/roles/openshift_node/templates/node.yaml.v1.j2 b/roles/openshift_node/templates/node.yaml.v1.j2
index 28cb1ea26..9ba1a01dd 100644
--- a/roles/openshift_node/templates/node.yaml.v1.j2
+++ b/roles/openshift_node/templates/node.yaml.v1.j2
@@ -1,8 +1,8 @@
allowDisabledDocker: false
apiVersion: v1
dnsDomain: {{ openshift.common.dns_domain }}
-{% if 'dns_ip' in openshift.common %}
-dnsIP: {{ openshift.common.dns_ip }}
+{% if 'dns_ip' in openshift.node %}
+dnsIP: {{ openshift.node.dns_ip }}
{% endif %}
dockerConfig:
execHandlerName: ""
diff --git a/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh b/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh
new file mode 100755
index 000000000..51e0751e9
--- /dev/null
+++ b/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh
@@ -0,0 +1,55 @@
+#!/bin/bash -x
+
+# This NetworkManager dispatcher script replicates the functionality of
+# NetworkManager's dns=dnsmasq however, rather than hardcoding the listening
+# address and /etc/resolv.conf to 127.0.0.1 it pulls the IP address from the
+# interface that owns the default route. This enables us to then configure pods
+# to use this IP address as their only resolver, where as using 127.0.0.1 inside
+# a pod would fail.
+#
+# To use this,
+# Drop this script in /etc/NetworkManager/dispatcher.d/
+# systemctl restart NetworkManager
+# Configure node-config.yaml to set dnsIP: to the ip address of this
+# node
+#
+# Test it:
+# host kubernetes.default.svc.cluster.local
+# host google.com
+#
+# TODO: I think this would be easy to add as a config option in NetworkManager
+# natively, look at hacking that up
+
+cd /etc/sysconfig/network-scripts
+. ./network-functions
+
+[ -f ../network ] && . ../network
+
+if [[ $2 =~ ^(up|dhcp4-change)$ ]]; then
+ # couldn't find an existing method to determine if the interface owns the
+ # default route
+ def_route=$(/sbin/ip route list match 0.0.0.0/0 | awk '{print $3 }')
+ def_route_int=$(/sbin/ip route get to ${def_route} | awk '{print $3}')
+ def_route_ip=$(/sbin/ip route get to ${def_route} | awk '{print $5}')
+ if [[ ${DEVICE_IFACE} == ${def_route_int} ]]; then
+ if [ ! -f /etc/dnsmasq.d/origin-dns.conf ]; then
+ cat << EOF > /etc/dnsmasq.d/origin-dns.conf
+strict-order
+no-resolv
+domain-needed
+server=/cluster.local/172.30.0.1
+server=/30.172.in-addr.arpa/172.30.0.1
+EOF
+ fi
+ # zero out our upstream servers list and feed it into dnsmasq
+ echo '' > /etc/dnsmasq.d/origin-upstream-dns.conf
+ for ns in ${DHCP4_DOMAIN_NAME_SERVERS}; do
+ echo "server=${ns}" >> /etc/dnsmasq.d/origin-upstream-dns.conf
+ done
+ echo "listen-address=${def_route_ip}" >> /etc/dnsmasq.d/origin-upstream-dns.conf
+ systemctl restart dnsmasq
+
+ sed -i 's/^nameserver.*$/nameserver '"${def_route_ip}"'/g' /etc/resolv.conf
+ echo "# nameserver updated by /etc/NetworkManager/dispatcher.d/99-origin-dns.sh" >> /etc/resolv.conf
+ fi
+fi
diff --git a/roles/openshift_node_dnsmasq/handlers/main.yml b/roles/openshift_node_dnsmasq/handlers/main.yml
new file mode 100644
index 000000000..7e9e4d299
--- /dev/null
+++ b/roles/openshift_node_dnsmasq/handlers/main.yml
@@ -0,0 +1,5 @@
+---
+- name: restart NetworkManager
+ service:
+ name: NetworkManager
+ state: restarted
diff --git a/roles/openshift_node_dnsmasq/meta/main.yml b/roles/openshift_node_dnsmasq/meta/main.yml
new file mode 100644
index 000000000..c83d64ae4
--- /dev/null
+++ b/roles/openshift_node_dnsmasq/meta/main.yml
@@ -0,0 +1,15 @@
+---
+galaxy_info:
+ author: Scott Dodson
+ description: OpenShift Node DNSMasq support
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 1.7
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+dependencies:
+- role: openshift_common
diff --git a/roles/openshift_node_dnsmasq/tasks/main.yml b/roles/openshift_node_dnsmasq/tasks/main.yml
new file mode 100644
index 000000000..4cb24469d
--- /dev/null
+++ b/roles/openshift_node_dnsmasq/tasks/main.yml
@@ -0,0 +1,27 @@
+---
+- name: Check for NetworkManager service
+ command: >
+ systemctl show NetworkManager
+ register: nm_show
+ changed_when: false
+
+- name: Set fact network_manager_active
+ set_fact:
+ network_manager_active: "{{ 'ActiveState=active' in nm_show.stdout }}"
+
+- name: Install dnsmasq
+ action: "{{ ansible_pkg_mgr }} name=dnsmasq state=installed"
+ when: not openshift.common.is_atomic | bool
+
+- name: Install dnsmasq configuration
+ template:
+ src: origin-dns.conf.j2
+ dest: /etc/dnsmasq.d/origin-dns.conf
+
+# Dynamic NetworkManager based dispatcher
+- include: ./network-manager.yml
+ when: network_manager_active | bool
+
+# Relies on ansible in order to configure static config
+- include: ./no-network-manager.yml
+ when: not network_manager_active | bool
+
diff --git a/roles/openshift_node_dnsmasq/tasks/network-manager.yml b/roles/openshift_node_dnsmasq/tasks/network-manager.yml
new file mode 100644
index 000000000..dddcfc9da
--- /dev/null
+++ b/roles/openshift_node_dnsmasq/tasks/network-manager.yml
@@ -0,0 +1,9 @@
+---
+- name: Install network manager dispatch script
+ copy:
+ src: networkmanager/99-origin-dns.sh
+ dest: /etc/NetworkManager/dispatcher.d/
+ mode: 0755
+ notify: restart NetworkManager
+
+- meta: flush_handlers
diff --git a/roles/openshift_node_dnsmasq/tasks/no-network-manager.yml b/roles/openshift_node_dnsmasq/tasks/no-network-manager.yml
new file mode 100644
index 000000000..cda90bd10
--- /dev/null
+++ b/roles/openshift_node_dnsmasq/tasks/no-network-manager.yml
@@ -0,0 +1,2 @@
+---
+- fail: msg="Not implemented" \ No newline at end of file
diff --git a/roles/openshift_node_dnsmasq/templates/origin-dns.conf.j2 b/roles/openshift_node_dnsmasq/templates/origin-dns.conf.j2
new file mode 100644
index 000000000..1753bb821
--- /dev/null
+++ b/roles/openshift_node_dnsmasq/templates/origin-dns.conf.j2
@@ -0,0 +1,4 @@
+strict-order
+no-resolv
+domain-needed
+server=/{{ openshift.common.dns_domain }}/{{ openshift.common.kube_svc_ip }}
diff --git a/roles/openshift_storage_nfs/defaults/main.yml b/roles/openshift_storage_nfs/defaults/main.yml
index 5f6893129..90592e9d0 100644
--- a/roles/openshift_storage_nfs/defaults/main.yml
+++ b/roles/openshift_storage_nfs/defaults/main.yml
@@ -8,6 +8,14 @@ openshift:
options: "*(rw,root_squash)"
volume:
name: "registry"
+ metrics:
+ deploy: False
+ storage:
+ nfs:
+ directory: "/exports"
+ options: "*(rw,root_squash)"
+ volume:
+ name: "metrics"
os_firewall_use_firewalld: False
os_firewall_allow:
- service: nfs
diff --git a/roles/openshift_storage_nfs_lvm/README.md b/roles/openshift_storage_nfs_lvm/README.md
index 1ee02e18a..3680ef5b5 100644
--- a/roles/openshift_storage_nfs_lvm/README.md
+++ b/roles/openshift_storage_nfs_lvm/README.md
@@ -62,7 +62,7 @@ Both of them are mounted into `/exports/openshift` directory. Both directories
exported via NFS. json files are created in /root.
- hosts: nfsservers
- sudo: no
+ become: no
remote_user: root
gather_facts: no
roles:
@@ -87,7 +87,7 @@ exported via NFS. json files are created in /root.
* Create an ansible playbook, say `setupnfs.yaml`:
```
- hosts: nfsservers
- sudo: no
+ become: no
remote_user: root
gather_facts: no
roles:
diff --git a/roles/os_ipv6_disable/tasks/main.yaml b/roles/os_ipv6_disable/tasks/main.yaml
deleted file mode 100644
index fae5beee7..000000000
--- a/roles/os_ipv6_disable/tasks/main.yaml
+++ /dev/null
@@ -1,11 +0,0 @@
----
-# Disable ipv6 on RHEL7
-
-- name: Disable all ipv6
- sysctl: name="net.ipv6.conf.all.disable_ipv6" value=1 sysctl_set=yes state=present reload=yes
-
-- name: Disable default ipv6
- sysctl: name="net.ipv6.conf.default.disable_ipv6" value=1 sysctl_set=yes state=present reload=yes
-
-- name: Remove ipv6 localhost from /etc/hosts
- lineinfile: dest='/etc/hosts' regexp='^::1 ' state=absent owner=root group=root mode=0644
diff --git a/roles/os_reboot_server/tasks/main.yaml b/roles/os_reboot_server/tasks/main.yaml
deleted file mode 100644
index 581ed3e0a..000000000
--- a/roles/os_reboot_server/tasks/main.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-# Role to reboot a server
-- name: Restart server
- shell: sleep 2 && shutdown -r now "Ansible updates triggered"
- async: 1
- poll: 0
- ignore_errors: true
-
-- name: Wait for server to restart
- local_action:
- module: wait_for
- host={{ ansible_ssh_host }}
- port=22
- delay=3
- timeout=300
- sudo: false
diff --git a/roles/os_utils/tasks/main.yaml b/roles/os_utils/tasks/main.yaml
deleted file mode 100644
index 346f6566f..000000000
--- a/roles/os_utils/tasks/main.yaml
+++ /dev/null
@@ -1,17 +0,0 @@
----
-# Utility packages that make things helpful
-
-- name: Install useful rpm packages
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
- with_items:
- - wget
- - git
- - net-tools
- - bind-utils
- - iptables-services
- - bridge-utils
- - bash-completion
- - atop
- - htop
- - ack
- - telnet
diff --git a/roles/os_zabbix/README.md b/roles/os_zabbix/README.md
deleted file mode 100644
index ac3dc2833..000000000
--- a/roles/os_zabbix/README.md
+++ /dev/null
@@ -1,40 +0,0 @@
-os_zabbix
-=========
-
-Automate zabbix tasks.
-
-Requirements
-------------
-
-This requires the openshift_tools rpm be installed for the zbxapi.py library. It can be found here: https://github.com/openshift/openshift-tools under openshift_tools/monitoring/zbxapi.py for now.
-
-Role Variables
---------------
-
-zab_server
-zab_username
-zab_password
-
-Dependencies
-------------
-
-This depeonds on the zbxapi.py library located here: https://github.com/openshift/openshift-tools under openshift_tools/monitoring/zbxapi.py for now.
-
-Example Playbook
-----------------
-
- - zbx_host:
- server: zab_server
- user: zab_user
- password: zab_password
- name: 'myhost'
-
-License
--------
-
-ASL 2.0
-
-Author Information
-------------------
-
-OpenShift operations, Red Hat, Inc
diff --git a/roles/os_zabbix/defaults/main.yml b/roles/os_zabbix/defaults/main.yml
deleted file mode 100644
index ed97d539c..000000000
--- a/roles/os_zabbix/defaults/main.yml
+++ /dev/null
@@ -1 +0,0 @@
----
diff --git a/roles/os_zabbix/handlers/main.yml b/roles/os_zabbix/handlers/main.yml
deleted file mode 100644
index ed97d539c..000000000
--- a/roles/os_zabbix/handlers/main.yml
+++ /dev/null
@@ -1 +0,0 @@
----
diff --git a/roles/os_zabbix/meta/main.yml b/roles/os_zabbix/meta/main.yml
deleted file mode 100644
index 360f5aad2..000000000
--- a/roles/os_zabbix/meta/main.yml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-galaxy_info:
- author: OpenShift
- description: ZabbixAPI
- company: Red Hat, Inc
- license: ASL 2.0
- min_ansible_version: 1.2
-dependencies:
-- lib_zabbix
diff --git a/roles/os_zabbix/tasks/main.yml b/roles/os_zabbix/tasks/main.yml
deleted file mode 100644
index 1c8d88854..000000000
--- a/roles/os_zabbix/tasks/main.yml
+++ /dev/null
@@ -1,166 +0,0 @@
----
-- name: Main List all templates
- zbx_template:
- zbx_server: "{{ ozb_server }}"
- zbx_user: "{{ ozb_user }}"
- zbx_password: "{{ ozb_password }}"
- state: list
- register: templates
-
-- include_vars: template_heartbeat.yml
- tags:
- - heartbeat
-- include_vars: template_os_linux.yml
- tags:
- - linux
-- include_vars: template_docker.yml
- tags:
- - docker
-- include_vars: template_openshift_master.yml
- tags:
- - openshift_master
-- include_vars: template_openshift_node.yml
- tags:
- - openshift_node
-- include_vars: template_ops_tools.yml
- tags:
- - ops_tools
-- include_vars: template_app_zabbix_server.yml
- tags:
- - zabbix_server
-- include_vars: template_app_zabbix_agent.yml
- tags:
- - zabbix_agent
-- include_vars: template_performance_copilot.yml
- tags:
- - pcp
-- include_vars: template_aws.yml
- tags:
- - aws
-- include_vars: template_zagg_server.yml
- tags:
- - zagg_server
-
-- include_vars: template_config_loop.yml
- tags:
- - config_loop
-
-- name: Include Template Heartbeat
- include: ../../lib_zabbix/tasks/create_template.yml
- vars:
- template: "{{ g_template_heartbeat }}"
- server: "{{ ozb_server }}"
- user: "{{ ozb_user }}"
- password: "{{ ozb_password }}"
- tags:
- - heartbeat
-
-- name: Include Template os_linux
- include: ../../lib_zabbix/tasks/create_template.yml
- vars:
- template: "{{ g_template_os_linux }}"
- server: "{{ ozb_server }}"
- user: "{{ ozb_user }}"
- password: "{{ ozb_password }}"
- tags:
- - linux
-
-- name: Include Template docker
- include: ../../lib_zabbix/tasks/create_template.yml
- vars:
- template: "{{ g_template_docker }}"
- server: "{{ ozb_server }}"
- user: "{{ ozb_user }}"
- password: "{{ ozb_password }}"
- tags:
- - docker
-
-- name: Include Template Openshift Master
- include: ../../lib_zabbix/tasks/create_template.yml
- vars:
- template: "{{ g_template_openshift_master }}"
- server: "{{ ozb_server }}"
- user: "{{ ozb_user }}"
- password: "{{ ozb_password }}"
- tags:
- - openshift_master
-
-- name: Include Template Openshift Node
- include: ../../lib_zabbix/tasks/create_template.yml
- vars:
- template: "{{ g_template_openshift_node }}"
- server: "{{ ozb_server }}"
- user: "{{ ozb_user }}"
- password: "{{ ozb_password }}"
- tags:
- - openshift_node
-
-- name: Include Template Ops Tools
- include: ../../lib_zabbix/tasks/create_template.yml
- vars:
- template: "{{ g_template_ops_tools }}"
- server: "{{ ozb_server }}"
- user: "{{ ozb_user }}"
- password: "{{ ozb_password }}"
- tags:
- - ops_tools
-
-- name: Include Template App Zabbix Server
- include: ../../lib_zabbix/tasks/create_template.yml
- vars:
- template: "{{ g_template_app_zabbix_server }}"
- server: "{{ ozb_server }}"
- user: "{{ ozb_user }}"
- password: "{{ ozb_password }}"
- tags:
- - zabbix_server
-
-- name: Include Template App Zabbix Agent
- include: ../../lib_zabbix/tasks/create_template.yml
- vars:
- template: "{{ g_template_app_zabbix_agent }}"
- server: "{{ ozb_server }}"
- user: "{{ ozb_user }}"
- password: "{{ ozb_password }}"
- tags:
- - zabbix_agent
-
-- name: Include Template Performance Copilot
- include: ../../lib_zabbix/tasks/create_template.yml
- vars:
- template: "{{ g_template_performance_copilot }}"
- server: "{{ ozb_server }}"
- user: "{{ ozb_user }}"
- password: "{{ ozb_password }}"
- tags:
- - pcp
-
-- name: Include Template AWS
- include: ../../lib_zabbix/tasks/create_template.yml
- vars:
- template: "{{ g_template_aws }}"
- server: "{{ ozb_server }}"
- user: "{{ ozb_user }}"
- password: "{{ ozb_password }}"
- tags:
- - aws
-
-- name: Include Template Zagg Server
- include: ../../lib_zabbix/tasks/create_template.yml
- vars:
- template: "{{ g_template_zagg_server }}"
- server: "{{ ozb_server }}"
- user: "{{ ozb_user }}"
- password: "{{ ozb_password }}"
- tags:
- - zagg_server
-
-- name: Include Template Config Loop
- include: ../../lib_zabbix/tasks/create_template.yml
- vars:
- template: "{{ g_template_config_loop }}"
- server: "{{ ozb_server }}"
- user: "{{ ozb_user }}"
- password: "{{ ozb_password }}"
- tags:
- - config_loop
diff --git a/roles/os_zabbix/vars/main.yml b/roles/os_zabbix/vars/main.yml
deleted file mode 100644
index ed97d539c..000000000
--- a/roles/os_zabbix/vars/main.yml
+++ /dev/null
@@ -1 +0,0 @@
----
diff --git a/roles/os_zabbix/vars/template_app_zabbix_agent.yml b/roles/os_zabbix/vars/template_app_zabbix_agent.yml
deleted file mode 100644
index d636d4822..000000000
--- a/roles/os_zabbix/vars/template_app_zabbix_agent.yml
+++ /dev/null
@@ -1,23 +0,0 @@
----
-g_template_app_zabbix_agent:
- name: Template App Zabbix Agent
- zitems:
- - key: agent.hostname
- applications:
- - Zabbix agent
- value_type: character
- zabbix_type: agent
-
- - key: agent.ping
- applications:
- - Zabbix agent
- description: The agent always returns 1 for this item. It could be used in combination with nodata() for availability check.
- value_type: int
- zabbix_type: agent
-
- ztriggers:
- - name: '[Reboot] Zabbix agent on {HOST.NAME} is unreachable for 15 minutes'
- description: Zabbix agent is unreachable for 15 minutes.
- expression: '{Template App Zabbix Agent:agent.ping.nodata(15m)}=1'
- priority: high
- url: https://github.com/openshift/ops-sop/blob/master/Alerts/check_ping.asciidoc
diff --git a/roles/os_zabbix/vars/template_app_zabbix_server.yml b/roles/os_zabbix/vars/template_app_zabbix_server.yml
deleted file mode 100644
index 43517113b..000000000
--- a/roles/os_zabbix/vars/template_app_zabbix_server.yml
+++ /dev/null
@@ -1,412 +0,0 @@
----
-g_template_app_zabbix_server:
- name: Template App Zabbix Server
- zitems:
- - key: housekeeper_creates
- applications:
- - Zabbix server
- description: A simple count of the number of partition creates output by the housekeeper script.
- units: ''
- value_type: int
- zabbix_type: internal
-
- - key: housekeeper_drops
- applications:
- - Zabbix server
- description: A simple count of the number of partition drops output by the housekeeper script.
- units: ''
- value_type: int
- zabbix_type: internal
-
- - key: housekeeper_errors
- applications:
- - Zabbix server
- description: A simple count of the number of errors output by the housekeeper script.
- units: ''
- value_type: int
- zabbix_type: internal
-
- - key: housekeeper_total
- applications:
- - Zabbix server
- description: A simple count of the total number of lines output by the housekeeper
- script.
- units: ''
- value_type: int
- zabbix_type: internal
-
- - key: zabbix[process,alerter,avg,busy]
- applications:
- - Zabbix server
- description: ''
- units: '%'
- value_type: float
- zabbix_type: internal
-
- - key: zabbix[process,configuration syncer,avg,busy]
- applications:
- - Zabbix server
- description: ''
- units: '%'
- value_type: float
- zabbix_type: internal
-
- - key: zabbix[process,db watchdog,avg,busy]
- applications:
- - Zabbix server
- description: ''
- units: '%'
- value_type: float
- zabbix_type: internal
-
- - key: zabbix[process,discoverer,avg,busy]
- applications:
- - Zabbix server
- description: ''
- units: '%'
- value_type: float
- zabbix_type: internal
-
- - key: zabbix[process,escalator,avg,busy]
- applications:
- - Zabbix server
- description: ''
- units: '%'
- value_type: float
- zabbix_type: internal
-
- - key: zabbix[process,history syncer,avg,busy]
- applications:
- - Zabbix server
- description: ''
- units: '%'
- value_type: float
- zabbix_type: internal
-
- - key: zabbix[process,housekeeper,avg,busy]
- applications:
- - Zabbix server
- description: ''
- units: '%'
- value_type: float
- zabbix_type: internal
-
- - key: zabbix[process,http poller,avg,busy]
- applications:
- - Zabbix server
- description: ''
- units: '%'
- value_type: float
- zabbix_type: internal
-
- - key: zabbix[process,icmp pinger,avg,busy]
- applications:
- - Zabbix server
- description: ''
- units: '%'
- value_type: float
- zabbix_type: internal
-
- - key: zabbix[process,ipmi poller,avg,busy]
- applications:
- - Zabbix server
- description: ''
- units: '%'
- value_type: float
- zabbix_type: internal
-
- - key: zabbix[process,java poller,avg,busy]
- applications:
- - Zabbix server
- description: ''
- units: '%'
- value_type: float
- zabbix_type: internal
-
- - key: zabbix[process,node watcher,avg,busy]
- applications:
- - Zabbix server
- description: ''
- units: '%'
- value_type: float
- zabbix_type: internal
-
- - key: zabbix[process,poller,avg,busy]
- applications:
- - Zabbix server
- description: ''
- units: '%'
- value_type: float
- zabbix_type: internal
-
- - key: zabbix[process,proxy poller,avg,busy]
- applications:
- - Zabbix server
- description: ''
- units: '%'
- value_type: float
- zabbix_type: internal
-
- - key: zabbix[process,self-monitoring,avg,busy]
- applications:
- - Zabbix server
- description: ''
- units: '%'
- value_type: float
- zabbix_type: internal
-
- - key: zabbix[process,snmp trapper,avg,busy]
- applications:
- - Zabbix server
- description: ''
- units: '%'
- value_type: float
- zabbix_type: internal
-
- - key: zabbix[process,timer,avg,busy]
- applications:
- - Zabbix server
- description: ''
- units: '%'
- value_type: float
- zabbix_type: internal
-
- - key: zabbix[process,trapper,avg,busy]
- applications:
- - Zabbix server
- description: ''
- units: '%'
- value_type: float
- zabbix_type: internal
-
- - key: zabbix[process,unreachable poller,avg,busy]
- applications:
- - Zabbix server
- description: ''
- units: '%'
- value_type: float
- zabbix_type: internal
-
- - key: zabbix[queue,10m]
- applications:
- - Zabbix server
- description: ''
- units: ''
- value_type: int
- zabbix_type: internal
- interval: 600
-
- - key: zabbix[queue]
- applications:
- - Zabbix server
- description: ''
- units: ''
- value_type: int
- zabbix_type: internal
- interval: 600
-
- - key: zabbix[rcache,buffer,pfree]
- applications:
- - Zabbix server
- description: ''
- units: ''
- value_type: float
- zabbix_type: internal
-
- - key: zabbix[wcache,history,pfree]
- applications:
- - Zabbix server
- description: ''
- units: ''
- value_type: float
- zabbix_type: internal
-
- - key: zabbix[wcache,text,pfree]
- applications:
- - Zabbix server
- description: ''
- units: ''
- value_type: float
- zabbix_type: internal
-
- - key: zabbix[wcache,trend,pfree]
- applications:
- - Zabbix server
- description: ''
- units: ''
- value_type: float
- zabbix_type: internal
-
- - key: zabbix[wcache,values]
- applications:
- - Zabbix server
- description: ''
- units: ''
- value_type: float
- zabbix_type: internal
- delta: 1 # speed per second
-
- ztriggers:
- - description: "There has been unexpected output while running the housekeeping script\
- \ on the Zabbix. There are only three kinds of lines we expect to see in the output,\
- \ and we've gotten something enw.\r\n\r\nCheck the script's output in /var/lib/zabbix/state\
- \ for more details."
- expression: '{Template App Zabbix Server:housekeeper_errors.last(0)}+{Template App Zabbix Server:housekeeper_creates.last(0)}+{Template App Zabbix Server:housekeeper_drops.last(0)}<>{Template App Zabbix Server:housekeeper_total.last(0)}'
- name: Unexpected output in Zabbix DB Housekeeping
- priority: avg
- url: https://github.com/openshift/ops-sop/blob/master/Alerts/Zabbix_DB_Housekeeping.asciidoc
-
- - description: An error occurred while running the housekeeping script on the Zabbix server. Check the script's output in /var/lib/zabbix/state for more details.
- expression: '{Template App Zabbix Server:housekeeper_errors.last(0)}>0'
- name: Errors during Zabbix DB Housekeeping
- priority: high
- url: https://github.com/openshift/ops-sop/blob/master/Alerts/Zabbix_state_check.asciidoc
-
- - description: ''
- expression: '{Template App Zabbix Server:zabbix[process,alerter,avg,busy].min(600)}>75'
- name: Zabbix alerter processes more than 75% busy
- priority: avg
- url: https://github.com/openshift/ops-sop/blob/master/Alerts/Zabbix_state_check.asciidoc
-
- - description: ''
- expression: '{Template App Zabbix Server:zabbix[process,configuration syncer,avg,busy].min(600)}>75'
- name: Zabbix configuration syncer processes more than 75% busy
- priority: avg
- url: https://github.com/openshift/ops-sop/blob/master/Alerts/Zabbix_state_check.asciidoc
-
- - description: ''
- expression: '{Template App Zabbix Server:zabbix[process,db watchdog,avg,busy].min(600)}>75'
- name: Zabbix db watchdog processes more than 75% busy
- priority: avg
- url: https://github.com/openshift/ops-sop/blob/master/Alerts/Zabbix_state_check.asciidoc
-
- - description: ''
- expression: '{Template App Zabbix Server:zabbix[process,discoverer,avg,busy].min(600)}>75'
- name: Zabbix discoverer processes more than 75% busy
- priority: avg
- url: https://github.com/openshift/ops-sop/blob/master/Alerts/Zabbix_state_check.asciidoc
-
- - description: ''
- expression: '{Template App Zabbix Server:zabbix[process,escalator,avg,busy].min(600)}>75'
- name: Zabbix escalator processes more than 75% busy
- priority: avg
- url: https://github.com/openshift/ops-sop/blob/master/Alerts/Zabbix_state_check.asciidoc
-
- - description: ''
- expression: '{Template App Zabbix Server:zabbix[process,history syncer,avg,busy].min(600)}>75'
- name: Zabbix history syncer processes more than 75% busy
- priority: avg
- url: https://github.com/openshift/ops-sop/blob/master/Alerts/Zabbix_state_check.asciidoc
-
- - description: ''
- expression: '{Template App Zabbix Server:zabbix[process,housekeeper,avg,busy].min(1800)}>75'
- name: Zabbix housekeeper processes more than 75% busy
- priority: avg
- url: https://github.com/openshift/ops-sop/blob/master/Alerts/Zabbix_state_check.asciidoc
-
- - description: ''
- expression: '{Template App Zabbix Server:zabbix[process,http poller,avg,busy].min(600)}>75'
- name: Zabbix http poller processes more than 75% busy
- priority: avg
- url: https://github.com/openshift/ops-sop/blob/master/Alerts/Zabbix_state_check.asciidoc
-
- - description: ''
- expression: '{Template App Zabbix Server:zabbix[process,icmp pinger,avg,busy].min(600)}>75'
- name: Zabbix icmp pinger processes more than 75% busy
- priority: avg
- url: https://github.com/openshift/ops-sop/blob/master/Alerts/Zabbix_state_check.asciidoc
-
- - description: ''
- expression: '{Template App Zabbix Server:zabbix[process,ipmi poller,avg,busy].min(600)}>75'
- name: Zabbix ipmi poller processes more than 75% busy
- priority: avg
- url: https://github.com/openshift/ops-sop/blob/master/Alerts/Zabbix_state_check.asciidoc
-
- - description: ''
- expression: '{Template App Zabbix Server:zabbix[process,java poller,avg,busy].min(600)}>75'
- name: Zabbix java poller processes more than 75% busy
- priority: avg
- url: https://github.com/openshift/ops-sop/blob/master/Alerts/Zabbix_state_check.asciidoc
-
- - description: ''
- expression: '{Template App Zabbix Server:zabbix[process,node watcher,avg,busy].min(600)}>75'
- name: Zabbix node watcher processes more than 75% busy
- priority: avg
- url: https://github.com/openshift/ops-sop/blob/master/Alerts/Zabbix_state_check.asciidoc
-
- - description: ''
- expression: '{Template App Zabbix Server:zabbix[process,poller,avg,busy].min(600)}>75'
- name: Zabbix poller processes more than 75% busy
- priority: high
- url: https://github.com/openshift/ops-sop/blob/master/Alerts/Zabbix_state_check.asciidoc
-
- - description: ''
- expression: '{Template App Zabbix Server:zabbix[process,proxy poller,avg,busy].min(600)}>75'
- name: Zabbix proxy poller processes more than 75% busy
- priority: avg
- url: https://github.com/openshift/ops-sop/blob/master/Alerts/Zabbix_state_check.asciidoc
-
- - description: ''
- expression: '{Template App Zabbix Server:zabbix[process,self-monitoring,avg,busy].min(600)}>75'
- name: Zabbix self-monitoring processes more than 75% busy
- priority: avg
- url: https://github.com/openshift/ops-sop/blob/master/Alerts/Zabbix_state_check.asciidoc
-
- - description: ''
- expression: '{Template App Zabbix Server:zabbix[process,snmp trapper,avg,busy].min(600)}>75'
- name: Zabbix snmp trapper processes more than 75% busy
- priority: avg
- url: https://github.com/openshift/ops-sop/blob/master/Alerts/Zabbix_state_check.asciidoc
-
- - description: Timer processes are usually busy because they have to process time-based
-   trigger functions
- expression: '{Template App Zabbix Server:zabbix[process,timer,avg,busy].min(600)}>75'
- name: Zabbix timer processes more than 75% busy
- priority: avg
- url: https://github.com/openshift/ops-sop/blob/master/Alerts/Zabbix_state_check.asciidoc
-
- - description: ''
- expression: '{Template App Zabbix Server:zabbix[process,trapper,avg,busy].min(600)}>75'
- name: Zabbix trapper processes more than 75% busy
- priority: avg
- url: https://github.com/openshift/ops-sop/blob/master/Alerts/Zabbix_state_check.asciidoc
-
- - description: ''
- expression: '{Template App Zabbix Server:zabbix[process,unreachable poller,avg,busy].min(600)}>75'
- name: Zabbix unreachable poller processes more than 75% busy
- priority: avg
- url: https://github.com/openshift/ops-sop/blob/master/Alerts/Zabbix_state_check.asciidoc
-
- - description: "This alert generally indicates a performance problem or a problem\
- \ with the zabbix-server or proxy.\r\n\r\nThe first place to check for issues\
- \ is Administration > Queue. Be sure to check the general view and the per-proxy\
- \ view."
- expression: '{Template App Zabbix Server:zabbix[queue,10m].min(600)}>1000'
- name: More than 1000 items having missing data for more than 10 minutes
- priority: high
- url: https://github.com/openshift/ops-sop/blob/master/Alerts/data_lost_overview_plugin.asciidoc
-
- - description: Consider increasing CacheSize in the zabbix_server.conf configuration
- file
- expression: '{Template App Zabbix Server:zabbix[rcache,buffer,pfree].min(600)}<5'
- name: Less than 5% free in the configuration cache
- priority: info
- url: https://github.com/openshift/ops-sop/blob/master/Alerts/check_cache.asciidoc
-
- - description: ''
- expression: '{Template App Zabbix Server:zabbix[wcache,history,pfree].min(600)}<25'
- name: Less than 25% free in the history cache
- priority: avg
- url: https://github.com/openshift/ops-sop/blob/master/Alerts/check_cache.asciidoc
-
- - description: ''
- expression: '{Template App Zabbix Server:zabbix[wcache,text,pfree].min(600)}<25'
- name: Less than 25% free in the text history cache
- priority: avg
- url: https://github.com/openshift/ops-sop/blob/master/Alerts/check_cache.asciidoc
-
- - description: ''
- expression: '{Template App Zabbix Server:zabbix[wcache,trend,pfree].min(600)}<25'
- name: Less than 25% free in the trends cache
- priority: avg
- url: https://github.com/openshift/ops-sop/blob/master/Alerts/check_cache.asciidoc
diff --git a/roles/os_zabbix/vars/template_aws.yml b/roles/os_zabbix/vars/template_aws.yml
deleted file mode 100644
index 57832a3fe..000000000
--- a/roles/os_zabbix/vars/template_aws.yml
+++ /dev/null
@@ -1,25 +0,0 @@
----
-g_template_aws:
- name: Template AWS
- zdiscoveryrules:
- - name: disc.aws
- key: disc.aws
- lifetime: 14
- description: "Dynamically register AWS bucket info"
-
- zitemprototypes:
- - discoveryrule_key: disc.aws
- name: "S3 bucket size (GB) [{#S3_BUCKET}]"
- key: "disc.aws.size[{#S3_BUCKET}]"
- value_type: int
- description: "Size of S3 bucket"
- applications:
- - AWS
-
- - discoveryrule_key: disc.aws
- name: "S3 bucket object count [{#S3_BUCKET}]"
- key: "disc.aws.objects[{#S3_BUCKET}]"
- value_type: int
- description: "Objects in S3 bucket"
- applications:
- - AWS
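The disc.aws discovery rule is expected to return standard Zabbix low-level discovery JSON, which fills in the {#S3_BUCKET} macro used by the item prototypes above. A minimal sketch with hypothetical bucket names:

    {"data": [
        {"{#S3_BUCKET}": "example-registry-bucket"},
        {"{#S3_BUCKET}": "example-backup-bucket"}
    ]}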
diff --git a/roles/os_zabbix/vars/template_config_loop.yml b/roles/os_zabbix/vars/template_config_loop.yml
deleted file mode 100644
index 823da1868..000000000
--- a/roles/os_zabbix/vars/template_config_loop.yml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-g_template_config_loop:
- name: Template Config Loop
- zitems:
- - key: config_loop.run.exit_code
- applications:
- - Config Loop
- value_type: int
-
- ztriggers:
- - name: 'config_loop.run.exit_code not zero on {HOST.NAME}'
- expression: '{Template Config Loop:config_loop.run.exit_code.min(#2)}>0'
- url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_config_loop.asciidoc'
- priority: average
diff --git a/roles/os_zabbix/vars/template_docker.yml b/roles/os_zabbix/vars/template_docker.yml
deleted file mode 100644
index dd13e76f7..000000000
--- a/roles/os_zabbix/vars/template_docker.yml
+++ /dev/null
@@ -1,116 +0,0 @@
----
-g_template_docker:
- name: Template Docker
- zitems:
- - key: docker.ping
- applications:
- - Docker Daemon
- value_type: int
-
- - key: docker.info_elapsed_ms
- applications:
- - Docker Daemon
- value_type: int
-
- - key: docker.container.dns.resolution
- applications:
- - Docker Daemon
- value_type: int
-
- - key: docker.container.existing.dns.resolution.failed
- applications:
- - Docker Daemon
- value_type: int
-
- - key: docker.storage.is_loopback
- applications:
- - Docker Storage
- value_type: int
-
- - key: docker.storage.data.space.total
- applications:
- - Docker Storage
- value_type: float
-
- - key: docker.storage.data.space.used
- applications:
- - Docker Storage
- value_type: float
-
- - key: docker.storage.data.space.available
- applications:
- - Docker Storage
- value_type: float
-
- - key: docker.storage.data.space.percent_available
- applications:
- - Docker Storage
- value_type: float
-
- - key: docker.storage.metadata.space.total
- applications:
- - Docker Storage
- value_type: float
-
- - key: docker.storage.metadata.space.used
- applications:
- - Docker Storage
- value_type: float
-
- - key: docker.storage.metadata.space.available
- applications:
- - Docker Storage
- value_type: float
-
- - key: docker.storage.metadata.space.percent_available
- applications:
- - Docker Storage
- value_type: float
- ztriggers:
- - name: 'docker.ping failed on {HOST.NAME}'
- expression: '{Template Docker:docker.ping.max(#3)}<1'
- url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_docker_ping.asciidoc'
- priority: high
-
- # Re-enable for OpenShift 3.1.1 (https://bugzilla.redhat.com/show_bug.cgi?id=1292971#c6)
- - name: 'docker.container.dns.resolution failed on {HOST.NAME}'
- expression: '{Template Docker:docker.container.dns.resolution.min(#3)}>0'
- url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_docker_dns.asciidoc'
- priority: average
- status: disabled
-
- - name: 'docker.container.existing.dns.resolution.failed on {HOST.NAME}'
- expression: '{Template Docker:docker.container.existing.dns.resolution.failed.min(#3)}>0'
- url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_docker_dns.asciidoc'
- priority: average
-
- - name: 'Docker storage is using LOOPBACK on {HOST.NAME}'
- expression: '{Template Docker:docker.storage.is_loopback.last()}<>0'
- url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_docker_loopback.asciidoc'
- priority: high
-
- - name: 'Critically low docker storage data space on {HOST.NAME}'
- expression: '{Template Docker:docker.storage.data.space.percent_available.max(#3)}<5 or {Template Docker:docker.storage.data.space.available.max(#3)}<5' # < 5% or < 5GB
- url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_docker_storage.asciidoc'
- priority: high
-
- - name: 'Critically low docker storage metadata space on {HOST.NAME}'
- expression: '{Template Docker:docker.storage.metadata.space.percent_available.max(#3)}<5 or {Template Docker:docker.storage.metadata.space.available.max(#3)}<0.005' # < 5% or < 5MB
- url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_docker_storage.asciidoc'
- priority: high
-
- # Put triggers that depend on other triggers here (deps must be created first)
- - name: 'Low docker storage data space on {HOST.NAME}'
- expression: '{Template Docker:docker.storage.data.space.percent_available.max(#3)}<10 or {Template Docker:docker.storage.data.space.available.max(#3)}<10' # < 10% or < 10GB
- url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_docker_storage.asciidoc'
- dependencies:
- - 'Critically low docker storage data space on {HOST.NAME}'
- priority: average
-
- - name: 'Low docker storage metadata space on {HOST.NAME}'
- expression: '{Template Docker:docker.storage.metadata.space.percent_available.max(#3)}<10 or {Template Docker:docker.storage.metadata.space.available.max(#3)}<0.01' # < 10% or < 10MB
- url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_docker_storage.asciidoc'
- dependencies:
- - 'Critically low docker storage metadata space on {HOST.NAME}'
- priority: average
-
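The inline threshold comments above assume the *.space.available items report gigabytes, which is how the fractional metadata thresholds work out:

    # data:     available < 5     -> critical (5 GB);  available < 10   -> warning (10 GB)
    # metadata: available < 0.005 -> critical (5 MB);  available < 0.01 -> warning (10 MB)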
diff --git a/roles/os_zabbix/vars/template_heartbeat.yml b/roles/os_zabbix/vars/template_heartbeat.yml
deleted file mode 100644
index ec953c79b..000000000
--- a/roles/os_zabbix/vars/template_heartbeat.yml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-g_template_heartbeat:
- name: Template Heartbeat
- zitems:
- - name: Heartbeat Ping
- applications:
- - Heartbeat
- key: heartbeat.ping
- ztriggers:
- - name: 'Heartbeat.ping has failed on {HOST.NAME}'
- expression: '{Template Heartbeat:heartbeat.ping.nodata(20m)}=1'
- priority: avg
- url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_node_heartbeat.asciidoc'
-
- - name: 'Heartbeat.ping has failed (60 min) on {HOST.NAME}'
- expression: '{Template Heartbeat:heartbeat.ping.nodata(60m)}=1'
- priority: high
- url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_node_heartbeat.asciidoc'
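Both heartbeat triggers rely on Zabbix's nodata() function, which evaluates to 1 when the item has received no values within the given window, so the 20-minute trigger warns first and the 60-minute trigger escalates to high. The same pattern for a hypothetical check:

    - name: 'Example check has gone silent on {HOST.NAME}'   # hypothetical trigger
      expression: '{Template Example:example.ping.nodata(30m)}=1'
      priority: avg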
diff --git a/roles/os_zabbix/vars/template_openshift_master.yml b/roles/os_zabbix/vars/template_openshift_master.yml
deleted file mode 100644
index a38db9f65..000000000
--- a/roles/os_zabbix/vars/template_openshift_master.yml
+++ /dev/null
@@ -1,458 +0,0 @@
----
-g_template_openshift_master:
- name: Template Openshift Master
- zitems:
- - name: openshift.master.app.create
- applications:
- - Openshift Master
- key: openshift.master.app.create
-
- - key: openshift.master.app.build.create
- description: "check the app create with a build process"
- value_type: int
- applications:
- - Openshift Master
-
- - key: openshift.master.app.create.time
- description: "check the time it takes app create with a build process"
- value_type: float
- applications:
- - Openshift Master
-
- - key: openshift.master.app.build.time
- description: "check the time it takes app build"
- value_type: float
- applications:
- - Openshift Master
-
- - key: openshift.master.process.count
- description: Shows number of master processes running
- value_type: int
- applications:
- - Openshift Master
-
- - key: openshift.master.api.ping
- description: "Verify that the Openshift API is up (uses the cluster API URL)"
- value_type: int
- applications:
- - Openshift Master
-
- - key: openshift.master.local.api.ping
- description: "Verify that the Openshift API is up on the host (uses the API URL as the https://127.0.0.1)"
- value_type: int
- applications:
- - Openshift Master
-
- - key: openshift.master.api.healthz
- description: "Checks the healthz check of the master's api: https://<cluster_api_url>/healthz"
- value_type: int
- data_type: bool
- applications:
- - Openshift Master
-
- - key: openshift.master.local.api.healthz
- description: "Checks the healthz check of the master's api: https://127.0.0.1/healthz"
- value_type: int
- data_type: bool
- applications:
- - Openshift Master
-
- - key: openshift.master.user.count
- description: Shows number of users in a cluster
- value_type: int
- applications:
- - Openshift Master
-
- - key: openshift.master.pod.running.count
- description: Shows number of pods running
- value_type: int
- applications:
- - Openshift Master
-
- - key: openshift.master.pod.user.running.count
- description: Shows number of user pods running (non-infrastructure pods)
- value_type: int
- applications:
- - Openshift Master
-
- - key: openshift.master.pod.total.count
- description: Shows total number of pods (running and non running)
- value_type: int
- applications:
- - Openshift Master
-
- - key: openshift.master.node.count
- description: Shows the total number of nodes found in the Openshift Cluster
- value_type: int
- applications:
- - Openshift Master
-
- - key: openshift.project.count
- description: Shows number of projects on a cluster
- value_type: int
- applications:
- - Openshift Master
-
- - key: openshift.master.pv.space.total
- description: Shows the total space of pv
- value_type: int
- applications:
- - Openshift Master
-
- - key: openshift.master.pv.space.available
- description: Shows the available space of pv
- value_type: int
- applications:
- - Openshift Master
-
- - key: openshift.master.pv.total.count
- description: Total number of Persistent Volumes in the Openshift Cluster
- value_type: int
- applications:
- - Openshift Master
-
- - key: openshift.master.pv.available.count
- description: Total number of Available Persistent Volumes in the Openshift Cluster
- value_type: int
- applications:
- - Openshift Master
-
- - key: openshift.master.pv.released.count
- description: Total number of Released Persistent Volumes in the Openshift Cluster
- value_type: int
- applications:
- - Openshift Master
-
- - key: openshift.master.pv.bound.count
- description: Total number of Bound Persistent Volumes in the Openshift Cluster
- value_type: int
- applications:
- - Openshift Master
-
- - key: openshift.master.pv.failed.count
- description: Total number of Failed Persistent Volumes in the Openshift Cluster
- value_type: int
- applications:
- - Openshift Master
-
- - key: openshift.master.skydns.port.open
- description: State of the SkyDNS port open and listening
- value_type: int
- applications:
- - Openshift Master
-
- - key: openshift.master.skydns.query
- description: SkyDNS can be queried or not
- value_type: int
- applications:
- - Openshift Master
-
- - key: openshift.master.etcd.create.success
- description: Show number of successful create actions
- value_type: int
- applications:
- - Openshift Etcd
-
- - key: openshift.master.etcd.create.fail
- description: Show number of failed create actions
- value_type: int
- applications:
- - Openshift Etcd
-
- - key: openshift.master.etcd.delete.success
- description: Show number of successful delete actions
- value_type: int
- applications:
- - Openshift Etcd
-
- - key: openshift.master.etcd.delete.fail
- description: Show number of failed delete actions
- value_type: int
- applications:
- - Openshift Etcd
-
- - key: openshift.master.etcd.get.success
- description: Show number of successful get actions
- value_type: int
- applications:
- - Openshift Etcd
-
- - key: openshift.master.etcd.get.fail
- description: Show number of failed get actions
- value_type: int
- applications:
- - Openshift Etcd
-
- - key: openshift.master.etcd.set.success
- description: Show number of successful set actions
- value_type: int
- applications:
- - Openshift Etcd
-
- - key: openshift.master.etcd.set.fail
- description: Show number of failed set actions
- value_type: int
- applications:
- - Openshift Etcd
-
- - key: openshift.master.etcd.update.success
- description: Show number of successful update actions
- value_type: int
- applications:
- - Openshift Etcd
-
- - key: openshift.master.etcd.update.fail
- description: Show number of failed update actions
- value_type: int
- applications:
- - Openshift Etcd
-
- - key: openshift.master.etcd.watchers
- description: Show number of etcd watchers
- value_type: int
- applications:
- - Openshift Etcd
-
- - key: openshift.master.etcd.ping
- description: etcd ping
- value_type: int
- applications:
- - Openshift Etcd
-
- - key: openshift.master.metric.ping
- description: "This check verifies that the https://master/metrics check is alive and communicating properly."
- value_type: int
- applications:
- - Openshift Master Metrics
-
- - key: openshift.master.nodesnotready.count
- description: "This check shows how many nodes in a cluster are in NotReady state."
- value_type: int
- applications:
- - Openshift Master
-
- - key: openshift.master.nodesnotschedulable.count
- description: "This check shows how many nodes in a cluster are not schedulable."
- value_type: int
- applications:
- - Openshift Master
-
- - key: openshift.master.apiserver.latency.summary.pods.quantile.list.5
- description: "Value from https://master/metrics. This is the time, in miliseconds, that 50% of the pod operations have taken to completed."
- value_type: int
- applications:
- - Openshift Master Metrics
-
- - key: openshift.master.apiserver.latency.summary.pods.quantile.list.9
- description: "Value from https://master/metrics. This is the time, in miliseconds, that 90% of the pod operations have taken to completed."
- value_type: int
- applications:
- - Openshift Master Metrics
-
- - key: openshift.master.apiserver.latency.summary.pods.quantile.list.99
- description: "Value from https://master/metrics. This is the time, in miliseconds, that 99% of the pod operations have taken to completed."
- value_type: int
- applications:
- - Openshift Master Metrics
-
- - key: openshift.master.apiserver.latency.summary.pods.quantile.watchlist.5
- description: "Value from https://master/metrics. This is the time, in miliseconds, that 50% of the pod operations have taken to completed."
- value_type: int
- applications:
- - Openshift Master Metrics
-
- - key: openshift.master.apiserver.latency.summary.pods.quantile.watchlist.9
- description: "Value from https://master/metrics. This is the time, in miliseconds, that 90% of the pod operations have taken to completed."
- value_type: int
- applications:
- - Openshift Master Metrics
-
- - key: openshift.master.apiserver.latency.summary.pods.quantile.watchlist.99
- description: "Value from https://master/metrics. This is the time, in miliseconds, that 99% of the pod operations have taken to completed."
- value_type: int
- applications:
- - Openshift Master Metrics
-
- - key: openshift.master.scheduler.e2e.scheduling.latency.quantile.5
- description: "Value from https://master/metrics. This is the time, in miliseconds, that 50% of the end to end scheduling operations have taken to completed."
- value_type: int
- applications:
- - Openshift Master Metrics
-
- - key: openshift.master.scheduler.e2e.scheduling.latency.quantile.9
- description: "Value from https://master/metrics. This is the time, in miliseconds, that 90% of the end to end scheduling operations have taken to completed."
- value_type: int
- applications:
- - Openshift Master Metrics
-
- - key: openshift.master.scheduler.e2e.scheduling.latency.quantile.99
- description: "Value from https://master/metrics. This is the time, in miliseconds, that 99% of the end to end scheduling operations have taken to completed."
- value_type: int
- applications:
- - Openshift Master Metrics
-
- zdiscoveryrules:
- - name: disc.pv
- key: disc.pv
- lifetime: 1
- description: "Dynamically register the Persistent Volumes"
-
- zitemprototypes:
- - discoveryrule_key: disc.pv
- name: "disc.pv.count.{#OSO_PV}"
- key: "disc.pv.count[{#OSO_PV}]"
- value_type: int
- description: "Number of PV's of this size"
- applications:
- - Openshift Master
-
- - discoveryrule_key: disc.pv
- name: "disc.pv.available.{#OSO_PV}"
- key: "disc.pv.available[{#OSO_PV}]"
- value_type: int
- description: "Number of PV's of this size that are available"
- applications:
- - Openshift Master
-
- ztriggers:
- - name: 'Openshift Master process not running on {HOST.NAME}'
- expression: '{Template Openshift Master:openshift.master.process.count.max(#3)}<1'
- url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_master.asciidoc'
- priority: high
-
- - name: 'Too many Openshift Master processes running on {HOST.NAME}'
- expression: '{Template Openshift Master:openshift.master.process.count.min(#3)}>1'
- url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_master.asciidoc'
- priority: high
-
- - name: 'Etcd ping failed on {HOST.NAME}'
- expression: '{Template Openshift Master:openshift.master.etcd.ping.last(#1)}=0 and {Template Openshift Master:openshift.master.etcd.ping.last(#2)}=0'
- url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_etcd.asciidoc'
- priority: high
-
- - name: 'Number of users for Openshift Master on {HOST.NAME}'
- expression: '{Template Openshift Master:openshift.master.user.count.last()}=0'
- url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_master.asciidoc'
- priority: info
-
- - name: 'There are no projects running on {HOST.NAME}'
- expression: '{Template Openshift Master:openshift.project.count.last()}=0'
- url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_master.asciidoc'
- priority: info
-
- # Put triggers that depend on other triggers here (deps must be created first)
- - name: 'Application creation has failed on {HOST.NAME}'
- expression: '{Template Openshift Master:openshift.master.app.create.last(#1)}=1 and {Template Openshift Master:openshift.master.app.create.last(#2)}=1'
- url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_create_app.asciidoc'
- dependencies:
- - 'Openshift Master process not running on {HOST.NAME}'
- priority: avg
-
- - name: 'Application creation with build has failed on {HOST.NAME}'
- expression: '{Template Openshift Master:openshift.master.app.build.create.last(#1)}=1 and {Template Openshift Master:openshift.master.app.build.create.last(#2)}=1'
- url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_create_app.asciidoc'
- dependencies:
- - 'Openshift Master process not running on {HOST.NAME}'
- priority: avg
-
- - name: 'Application creation has failed multiple times in the last hour on {HOST.NAME}'
- expression: '{Template Openshift Master:openshift.master.app.create.sum(1h)}>3'
- url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_create_app.asciidoc'
- dependencies:
- - 'Openshift Master process not running on {HOST.NAME}'
- description: The application create loop has failed 4 or more times in the last hour
- priority: avg
-
- - name: 'Application with build creation has failed multiple times in the last 2 hours on {HOST.NAME}'
- expression: '{Template Openshift Master:openshift.master.app.build.create.sum(2h)}>3'
- url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_create_app.asciidoc'
- dependencies:
- - 'Openshift Master process not running on {HOST.NAME}'
- description: The application create with build loop has failed 4 or more times in the last 2 hours
- priority: avg
-
- - name: 'Openshift Master API health check is failing on {HOST.NAME}'
- expression: '{Template Openshift Master:openshift.master.api.healthz.max(#3)}<1'
- url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_master.asciidoc'
- priority: high
-
- - name: 'Openshift Master Local API health check is failing on {HOST.NAME}'
- expression: '{Template Openshift Master:openshift.master.local.api.healthz.max(#3)}<1'
- url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_master.asciidoc'
- dependencies:
- - 'Openshift Master process not running on {HOST.NAME}'
- priority: high
-
- - name: 'Openshift Master API PING check is failing on {HOST.NAME}'
- expression: '{Template Openshift Master:openshift.master.api.ping.max(#3)}<1'
- url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_master.asciidoc'
- priority: high
-
- - name: 'Openshift Master Local API PING check is failing on {HOST.NAME}'
- expression: '{Template Openshift Master:openshift.master.local.api.ping.max(#3)}<1'
- url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_master.asciidoc'
- dependencies:
- - 'Openshift Master process not running on {HOST.NAME}'
- priority: high
-
- - name: 'Openshift Master metric PING check is failing on {HOST.NAME}'
- expression: '{Template Openshift Master:openshift.master.metric.ping.max(#3)}<1'
- url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_master.asciidoc'
- dependencies:
- - 'Openshift Master process not running on {HOST.NAME}'
- priority: avg
-
- - name: 'SkyDNS port not listening on {HOST.NAME}'
- expression: '{Template Openshift Master:openshift.master.skydns.port.open.max(#3)}<1'
- url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_master.asciidoc'
- dependencies:
- - 'Openshift Master process not running on {HOST.NAME}'
- priority: high
-
- - name: 'SkyDNS query failed on {HOST.NAME}'
- expression: '{Template Openshift Master:openshift.master.skydns.query.max(#3)}<1'
- url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_master.asciidoc'
- dependencies:
- - 'Openshift Master API health check is failing on {HOST.NAME}'
- priority: high
-
- - name: 'Hosts not ready according to {HOST.NAME}'
- expression: '{Template Openshift Master:openshift.master.nodesnotready.count.last(#2)}>0'
- url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_node.asciidoc'
- dependencies:
- - 'Openshift Master process not running on {HOST.NAME}'
- priority: high
-
- zgraphs:
- - name: Openshift Master API Server Latency Pods LIST Quantiles
- width: 900
- height: 200
- graph_items:
- - item_name: openshift.master.apiserver.latency.summary.pods.quantile.list.5
- color: red
- - item_name: openshift.master.apiserver.latency.summary.pods.quantile.list.9
- color: blue
- - item_name: openshift.master.apiserver.latency.summary.pods.quantile.list.99
- color: orange
-
- - name: Openshift Master API Server Latency Pods WATCHLIST Quantiles
- width: 900
- height: 200
- graph_items:
- - item_name: openshift.master.apiserver.latency.summary.pods.quantile.watchlist.5
- color: red
- - item_name: openshift.master.apiserver.latency.summary.pods.quantile.watchlist.9
- color: blue
- - item_name: openshift.master.apiserver.latency.summary.pods.quantile.watchlist.99
- color: orange
-
- - name: Openshift Master Scheduler End to End Latency Quantiles
- width: 900
- height: 200
- graph_items:
- - item_name: openshift.master.scheduler.e2e.scheduling.latency.quantile.5
- color: red
- - item_name: openshift.master.scheduler.e2e.scheduling.latency.quantile.9
- color: blue
- - item_name: openshift.master.scheduler.e2e.scheduling.latency.quantile.99
- color: orange
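Several triggers in this template (the etcd ping and app create checks) combine last(#1) and last(#2) so that an alert fires only after two consecutive bad samples, filtering out one-off blips. The shape of the pattern, with hypothetical names:

    - name: 'Example check failed twice in a row on {HOST.NAME}'   # hypothetical trigger
      expression: '{Template Example:example.check.last(#1)}=0 and {Template Example:example.check.last(#2)}=0'
      priority: avg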
diff --git a/roles/os_zabbix/vars/template_openshift_node.yml b/roles/os_zabbix/vars/template_openshift_node.yml
deleted file mode 100644
index 9f84a2cdf..000000000
--- a/roles/os_zabbix/vars/template_openshift_node.yml
+++ /dev/null
@@ -1,70 +0,0 @@
----
-g_template_openshift_node:
- name: Template Openshift Node
- zitems:
- - key: openshift.node.process.count
- description: Shows number of OpenShift Node processes running
- value_type: int
- applications:
- - Openshift Node
-
- - key: openshift.node.ovs.pids.count
- description: Shows number of ovs process ids running
- value_type: int
- applications:
- - Openshift Node
-
- - key: openshift.node.ovs.ports.count
- description: Shows number of OVS ports defined
- value_type: int
- applications:
- - Openshift Node
-
- - key: openshift.node.ovs.stray.rules
- description: Number of OVS stray rules found/removed
- value_type: int
- applications:
- - Openshift Node
-
- - key: openshift.node.registry-pods.healthy_pct
- description: Shows the percentage of healthy registries in the cluster
- value_type: int
- applications:
- - Openshift Node
-
- - key: openshift.node.registry.service.ping
- description: Ping docker-registry service from node
- value_type: int
- applications:
- - Openshift Node
-
- ztriggers:
- - name: 'One or more Docker Registries is unhealthy according to {HOST.NAME}'
- expression: '{Template Openshift Node:openshift.node.registry-pods.healthy_pct.last(#2)}<100 and {Template Openshift Node:openshift.node.registry-pods.healthy_pct.last(#1)}<100'
- url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_registry.asciidoc'
- priority: avg
-
- - name: 'Docker Registry service is unhealthy according to {HOST.NAME}'
- expression: '{Template Openshift Node:openshift.node.registry.service.ping.last(#2)}<1 and {Template Openshift Node:openshift.node.registry.service.ping.last(#1)}<1'
- url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_registry.asciidoc'
- priority: avg
-
- - name: 'Openshift Node process not running on {HOST.NAME}'
- expression: '{Template Openshift Node:openshift.node.process.count.max(#3)}<1'
- url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_node.asciidoc'
- priority: high
-
- - name: 'Too many Openshift Node processes running on {HOST.NAME}'
- expression: '{Template Openshift Node:openshift.node.process.count.min(#3)}>1'
- url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_node.asciidoc'
- priority: high
-
- - name: '[Heal] OVS may not be running on {HOST.NAME}'
- expression: '{Template Openshift Node:openshift.node.ovs.pids.count.last(#1)}<>4 and {Template Openshift Node:openshift.node.ovs.pids.count.last(#2)}<>4'
- url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_node.asciidoc'
- priority: high
-
- - name: 'Number of OVS ports is 0 on {HOST.NAME}'
- expression: '{Template Openshift Node:openshift.node.ovs.ports.count.last()}=0'
- url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_node.asciidoc'
- priority: high
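The '[Heal]' prefix on the OVS trigger above is load-bearing: the remote-command action defined in template_ops_tools below matches trigger names containing '[Heal]' and runs remote-healer against the affected host. The relevant condition from that action:

    - conditiontype: trigger name
      operator: like
      value: "[Heal]"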
diff --git a/roles/os_zabbix/vars/template_ops_tools.yml b/roles/os_zabbix/vars/template_ops_tools.yml
deleted file mode 100644
index a0a5a4d03..000000000
--- a/roles/os_zabbix/vars/template_ops_tools.yml
+++ /dev/null
@@ -1,54 +0,0 @@
----
-g_template_ops_tools:
- name: Template Operations Tools
- zdiscoveryrules:
- - name: disc.ops.runner
- key: disc.ops.runner
- lifetime: 1
- description: "Dynamically register operations runner items"
-
- zitemprototypes:
- - discoveryrule_key: disc.ops.runner
- name: "Exit code of ops-runner[{#OSO_COMMAND}]"
- key: "disc.ops.runner.command.exitcode[{#OSO_COMMAND}]"
- value_type: int
- description: "The exit code of the command run from ops-runner"
- applications:
- - Ops Runner
-
- ztriggerprototypes:
- - name: 'ops-runner[{#OSO_COMMAND}]: non-zero exit code on {HOST.NAME}'
- expression: '{Template Operations Tools:disc.ops.runner.command.exitcode[{#OSO_COMMAND}].last()}<>0'
- url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_ops_runner_command.asciidoc'
- priority: average
-
- zactions:
- - name: 'Remote command for [Heal] triggers'
- status: enabled
- escalation_time: 60
- conditions_filter:
- calculation_type: "and/or"
- conditions:
- - conditiontype: maintenance status
- operator: not in
- - conditiontype: trigger name
- operator: like
- value: "[Heal]"
- - conditiontype: trigger value
- operator: "="
- value: PROBLEM
- operations:
- - esc_step_from: 1
- esc_step_to: 1
- esc_period: 0
- operationtype: remote command
- opcommand:
- command: 'ssh -i /etc/openshift_tools/scriptrunner_id_rsa {{ ozb_scriptrunner_user }}@{{ ozb_scriptrunner_bastion_host }} remote-healer --host \"{HOST.NAME}\" --trigger \"{TRIGGER.NAME}\" --trigger-val \"{TRIGGER.VALUE}\"'
- execute_on: "zabbix server"
- type: 'custom script'
- target_hosts:
- - target_type: 'zabbix server'
- opconditions:
- - conditiontype: 'event acknowledged'
- operator: '='
- value: 'not acknowledged'
diff --git a/roles/os_zabbix/vars/template_os_linux.yml b/roles/os_zabbix/vars/template_os_linux.yml
deleted file mode 100644
index c6e557f12..000000000
--- a/roles/os_zabbix/vars/template_os_linux.yml
+++ /dev/null
@@ -1,314 +0,0 @@
----
-g_template_os_linux:
- name: Template OS Linux
- zitems:
- - key: kernel.uname.sysname
- applications:
- - Kernel
- value_type: string
-
- - key: kernel.all.cpu.wait.total
- applications:
- - Kernel
- value_type: float
- units: '%'
-
- - key: kernel.all.cpu.irq.hard
- applications:
- - Kernel
- value_type: float
- units: '%'
-
- - key: kernel.all.cpu.idle
- applications:
- - Kernel
- value_type: float
- units: '%'
-
- - key: kernel.uname.distro
- applications:
- - Kernel
- value_type: string
-
- - key: kernel.uname.nodename
- applications:
- - Kernel
- value_type: string
-
- - key: kernel.all.cpu.irq.soft
- applications:
- - Kernel
- value_type: float
- units: '%'
-
- - key: kernel.all.load.15_minute
- applications:
- - Kernel
- value_type: float
-
- - key: kernel.all.cpu.sys
- applications:
- - Kernel
- value_type: float
- units: '%'
-
- - key: kernel.all.load.5_minute
- applications:
- - Kernel
- value_type: float
-
- - key: kernel.all.cpu.nice
- applications:
- - Kernel
- value_type: float
- units: '%'
-
- - key: kernel.all.load.1_minute
- applications:
- - Kernel
- value_type: float
-
- - key: kernel.uname.version
- applications:
- - Kernel
- value_type: string
-
- - key: kernel.all.uptime
- applications:
- - Kernel
- value_type: int
-
- - key: kernel.all.cpu.user
- applications:
- - Kernel
- value_type: float
- units: '%'
-
- - key: kernel.uname.machine
- applications:
- - Kernel
- value_type: string
-
- - key: hinv.ncpu
- applications:
- - Kernel
- value_type: int
-
- - key: kernel.all.cpu.steal
- applications:
- - Kernel
- value_type: float
- units: '%'
-
- - key: kernel.all.pswitch
- applications:
- - Kernel
- value_type: int
-
- - key: kernel.uname.release
- applications:
- - Kernel
- value_type: string
-
- - key: proc.nprocs
- applications:
- - Kernel
- value_type: int
-
- # Memory Items
- - key: mem.freemem
- applications:
- - Memory
- value_type: int
- description: "PCP: free system memory metric from /proc/meminfo"
- multiplier: 1024
- units: B
-
- - key: mem.util.bufmem
- applications:
- - Memory
- value_type: int
- description: "PCP: Memory allocated for buffer_heads.; I/O buffers metric from /proc/meminfo"
- multiplier: 1024
- units: B
-
- - key: swap.used
- applications:
- - Memory
- value_type: int
- description: "PCP: swap used metric from /proc/meminfo"
- multiplier: 1024
- units: B
-
- - key: swap.length
- applications:
- - Memory
- value_type: int
- description: "PCP: total swap available metric from /proc/meminfo"
- multiplier: 1024
- units: B
-
- - key: mem.physmem
- applications:
- - Memory
- value_type: int
- description: "PCP: The value of this metric corresponds to the \"MemTotal\" field reported by /proc/meminfo. Note that this does not necessarily correspond to actual installed physical memory - there may be areas of the physical address space mapped as ROM in various peripheral devices and the bios may be mirroring certain ROMs in RAM."
- multiplier: 1024
- units: B
-
- - key: swap.free
- applications:
- - Memory
- value_type: int
- description: "PCP: swap free metric from /proc/meminfo"
- multiplier: 1024
- units: B
-
- - key: mem.util.available
- applications:
- - Memory
- value_type: int
- description: "PCP: The amount of memory that is available for a new workload, without pushing the system into swap. Estimated from MemFree, Active(file), Inactive(file), and SReclaimable, as well as the \"low\" watermarks from /proc/zoneinfo.; available memory from /proc/meminfo"
- multiplier: 1024
- units: B
-
- - key: mem.util.used
- applications:
- - Memory
- value_type: int
- description: "PCP: Used memory is the difference between mem.physmem and mem.freemem; used memory metric from /proc/meminfo"
- multiplier: 1024
- units: B
-
- - key: mem.util.cached
- applications:
- - Memory
- value_type: int
- description: "PCP: Memory used by the page cache, including buffered file data. This is in-memory cache for files read from the disk (the pagecache) but doesn't include SwapCached.; page cache metric from /proc/meminfo"
- multiplier: 1024
- units: B
-
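All of the memory items above carry multiplier: 1024 because /proc/meminfo reports kilobytes while the items are declared with units B; Zabbix multiplies each incoming value so byte units render correctly. A worked example:

    # MemFree: 30000 kB  ->  30000 * 1024 = 30720000 B (~30 MB),
    # the threshold used by the 'Lack of available memory' trigger below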
- zdiscoveryrules:
- - name: disc.filesys
- key: disc.filesys
- lifetime: 1
- description: "Dynamically register the filesystems"
-
- - name: disc.disk
- key: disc.disk
- lifetime: 1
- description: "Dynamically register disks on a node"
-
- - name: disc.network
- key: disc.network
- lifetime: 1
- description: "Dynamically register network interfaces on a node"
-
- zitemprototypes:
- - discoveryrule_key: disc.filesys
- name: "disc.filesys.full.{#OSO_FILESYS}"
- key: "disc.filesys.full[{#OSO_FILESYS}]"
- value_type: float
- description: "PCP filesys.full option. This is the percent full returned from pcp filesys.full"
- applications:
- - Disk
-
- - discoveryrule_key: disc.filesys
- name: "Percentage of used inodes on {#OSO_FILESYS}"
- key: "disc.filesys.inodes.pused[{#OSO_FILESYS}]"
- value_type: float
- description: "PCP derived value of percentage of used inodes on a filesystem."
- applications:
- - Disk
-
- - discoveryrule_key: disc.disk
- name: "TPS (IOPS) for disk {#OSO_DISK}"
- key: "disc.disk.tps[{#OSO_DISK}]"
- value_type: int
- description: "PCP disk.dev.totals metric measured over a period of time. This shows how many disk transactions per second the disk is using"
- applications:
- - Disk
-
- - discoveryrule_key: disc.disk
- name: "Percent Utilized for disk {#OSO_DISK}"
- key: "disc.disk.putil[{#OSO_DISK}]"
- value_type: float
- description: "PCP disk.dev.avactive metric measured over a period of time. This is the '%util' in the iostat command"
- applications:
- - Disk
-
- - discoveryrule_key: disc.network
- name: "Bytes per second IN on network interface {#OSO_NET_INTERFACE}"
- key: "disc.network.in.bytes[{#OSO_NET_INTERFACE}]"
- value_type: int
- units: B
- delta: 1
- description: "PCP network.interface.in.bytes metric. This is setup as a delta in Zabbix to measure the speed per second"
- applications:
- - Network
-
- - discoveryrule_key: disc.network
- name: "Bytes per second OUT on network interface {#OSO_NET_INTERFACE}"
- key: "disc.network.out.bytes[{#OSO_NET_INTERFACE}]"
- value_type: int
- units: B
- delta: 1
- description: "PCP network.interface.out.bytes metric. This is setup as a delta in Zabbix to measure the speed per second"
- applications:
- - Network
-
- ztriggerprototypes:
- - name: 'Filesystem: {#OSO_FILESYS} has less than 10% free disk space on {HOST.NAME}'
- expression: '{Template OS Linux:disc.filesys.full[{#OSO_FILESYS}].last()}>90'
- url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_filesys_full.asciidoc'
- priority: high
-
- # This has a dependency on the previous trigger
- # Trigger Prototypes do not work in 2.4. They will work in Zabbix 3.0
- - name: 'Filesystem: {#OSO_FILESYS} has less than 15% free disk space on {HOST.NAME}'
- expression: '{Template OS Linux:disc.filesys.full[{#OSO_FILESYS}].last()}>85'
- url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_filesys_full.asciidoc'
- priority: warn
- dependencies:
- - 'Filesystem: {#OSO_FILESYS} has less than 10% free disk space on {HOST.NAME}'
-
- - name: 'Filesystem: {#OSO_FILESYS} has less than 5% free inodes on {HOST.NAME}'
- expression: '{Template OS Linux:disc.filesys.inodes.pused[{#OSO_FILESYS}].last()}>95'
- url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_filesys_full.asciidoc'
- priority: high
-
- # This has a dependency on the previous trigger
- # Trigger Prototypes do not work in 2.4. They will work in Zabbix 3.0
- - name: 'Filesystem: {#OSO_FILESYS} has less than 10% free inodes on {HOST.NAME}'
- expression: '{Template OS Linux:disc.filesys.inodes.pused[{#OSO_FILESYS}].last()}>90'
- url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_filesys_full.asciidoc'
- priority: warn
- dependencies:
- - 'Filesystem: {#OSO_FILESYS} has less than 5% free inodes on {HOST.NAME}'
-
- ztriggers:
- - name: 'Too many TOTAL processes on {HOST.NAME}'
- expression: '{Template OS Linux:proc.nprocs.last()}>5000'
- url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_proc.asciidoc'
- priority: warn
-
- - name: 'Lack of available memory on {HOST.NAME}'
- expression: '{Template OS Linux:mem.freemem.last()}<30720000'
- url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_memory.asciidoc'
- priority: warn
- description: 'Alert when free memory drops below roughly 30 MB: 30000 KB x 1024 = 30720000 bytes'
-
- # CPU Utilization #
- - name: 'CPU idle less than 5% on {HOST.NAME}'
- expression: '{Template OS Linux:kernel.all.cpu.idle.max(#5)}<5'
- url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_cpu_idle.asciidoc'
- priority: average
- description: 'CPU is less than 5% idle'
-
- - name: 'CPU idle less than 10% on {HOST.NAME}'
- expression: '{Template OS Linux:kernel.all.cpu.idle.max(#5)}<10'
- url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_cpu_idle.asciidoc'
- priority: average
- description: 'CPU is less than 10% idle'
- dependencies:
- - 'CPU idle less than 5% on {HOST.NAME}'
diff --git a/roles/os_zabbix/vars/template_performance_copilot.yml b/roles/os_zabbix/vars/template_performance_copilot.yml
deleted file mode 100644
index b62fa0228..000000000
--- a/roles/os_zabbix/vars/template_performance_copilot.yml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-g_template_performance_copilot:
- name: Template Performance Copilot
- zitems:
- - key: pcp.ping
- applications:
- - Performance Copilot
- value_type: int
-
- ztriggers:
- - name: 'pcp.ping failed on {HOST.NAME}'
- expression: '{Template Performance Copilot:pcp.ping.max(#3)}<1'
- url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_pcp_ping.asciidoc'
- priority: average
diff --git a/roles/os_zabbix/vars/template_zagg_server.yml b/roles/os_zabbix/vars/template_zagg_server.yml
deleted file mode 100644
index db5665993..000000000
--- a/roles/os_zabbix/vars/template_zagg_server.yml
+++ /dev/null
@@ -1,46 +0,0 @@
----
-g_template_zagg_server:
- name: Template Zagg Server
- zitems:
- - key: zagg.server.metrics.count
- applications:
- - Zagg Server
- value_type: int
-
- - key: zagg.server.metrics.errors
- applications:
- - Zagg Server
- value_type: int
-
- - key: zagg.server.heartbeat.errors
- applications:
- - Zagg Server
- value_type: int
-
- - key: zagg.server.heartbeat.count
- applications:
- - Zagg Server
- value_type: int
-
- ztriggers:
- - name: 'Error processing metrics on {HOST.NAME}'
- expression: '{Template Zagg Server:zagg.server.metrics.errors.min(#3)}>0'
- url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/zagg_server.asciidoc'
- priority: average
-
- - name: 'Error processing heartbeats on {HOST.NAME}'
- expression: '{Template Zagg Server:zagg.server.heartbeat.errors.min(#3)}>0'
- url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/zagg_server.asciidoc'
- priority: average
-
- - name: 'Critically High number of metrics in Zagg queue {HOST.NAME}'
- expression: '{Template Zagg Server:zagg.server.metrics.count.min(#3)}>10000'
- url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/zagg_server.asciidoc'
- priority: high
-
- - name: 'High number of metrics in Zagg queue {HOST.NAME}'
- expression: '{Template Zagg Server:zagg.server.metrics.count.min(#3)}>5000'
- url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/zagg_server.asciidoc'
- dependencies:
- - 'Critically High number of metrics in Zagg queue {HOST.NAME}'
- priority: average
diff --git a/roles/oso_host_monitoring/README.md b/roles/oso_host_monitoring/README.md
deleted file mode 100644
index f1fa05adb..000000000
--- a/roles/oso_host_monitoring/README.md
+++ /dev/null
@@ -1,50 +0,0 @@
-Role Name
-=========
-
-Applies local host monitoring container(s).
-
-Requirements
-------------
-
-None.
-
-Role Variables
---------------
-
-osohm_zagg_web_url: where to contact monitoring service
-osohm_host_monitoring: name of host monitoring container
-osohm_zagg_client: name of container with zabbix client
-osohm_docker_registry_url: docker repository containing above containers
-osohm_default_zagg_server_user: username for logging in to the zagg/zabbix server
-osohm_default_zagg_server_password: password for the zagg/zabbix server
-
-Dependencies
-------------
-
-None.
-
-Example Playbook
-----------------
-
-An example of how to use this role, with variables passed in as parameters:
-
- - hosts: servers
- roles:
- - oso_host_monitoring
- vars:
- osohm_zagg_web_url: "https://..."
- osohm_host_monitoring: "oso-rhel7-host-monitoring"
- osohm_zagg_client: "oso-rhel7-zagg-client"
- osohm_docker_registry_url: "docker-registry.example.com/mon/"
- osohm_default_zagg_server_user: "zagg-client"
- osohm_default_zagg_server_password: "secret"
-
-License
--------
-
-ASL 2.0
-
-Author Information
-------------------
-
-OpenShift operations, Red Hat, Inc
diff --git a/roles/oso_host_monitoring/defaults/main.yml b/roles/oso_host_monitoring/defaults/main.yml
deleted file mode 100644
index ed97d539c..000000000
--- a/roles/oso_host_monitoring/defaults/main.yml
+++ /dev/null
@@ -1 +0,0 @@
----
diff --git a/roles/oso_host_monitoring/handlers/main.yml b/roles/oso_host_monitoring/handlers/main.yml
deleted file mode 100644
index 3a5d8024c..000000000
--- a/roles/oso_host_monitoring/handlers/main.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- name: "Restart the {{ osohm_host_monitoring }} service"
- service:
- name: "{{ osohm_host_monitoring }}"
- state: restarted
- enabled: yes
diff --git a/roles/oso_host_monitoring/meta/main.yml b/roles/oso_host_monitoring/meta/main.yml
deleted file mode 100644
index cce30c2db..000000000
--- a/roles/oso_host_monitoring/meta/main.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-galaxy_info:
- author: OpenShift
- description: apply monitoring container(s).
- company: Red Hat, Inc
- license: ASL 2.0
- min_ansible_version: 1.2
-dependencies: []
diff --git a/roles/oso_host_monitoring/tasks/main.yml b/roles/oso_host_monitoring/tasks/main.yml
deleted file mode 100644
index a0a453416..000000000
--- a/roles/oso_host_monitoring/tasks/main.yml
+++ /dev/null
@@ -1,47 +0,0 @@
----
-- fail:
- msg: "This playbook requires {{item}} to be set."
- when: "{{ item }} is not defined or {{ item }} == ''"
- with_items:
- - osohm_zagg_web_url
- - osohm_host_monitoring
- - osohm_docker_registry_url
- - osohm_default_zagg_server_user
- - osohm_default_zagg_server_password
-
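The templated when: clause above works on the Ansible versions this repository targets but emits warnings on newer releases; a roughly equivalent sketch of the same validation using the assert module:

    - name: Verify required variables are set
      assert:
        that:
          - osohm_zagg_web_url | default('') != ''
          - osohm_host_monitoring | default('') != ''
          - osohm_docker_registry_url | default('') != ''
          - osohm_default_zagg_server_user | default('') != ''
          - osohm_default_zagg_server_password | default('') != ''
        msg: "All osohm_* monitoring variables must be set to non-empty values."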
-- name: create /etc/docker/ops
- file:
- path: /etc/docker/ops
- state: directory
- mode: 0770
- group: root
- owner: root
-
-- name: Copy dockercfg to /etc/docker/ops
- template:
- src: docker-registry.ops.cfg.j2
- dest: /etc/docker/ops/.dockercfg
- owner: root
- group: root
- mode: 0600
-
-- name: "Copy {{ osohm_host_monitoring }} systemd file"
- template:
- src: "{{ osohm_host_monitoring }}.service.j2"
- dest: "/etc/systemd/system/{{ osohm_host_monitoring }}.service"
- owner: root
- group: root
- mode: 0644
- notify:
- - "Restart the {{ osohm_host_monitoring }} service"
- register: systemd_host_monitoring
-
-- name: reload systemd
- command: /usr/bin/systemctl --system daemon-reload
- when: systemd_host_monitoring | changed
-
-- name: "Start the {{ osohm_host_monitoring }} service"
- service:
- name: "{{ osohm_host_monitoring }}"
- state: started
- enabled: yes
diff --git a/roles/oso_host_monitoring/templates/docker-registry.ops.cfg.j2 b/roles/oso_host_monitoring/templates/docker-registry.ops.cfg.j2
deleted file mode 100644
index 9e49da469..000000000
--- a/roles/oso_host_monitoring/templates/docker-registry.ops.cfg.j2
+++ /dev/null
@@ -1 +0,0 @@
-{"{{ osohm_docker_registry_ops_url }}":{"auth":"{{ osohm_docker_registry_ops_key }}","email":"{{ osohm_docker_registry_ops_email }}"}}
diff --git a/roles/oso_host_monitoring/templates/oso-rhel7-host-monitoring.service.j2 b/roles/oso_host_monitoring/templates/oso-rhel7-host-monitoring.service.j2
deleted file mode 100644
index e17092202..000000000
--- a/roles/oso_host_monitoring/templates/oso-rhel7-host-monitoring.service.j2
+++ /dev/null
@@ -1,78 +0,0 @@
-# This is a systemd unit file to run this docker container under systemd.
-# To make this work:
-# * pull the image (probably from the ops docker registry)
-# * render this template into /etc/systemd/system without the .j2 extension
-# * run the commands:
-# systemctl daemon-reload
-# systemctl enable oso-rhel7-host-monitoring
-# systemctl start oso-rhel7-host-monitoring
-#
-#
-[Unit]
-Description=Openshift Host Monitoring Container
-Requires=docker.service
-After=docker.service
-
-
-[Service]
-Type=simple
-TimeoutStartSec=5m
-Environment=HOME=/etc/docker/ops
-#Slice=container-small.slice
-
-# systemd syntax '=-' ignores errors from return codes.
-ExecStartPre=-/usr/bin/docker kill "{{ osohm_host_monitoring }}"
-ExecStartPre=-/usr/bin/docker rm "{{ osohm_host_monitoring }}"
-ExecStartPre=-/usr/bin/docker pull "{{ osohm_docker_registry_url }}{{ osohm_host_monitoring }}"
-
-# mwoodson note 1-7-16:
-# pcp recommends mounting /run in their Dockerfile.
-# /run conflicts with cron, which also runs in this container,
-# so I am leaving /run out for now. The folks in #pcp said they mounted /run
-# to share the pcp socket that is created in /run; we are not using that,
-# as far as I know.
-# This problem goes away once systemd runs inside the containers and cron is
-# replaced by systemd timers.
-# -v /run:/run \
-
-ExecStart=/usr/bin/docker run --name {{ osohm_host_monitoring }} \
- --privileged \
- --pid=host \
- --net=host \
- --ipc=host \
- -e ZAGG_URL={{ osohm_zagg_web_url }} \
- -e ZAGG_USER={{ osohm_default_zagg_server_user }} \
- -e ZAGG_PASSWORD={{ osohm_default_zagg_server_password }} \
- -e ZAGG_CLIENT_HOSTNAME={{ oo_name }} \
- -e ZAGG_SSL_VERIFY={{ osohm_zagg_verify_ssl }} \
- -e OSO_CLUSTER_GROUP={{ cluster_group }} \
- -e OSO_CLUSTER_ID={{ oo_clusterid }} \
- -e OSO_ENVIRONMENT={{ oo_environment }} \
- -e OSO_HOST_TYPE={{ hostvars[inventory_hostname]['oo_hosttype'] }} \
- -e OSO_SUB_HOST_TYPE={{ hostvars[inventory_hostname]['oo_subhosttype'] }} \
- -e OSO_MASTER_HA={{ osohm_master_ha }} \
- -v /etc/localtime:/etc/localtime \
- -v /sys:/sys:ro \
- -v /sys/fs/selinux \
- -v /var/lib/docker:/var/lib/docker:ro \
- -v /var/run/docker.sock:/var/run/docker.sock \
- -v /var/run/openvswitch:/var/run/openvswitch \
-{% if hostvars[inventory_hostname]['oo_hosttype'] == 'master' %}
- -v /etc/origin/master/admin.kubeconfig:/etc/origin/master/admin.kubeconfig \
- -v /etc/origin/master/master.etcd-client.crt:/etc/origin/master/master.etcd-client.crt \
- -v /etc/origin/master/master.etcd-client.key:/etc/origin/master/master.etcd-client.key \
- -v /etc/origin/master/master-config.yaml:/etc/origin/master/master-config.yaml \
-{% elif hostvars[inventory_hostname]['oo_hosttype'] == 'node' %}
- -v /etc/origin/node:/etc/origin/node \
-{% endif %}
- {{ osohm_docker_registry_url }}{{ osohm_host_monitoring }}
-
-
-ExecReload=-/usr/bin/docker stop "{{ osohm_host_monitoring }}"
-ExecReload=-/usr/bin/docker rm "{{ osohm_host_monitoring }}"
-ExecStop=-/usr/bin/docker stop "{{ osohm_host_monitoring }}"
-Restart=always
-RestartSec=30
-
-[Install]
-WantedBy=default.target
diff --git a/roles/oso_host_monitoring/vars/main.yml b/roles/oso_host_monitoring/vars/main.yml
deleted file mode 100644
index ed97d539c..000000000
--- a/roles/oso_host_monitoring/vars/main.yml
+++ /dev/null
@@ -1 +0,0 @@
----
diff --git a/roles/oso_monitoring_tools/README.md b/roles/oso_monitoring_tools/README.md
deleted file mode 100644
index 4215f9eeb..000000000
--- a/roles/oso_monitoring_tools/README.md
+++ /dev/null
@@ -1,54 +0,0 @@
-Role Name
-=========
-
-This role will install the Openshift Monitoring Utilities
-
-Requirements
-------------
-
-The openshift-tools yum repositories must be available on the host, since this role installs the zagg and zabbix monitoring client packages via yum.
-
-Role Variables
---------------
-
-osomt_zagg_client_config
-
-from vars/main.yml:
-
-osomt_zagg_client_config:
- host:
- name: "{{ osomt_host_name }}"
- zagg:
- url: "{{ osomt_zagg_url }}"
- user: "{{ osomt_zagg_user }}"
- pass: "{{ osomt_zagg_password }}"
- ssl_verify: "{{ osomt_zagg_ssl_verify }}"
- verbose: "{{ osomt_zagg_verbose }}"
- debug: "{{ osomt_zagg_debug }}"
-
-Dependencies
-------------
-
-None
-
-Example Playbook
-----------------
-
-- role: "oso_monitoring_tools"
- osomt_host_name: hostname
- osomt_zagg_url: http://path.to/zagg_web
- osomt_zagg_user: admin
- osomt_zagg_password: password
- osomt_zagg_ssl_verify: True
- osomt_zagg_verbose: False
- osomt_zagg_debug: False
-
-License
--------
-
-BSD
-
-Author Information
-------------------
-
-Openshift Operations
diff --git a/roles/oso_monitoring_tools/defaults/main.yml b/roles/oso_monitoring_tools/defaults/main.yml
deleted file mode 100644
index a17424f25..000000000
--- a/roles/oso_monitoring_tools/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# defaults file for oso_monitoring_tools
diff --git a/roles/oso_monitoring_tools/handlers/main.yml b/roles/oso_monitoring_tools/handlers/main.yml
deleted file mode 100644
index cefa780ab..000000000
--- a/roles/oso_monitoring_tools/handlers/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# handlers file for oso_monitoring_tools
diff --git a/roles/oso_monitoring_tools/meta/main.yml b/roles/oso_monitoring_tools/meta/main.yml
deleted file mode 100644
index 9c42b68dc..000000000
--- a/roles/oso_monitoring_tools/meta/main.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-galaxy_info:
- author: OpenShift Operations
- description: Install OpenShift monitoring tools
- company: Red Hat, Inc
- license: ASL 2.0
- min_ansible_version: 1.2
-dependencies: []
diff --git a/roles/oso_monitoring_tools/tasks/main.yml b/roles/oso_monitoring_tools/tasks/main.yml
deleted file mode 100644
index c90fc56e2..000000000
--- a/roles/oso_monitoring_tools/tasks/main.yml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-# tasks file for oso_monitoring_tools
-- name: Install the OpenShift Tools RPMs
- yum:
- name: "{{ item }}"
- state: latest
- with_items:
- - openshift-tools-scripts-monitoring-zagg-client
- - python-openshift-tools-monitoring-zagg
- - python-openshift-tools-monitoring-zabbix
-
-- debug: var=osomt_zagg_client_config
-
-- name: Generate the /etc/openshift_tools/zagg_client.yaml config file
- copy:
- content: "{{ osomt_zagg_client_config | to_nice_yaml }}"
- dest: /etc/openshift_tools/zagg_client.yaml
- mode: "644"
diff --git a/roles/oso_monitoring_tools/vars/main.yml b/roles/oso_monitoring_tools/vars/main.yml
deleted file mode 100644
index 3538ba30b..000000000
--- a/roles/oso_monitoring_tools/vars/main.yml
+++ /dev/null
@@ -1,12 +0,0 @@
----
-# vars file for oso_monitoring_tools
-osomt_zagg_client_config:
- host:
- name: "{{ osomt_host_name }}"
- zagg:
- url: "{{ osomt_zagg_url }}"
- user: "{{ osomt_zagg_user }}"
- pass: "{{ osomt_zagg_password }}"
- ssl_verify: "{{ osomt_zagg_ssl_verify }}"
- verbose: "{{ osomt_zagg_verbose }}"
- debug: "{{ osomt_zagg_debug }}"
diff --git a/roles/tito/README.md b/roles/tito/README.md
deleted file mode 100644
index c4e2856dc..000000000
--- a/roles/tito/README.md
+++ /dev/null
@@ -1,38 +0,0 @@
-tito
-====
-
-This role manages Tito.
-
-https://github.com/dgoodwin/tito
-
-Requirements
-------------
-
-None
-
-Role Variables
---------------
-
-None
-
-Dependencies
-------------
-
-None
-
-Example Playbook
-----------------
-
- - hosts: servers
- roles:
- - role: tito
-
-License
--------
-
-Apache License, Version 2.0
-
-Author Information
-------------------
-
-Thomas Wiest
diff --git a/roles/tito/defaults/main.yml b/roles/tito/defaults/main.yml
deleted file mode 100644
index dd7cd269e..000000000
--- a/roles/tito/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# defaults file for tito
diff --git a/roles/tito/handlers/main.yml b/roles/tito/handlers/main.yml
deleted file mode 100644
index e9ce609d5..000000000
--- a/roles/tito/handlers/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# handlers file for tito
diff --git a/roles/tito/meta/main.yml b/roles/tito/meta/main.yml
deleted file mode 100644
index fb121c08e..000000000
--- a/roles/tito/meta/main.yml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-galaxy_info:
- author: Thomas Wiest
- description: Manages Tito
- company: Red Hat
- license: Apache License, Version 2.0
- min_ansible_version: 1.2
- platforms:
- - name: EL
- versions:
- - 7
- categories:
- - packaging
-dependencies: []
diff --git a/roles/tito/tasks/main.yml b/roles/tito/tasks/main.yml
deleted file mode 100644
index 3cf9e2bfd..000000000
--- a/roles/tito/tasks/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-- action: "{{ ansible_pkg_mgr }} name=tito state=present"
diff --git a/roles/tito/vars/main.yml b/roles/tito/vars/main.yml
deleted file mode 100644
index 8a1aafc41..000000000
--- a/roles/tito/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# vars file for tito
diff --git a/roles/yum_repos/README.md b/roles/yum_repos/README.md
deleted file mode 100644
index 908ab4972..000000000
--- a/roles/yum_repos/README.md
+++ /dev/null
@@ -1,113 +0,0 @@
-Yum Repos
-=========
-
-This role allows easy deployment of yum repository config files.
-
-Requirements
-------------
-
-Yum or dnf
-
-Role Variables
---------------
-
-| Name              | Default value | Description                                   |
-|-------------------|---------------|-----------------------------------------------|
-| repo_files        | None          | List of repo files and their repos (required) |
-| repo_enabled      | 1             | Should repos be enabled by default            |
-| repo_gpgcheck     | 1             | Should repo gpgcheck be enabled by default    |
-
-Dependencies
-------------
-
-None
-
-Example Playbook
-----------------
-
-A single repo file containing a single repo:
- - hosts: servers
- roles:
- - role: yum_repos
- repo_files:
- - id: my_repo
- repos:
- - id: my_repo
- name: My Awesome Repo
- baseurl: https://my.awesome.repo/is/available/here
- skip_if_unavailable: yes
- gpgkey: https://my.awesome.repo/pubkey.gpg
-
-A single repo file containing a single repo, disabling gpgcheck:
- - hosts: servers
- roles:
- - role: yum_repos
- repo_files:
- - id: my_other_repo
- repos:
- - id: my_other_repo
- name: My Other Awesome Repo
- baseurl: https://my.other.awesome.repo/is/available/here
- gpgcheck: no
-
-A single repo file containing a single disabled repo:
- - hosts: servers
- roles:
- - role: yum_repos
- repo_files:
- - id: my_other_repo
- repos:
- - id: my_other_repo
- name: My Other Awesome Repo
- baseurl: https://my.other.awesome.repo/is/available/here
- enabled: no
-
-A single repo file containing multiple repos:
- - hosts: servers
- roles:
- - role: yum_repos
- repo_files:
- - id: my_repos
- repos:
- - id: my_repo
- name: My Awesome Repo
- baseurl: https://my.awesome.repo/is/available/here
- gpgkey: https://my.awesome.repo/pubkey.gpg
- - id: my_other_repo
- name: My Other Awesome Repo
- baseurl: https://my.other.awesome.repo/is/available/here
- gpgkey: https://my.other.awesome.repo/pubkey.gpg
-
-Multiple repo files containing multiple repos:
- - hosts: servers
- roles:
- - role: yum_repos
- repo_files:
- - id: my_repos
- repos:
- - id: my_repo
- name: My Awesome Repo
- baseurl: https://my.awesome.repo/is/available/here
- gpgkey: https://my.awesome.repo/pubkey.gpg
- - id: my_other_repo
- name: My Other Awesome Repo
- baseurl: https://my.other.awesome.repo/is/available/here
- gpgkey: https://my.other.awesome.repo/pubkey.gpg
- - id: joes_repos
- repos:
- - id: joes_repo
- name: Joe's Less Awesome Repo
- baseurl: https://joes.repo/is/here
- gpgkey: https://joes.repo/pubkey.gpg
- - id: joes_otherrepo
- name: Joe's Other Less Awesome Repo
- baseurl: https://joes.repo/is/there
- gpgkey: https://joes.repo/pubkey.gpg
-
-License
--------
-
-ASL 2.0
-
-Author Information
-------------------
-
-OpenShift Online Operations
diff --git a/roles/yum_repos/defaults/main.yml b/roles/yum_repos/defaults/main.yml
deleted file mode 100644
index 515fb7a4a..000000000
--- a/roles/yum_repos/defaults/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-repo_enabled: 1
-repo_gpgcheck: 1
diff --git a/roles/yum_repos/meta/main.yml b/roles/yum_repos/meta/main.yml
deleted file mode 100644
index 6b8374da9..000000000
--- a/roles/yum_repos/meta/main.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-galaxy_info:
- author: openshift operations
- description: Deploy yum repository config files
- company: Red Hat, Inc.
- license: ASL 2.0
- min_ansible_version: 1.2
-dependencies: []
diff --git a/roles/yum_repos/tasks/main.yml b/roles/yum_repos/tasks/main.yml
deleted file mode 100644
index 46928a00b..000000000
--- a/roles/yum_repos/tasks/main.yml
+++ /dev/null
@@ -1,48 +0,0 @@
----
-# Convert old params to new params
-- set_fact:
- repo_files:
- - id: "{{ repo_tag }}"
- repos:
- - id: "{{ repo_tag }}"
- name: "{{ repo_name }}"
- baseurl: "{{ repo_baseurl }}"
- enabled: "{{ repo_enabled }}"
- gpgcheck: "{{ repo_gpg_check | default(repo_gpgcheck) }}"
- sslverify: "{{ repo_sslverify | default(None) }}"
- sslclientcert: "{{ repo_sslclientcert | default(None) }}"
- sslclientkey: "{{ repo_sslclientkey | default(None) }}"
- gpgkey: "{{ repo_gpgkey | default(None) }}"
- when: repo_files is not defined
-
-- name: Verify repo_files is a list
- assert:
- that:
- - repo_files is iterable and repo_files is not string and repo_files is not mapping
-
-- name: Verify repo_files items have an id and a repos list
- assert:
- that:
- - item is mapping
- - "'id' in item"
- - "'repos' in item"
- - item.repos is iterable and item.repos is not string and item.repos is not mapping
- with_items: repo_files
-
-- name: Verify that repo_files.repos have the required keys
- assert:
- that:
- - item.1 is mapping
- - "'id' in item.1"
- - "'name' in item.1"
- - "'baseurl' in item.1"
- with_subelements:
- - repo_files
- - repos
-
-- name: Install yum repo config files
- template:
- src: yumrepo.j2
- dest: /etc/yum.repos.d/{{ item.id }}.repo
- with_items: repo_files
- when: not openshift.common.is_containerized | bool
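The set_fact at the top of this file gave the legacy flat parameters a migration path: when repo_files is not defined, the old variables are folded into the newer structure. A rough sketch of the conversion, reusing the hypothetical values from the README examples (optional keys such as repo_gpgkey default to None when unset):

    # Old-style parameters ...
    repo_tag: my_repo
    repo_name: My Awesome Repo
    repo_baseurl: https://my.awesome.repo/is/available/here

    # ... become the equivalent repo_files structure:
    repo_files:
    - id: my_repo
      repos:
      - id: my_repo
        name: My Awesome Repo
        baseurl: https://my.awesome.repo/is/available/here
        enabled: 1
        gpgcheck: 1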
diff --git a/roles/yum_repos/templates/yumrepo.j2 b/roles/yum_repos/templates/yumrepo.j2
deleted file mode 100644
index 0dfdbfe43..000000000
--- a/roles/yum_repos/templates/yumrepo.j2
+++ /dev/null
@@ -1,18 +0,0 @@
-{% set repos = item.repos %}
-{% for repo in repos %}
-[{{ repo.id }}]
-name={{ repo.name }}
-baseurl={{ repo.baseurl }}
-{% set repo_enabled_value = repo.enabled | default(repo_enabled) %}
-{% set enable_repo = 1 if (repo_enabled_value | int(0) == 1 or repo_enabled_value | lower in ['true', 'yes']) else 0 %}
-enabled={{ enable_repo }}
-{% set repo_gpgcheck_value = repo.gpgcheck | default(repo_gpgcheck) %}
-{% set enable_gpgcheck = 1 if (repo_gpgcheck_value | int(0) == 1 or repo_gpgcheck_value | lower in ['true', 'yes']) else 0 %}
-gpgcheck={{ enable_gpgcheck }}
-{% for key, value in repo.iteritems() %}
-{% if key not in ['id', 'name', 'baseurl', 'enabled', 'gpgcheck'] and value is defined and value != '' %}
-{{ key }}={{ value }}
-{% endif %}
-{% endfor %}
-
-{% endfor %}
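Rendering this template against the first README example (with the role defaults repo_enabled=1 and repo_gpgcheck=1) would produce an /etc/yum.repos.d/my_repo.repo roughly like the following; note that enabled and gpgcheck are normalized to 0/1, while other boolean values such as skip_if_unavailable pass through unnormalized, and the order of the pass-through keys may vary:

    [my_repo]
    name=My Awesome Repo
    baseurl=https://my.awesome.repo/is/available/here
    enabled=1
    gpgcheck=1
    skip_if_unavailable=True
    gpgkey=https://my.awesome.repo/pubkey.gpg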
diff --git a/utils/src/ooinstall/cli_installer.py b/utils/src/ooinstall/cli_installer.py
index 3c5869a60..71bdbd5a1 100644
--- a/utils/src/ooinstall/cli_installer.py
+++ b/utils/src/ooinstall/cli_installer.py
@@ -163,11 +163,10 @@ http://docs.openshift.com/enterprise/latest/architecture/infrastructure_componen
if masters_set or num_masters != 2:
more_hosts = click.confirm('Do you want to add additional hosts?')
- if num_masters == 1:
- master = next((host for host in hosts if host.master), None)
- master.storage = True
- elif num_masters >= 3:
+ if num_masters >= 3:
collect_master_lb(hosts)
+
+ if not existing_env:
collect_storage_host(hosts)
return hosts
@@ -306,12 +305,17 @@ def collect_storage_host(hosts):
message = """
Setting up High Availability Masters requires a storage host. Please provide a
host that will be configured as a Registry Storage.
+
+Note: Containerized storage hosts are not currently supported.
"""
click.echo(message)
host_props = {}
+ first_master = next(host for host in hosts if host.master)
+
hostname_or_ip = click.prompt('Enter hostname or IP address',
- value_proc=validate_prompt_hostname)
+ value_proc=validate_prompt_hostname,
+ default=first_master.connect_to)
existing, existing_host = is_host_already_node_or_master(hostname_or_ip, hosts)
if existing and existing_host.node:
existing_host.storage = True
diff --git a/utils/src/ooinstall/openshift_ansible.py b/utils/src/ooinstall/openshift_ansible.py
index 713a9a423..5ace63918 100644
--- a/utils/src/ooinstall/openshift_ansible.py
+++ b/utils/src/ooinstall/openshift_ansible.py
@@ -121,7 +121,7 @@ def write_inventory_vars(base_inventory, multiple_masters, proxy):
base_inventory.write('\n[OSEv3:vars]\n')
base_inventory.write('ansible_ssh_user={}\n'.format(CFG.settings['ansible_ssh_user']))
if CFG.settings['ansible_ssh_user'] != 'root':
- base_inventory.write('ansible_become=true\n')
+ base_inventory.write('ansible_become=yes\n')
if multiple_masters and proxy is not None:
base_inventory.write('openshift_master_cluster_method=native\n')
base_inventory.write("openshift_master_cluster_hostname={}\n".format(proxy.hostname))
@@ -170,7 +170,7 @@ def write_host(host, inventory, schedulable=None):
if no_pwd_sudo == 1:
print 'The atomic-openshift-installer requires sudo access without a password.'
sys.exit(1)
- facts += ' ansible_become=true'
+ facts += ' ansible_become=yes'
inventory.write('{} {}\n'.format(host.connect_to, facts))
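For reference, with a non-root SSH user and a multi-master install fronted by a load balancer, write_inventory_vars now emits an [OSEv3:vars] section along these lines (the user and hostname are hypothetical, and vars written outside this hunk are omitted):

    [OSEv3:vars]
    ansible_ssh_user=cloud-user
    ansible_become=yes
    openshift_master_cluster_method=native
    openshift_master_cluster_hostname=lb.example.com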
diff --git a/utils/test/cli_installer_tests.py b/utils/test/cli_installer_tests.py
index 524df08c4..66ed66660 100644
--- a/utils/test/cli_installer_tests.py
+++ b/utils/test/cli_installer_tests.py
@@ -37,6 +37,14 @@ MOCK_FACTS = {
'public_hostname': 'node2.example.com'
}
},
+ '10.1.0.1': {
+ 'common': {
+ 'ip': '10.1.0.1',
+ 'public_ip': '10.1.0.1',
+ 'hostname': 'storage-private.example.com',
+ 'public_hostname': 'storage.example.com'
+ }
+ },
}
MOCK_FACTS_QUICKHA = {
@@ -250,6 +258,12 @@ hosts:
public_hostname: node2.example.com
node: true
master: true
+ - connect_to: 10.1.0.1
+ ip: 10.1.0.1
+ hostname: storage-private.example.com
+ public_ip: 24.222.0.6
+ public_hostname: storage.example.com
+ storage: true
"""
QUICKHA_CONFIG_PRECONFIGURED_LB = """
@@ -720,17 +734,18 @@ class AttendedCliTests(OOCliFixture):
('10.0.0.3', False, False)],
ssh_user='root',
variant_num=1,
- confirm_facts='y')
+ confirm_facts='y',
+ storage='10.1.0.1',)
self.cli_args.append("install")
result = self.runner.invoke(cli.cli, self.cli_args,
input=cli_input)
self.assert_result(result, 0)
self._verify_load_facts(load_facts_mock)
- self._verify_run_playbook(run_playbook_mock, 3, 3)
+ self._verify_run_playbook(run_playbook_mock, 4, 4)
written_config = read_yaml(self.config_file)
- self._verify_config_hosts(written_config, 3)
+ self._verify_config_hosts(written_config, 4)
inventory = ConfigParser.ConfigParser(allow_no_value=True)
inventory.read(os.path.join(self.work_dir, 'hosts'))
@@ -762,7 +777,8 @@ class AttendedCliTests(OOCliFixture):
add_nodes=[('10.0.0.3', False, False)],
ssh_user='root',
variant_num=1,
- confirm_facts='y')
+ confirm_facts='y',
+ storage='10.0.0.1',)
self.cli_args.append("install")
result = self.runner.invoke(cli.cli,
self.cli_args,
@@ -813,7 +829,8 @@ class AttendedCliTests(OOCliFixture):
ssh_user='root',
variant_num=1,
schedulable_masters_ok=True,
- confirm_facts='y')
+ confirm_facts='y',
+ storage='10.0.0.1',)
self._verify_get_hosts_to_run_on(mock_facts, load_facts_mock,
run_playbook_mock,
@@ -970,7 +987,8 @@ class AttendedCliTests(OOCliFixture):
('10.0.0.1', True, False)],
ssh_user='root',
variant_num=1,
- confirm_facts='y')
+ confirm_facts='y',
+ storage='10.0.0.1')
self.cli_args.append("install")
result = self.runner.invoke(cli.cli, self.cli_args,
input=cli_input)
@@ -998,7 +1016,8 @@ class AttendedCliTests(OOCliFixture):
('10.0.0.1', True, False)],
ssh_user='root',
variant_num=3,
- confirm_facts='y')
+ confirm_facts='y',
+ storage='10.1.0.1',)
self.cli_args.append("install")
result = self.runner.invoke(cli.cli, self.cli_args,
input=cli_input)
@@ -1019,7 +1038,8 @@ class AttendedCliTests(OOCliFixture):
('10.0.0.3', False, False)],
ssh_user='root',
variant_num=1,
- confirm_facts='y')
+ confirm_facts='y',
+ storage='10.1.0.1',)
self.cli_args.append("install")
self.cli_args.append("--gen-inventory")
result = self.runner.invoke(cli.cli, self.cli_args,
@@ -1032,7 +1052,7 @@ class AttendedCliTests(OOCliFixture):
self.assertEquals(0, len(run_playbook_mock.mock_calls))
written_config = read_yaml(self.config_file)
- self._verify_config_hosts(written_config, 3)
+ self._verify_config_hosts(written_config, 4)
inventory = ConfigParser.ConfigParser(allow_no_value=True)
inventory.read(os.path.join(self.work_dir, 'hosts'))