-rw-r--r--  .tito/packages/.readme | 3
-rw-r--r--  .tito/packages/openshift-ansible | 1
-rw-r--r--  .tito/packages/openshift-ansible-bin | 1
-rw-r--r--  .tito/packages/openshift-ansible-inventory | 1
-rw-r--r--  .tito/releasers.conf | 13
-rw-r--r--  .tito/tito.props | 5
-rw-r--r--  README.md | 4
-rw-r--r--  README_AEP.md | 37
-rw-r--r--  README_AWS.md | 4
-rw-r--r--  README_GCE.md | 17
-rw-r--r--  README_libvirt.md | 23
-rwxr-xr-x  bin/cluster | 54
-rw-r--r--  bin/openshift-ansible-bin.spec | 122
-rw-r--r--  inventory/byo/hosts.example | 27
-rwxr-xr-x  inventory/gce/hosts/gce.py | 9
-rwxr-xr-x  inventory/multi_ec2.py | 34
-rw-r--r--  inventory/multi_ec2.yaml.example | 2
-rw-r--r--  inventory/openshift-ansible-inventory.spec | 108
-rwxr-xr-x  inventory/openstack/hosts/nova.py | 2
-rw-r--r--  openshift-ansible.spec | 315
-rw-r--r--  playbooks/adhoc/atomic_openshift_tutorial_reset.yml | 77
-rw-r--r--  playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml | 11
-rwxr-xr-x  playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml | 115
-rw-r--r--  playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml | 2
-rw-r--r--  playbooks/adhoc/s3_registry/s3_registry.j2 | 4
-rw-r--r--  playbooks/adhoc/s3_registry/s3_registry.yml | 27
-rw-r--r--  playbooks/adhoc/uninstall.yml | 159
-rw-r--r--  playbooks/adhoc/upgrades/upgrade.yml | 25
-rw-r--r--  playbooks/aws/openshift-cluster/launch.yml | 5
-rw-r--r--  playbooks/aws/openshift-cluster/tasks/launch_instances.yml | 1
-rw-r--r--  playbooks/common/openshift-cluster/create_services.yml | 8
-rw-r--r--  playbooks/common/openshift-master/config.yml | 41
-rw-r--r--  playbooks/gce/openshift-cluster/config.yml | 5
-rw-r--r--  playbooks/gce/openshift-cluster/join_node.yml | 49
-rw-r--r--  playbooks/gce/openshift-cluster/launch.yml | 33
-rw-r--r--  playbooks/gce/openshift-cluster/list.yml | 4
-rw-r--r--  playbooks/gce/openshift-cluster/tasks/launch_instances.yml | 32
-rw-r--r--  playbooks/gce/openshift-cluster/terminate.yml | 55
-rw-r--r--  playbooks/gce/openshift-cluster/vars.yml | 8
-rw-r--r--  playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml | 2
-rw-r--r--  playbooks/libvirt/openshift-cluster/templates/network.xml | 2
-rw-r--r--  playbooks/libvirt/openshift-cluster/templates/user-data | 2
-rw-r--r--  roles/cockpit/defaults/main.yml | 5
-rw-r--r--  roles/cockpit/meta/main.yml | 15
-rw-r--r--  roles/cockpit/tasks/main.yml | 16
-rw-r--r--  roles/etcd/README.md | 2
-rw-r--r--  roles/etcd/defaults/main.yaml | 8
-rw-r--r--  roles/etcd/handlers/main.yml | 1
-rw-r--r--  roles/etcd/meta/main.yml | 2
-rw-r--r--  roles/etcd/tasks/main.yml | 12
-rw-r--r--  roles/etcd/templates/etcd.conf.j2 | 4
-rw-r--r--  roles/etcd_ca/meta/main.yml | 2
-rw-r--r--  roles/etcd_ca/tasks/main.yml | 30
-rw-r--r--  roles/etcd_ca/templates/openssl_append.j2 | 30
-rw-r--r--  roles/etcd_ca/vars/main.yml | 3
-rw-r--r--  roles/etcd_certificates/tasks/client.yml | 2
-rw-r--r--  roles/etcd_certificates/tasks/main.yml | 3
-rw-r--r--  roles/etcd_certificates/tasks/server.yml | 10
-rw-r--r--  roles/etcd_certificates/vars/main.yml | 11
-rw-r--r--  roles/etcd_common/README.md | 34
-rw-r--r--  roles/etcd_common/defaults/main.yml | 30
-rw-r--r--  roles/etcd_common/meta/main.yml | 16
-rw-r--r--  roles/etcd_common/tasks/main.yml | 13
-rw-r--r--  roles/etcd_common/templates/host_int_map.j2 | 13
-rw-r--r--  roles/lib_zabbix/library/zbx_item.py | 43
-rw-r--r--  roles/lib_zabbix/library/zbx_itemprototype.py | 6
-rw-r--r--  roles/lib_zabbix/library/zbx_trigger.py | 24
-rw-r--r--  roles/lib_zabbix/library/zbx_user_media.py | 3
-rw-r--r--  roles/lib_zabbix/tasks/create_template.yml | 12
-rw-r--r--  roles/openshift_ansible_inventory/tasks/main.yml | 7
-rw-r--r--  roles/openshift_ansible_inventory/templates/multi_ec2.yaml.j2 | 26
-rw-r--r--  roles/openshift_examples/defaults/main.yml | 6
-rwxr-xr-x  roles/openshift_examples/examples-sync.sh | 2
-rw-r--r--  roles/openshift_examples/files/examples/image-streams/image-streams-centos7.json | 18
-rw-r--r--  roles/openshift_examples/files/examples/image-streams/image-streams-rhel7.json | 15
-rw-r--r--  roles/openshift_examples/files/examples/quickstart-templates/cakephp-mysql.json | 9
-rw-r--r--  roles/openshift_examples/files/examples/quickstart-templates/cakephp.json | 9
-rw-r--r--  roles/openshift_examples/files/examples/quickstart-templates/dancer-mysql.json | 9
-rw-r--r--  roles/openshift_examples/files/examples/quickstart-templates/dancer.json | 11
-rw-r--r--  roles/openshift_examples/files/examples/quickstart-templates/nodejs-mongodb.json | 14
-rw-r--r--  roles/openshift_examples/files/examples/quickstart-templates/nodejs.json | 12
-rw-r--r--  roles/openshift_examples/files/examples/xpaas-templates/eap6-https-sti.json | 4
-rw-r--r--  roles/openshift_examples/tasks/main.yml | 2
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py | 118
-rw-r--r--  roles/openshift_facts/tasks/main.yml | 2
-rw-r--r--  roles/openshift_manage_node/tasks/main.yml | 8
-rw-r--r--  roles/openshift_master/handlers/main.yml | 2
-rw-r--r--  roles/openshift_master/tasks/main.yml | 36
-rw-r--r--  roles/openshift_master/templates/master.yaml.v1.j2 | 25
-rw-r--r--  roles/openshift_master/templates/scheduler.json.j2 | 2
-rw-r--r--  roles/openshift_master/templates/v1_partials/oauthConfig.j2 | 1
-rw-r--r--  roles/openshift_master_ca/tasks/main.yml | 3
-rw-r--r--  roles/openshift_master_certificates/tasks/main.yml | 5
-rw-r--r--  roles/openshift_node/defaults/main.yml | 3
-rw-r--r--  roles/openshift_node/handlers/main.yml | 1
-rw-r--r--  roles/openshift_node/tasks/main.yml | 21
-rw-r--r--  roles/openshift_node/tasks/storage_plugins/ceph.yml | 5
-rw-r--r--  roles/openshift_node/tasks/storage_plugins/glusterfs.yml | 12
-rw-r--r--  roles/openshift_node/tasks/storage_plugins/main.yml | 13
-rw-r--r--  roles/openshift_node/tasks/storage_plugins/nfs.yml | 7
-rw-r--r--  roles/openshift_node/templates/node.yaml.v1.j2 | 6
-rw-r--r--  roles/openshift_registry/tasks/main.yml | 11
-rw-r--r--  roles/openshift_router/tasks/main.yml | 11
-rw-r--r--  roles/os_zabbix/tasks/main.yml | 36
-rw-r--r--  roles/os_zabbix/vars/template_app_zabbix_agent.yml | 23
-rw-r--r--  roles/os_zabbix/vars/template_app_zabbix_server.yml | 412
-rw-r--r--  roles/os_zabbix/vars/template_docker.yml | 5
-rw-r--r--  roles/os_zabbix/vars/template_openshift_master.yml | 110
-rw-r--r--  roles/os_zabbix/vars/template_openshift_node.yml | 28
-rw-r--r--  roles/os_zabbix/vars/template_ops_tools.yml | 23
-rw-r--r--  roles/os_zabbix/vars/template_os_linux.yml | 65
-rw-r--r--  roles/os_zabbix/vars/template_performance_copilot.yml | 14
-rw-r--r--  utils/.gitignore | 45
-rw-r--r--  utils/README.txt | 24
-rw-r--r--  utils/docs/config.md | 72
-rw-r--r--  utils/etc/ansible.cfg | 25
-rw-r--r--  utils/setup.cfg | 5
-rw-r--r--  utils/setup.py | 85
-rwxr-xr-x  utils/site_assets/oo-install-bootstrap.sh | 86
-rw-r--r--  utils/site_assets/oo_install_launcher.README.txt | 22
-rw-r--r--  utils/src/DESCRIPTION.rst | 13
-rw-r--r--  utils/src/MANIFEST.in | 9
-rw-r--r--  utils/src/data/data_file | 1
-rw-r--r--  utils/src/ooinstall/__init__.py | 5
-rw-r--r--  utils/src/ooinstall/ansible_plugins/facts_callback.py | 88
-rw-r--r--  utils/src/ooinstall/cli_installer.py | 479
-rw-r--r--  utils/src/ooinstall/install_transactions.py | 133
-rw-r--r--  utils/src/ooinstall/oo_config.py | 195
-rw-r--r--  utils/src/ooinstall/variants.py | 74
-rw-r--r--  utils/test/__init__.py | 0
-rw-r--r--  utils/test/cli_installer_tests.py | 471
-rw-r--r--  utils/test/oo_config_tests.py | 158
-rw-r--r--  utils/workflows/enterprise_deploy/openshift.sh | 2
133 files changed, 4252 insertions(+), 691 deletions(-)
diff --git a/.tito/packages/.readme b/.tito/packages/.readme
new file mode 100644
index 000000000..b9411e2d1
--- /dev/null
+++ b/.tito/packages/.readme
@@ -0,0 +1,3 @@
+the .tito/packages directory contains metadata files
+named after their packages. Each file has the latest tagged
+version and the project's relative directory.
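Each of these metadata files is a single line of the form "<version>-<release> <relative-dir>", as the three new files below show. A throwaway Python sketch of how such a line splits apart:

    # "3.0.6-1 ./" is the content added for openshift-ansible below
    version_release, rel_dir = "3.0.6-1 ./".split()
    version, release = version_release.rsplit("-", 1)  # -> "3.0.6", "1"
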
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
new file mode 100644
index 000000000..92f545b25
--- /dev/null
+++ b/.tito/packages/openshift-ansible
@@ -0,0 +1 @@
+3.0.6-1 ./
diff --git a/.tito/packages/openshift-ansible-bin b/.tito/packages/openshift-ansible-bin
new file mode 100644
index 000000000..5275dfcf9
--- /dev/null
+++ b/.tito/packages/openshift-ansible-bin
@@ -0,0 +1 @@
+0.0.21-1 bin/
diff --git a/.tito/packages/openshift-ansible-inventory b/.tito/packages/openshift-ansible-inventory
new file mode 100644
index 000000000..85502438a
--- /dev/null
+++ b/.tito/packages/openshift-ansible-inventory
@@ -0,0 +1 @@
+0.0.11-1 inventory/
diff --git a/.tito/releasers.conf b/.tito/releasers.conf
new file mode 100644
index 000000000..f863ce9b1
--- /dev/null
+++ b/.tito/releasers.conf
@@ -0,0 +1,13 @@
+[brew]
+releaser = tito.release.DistGitReleaser
+branches = libra-rhel-7
+
+[ose-3.0]
+releaser = tito.release.DistGitReleaser
+branches = rhose-3.0-rhel-7
+srpm_disttag = .el7ose
+
+[aos-3.1]
+releaser = tito.release.DistGitReleaser
+branches = rhaos-3.1-rhel-7
+srpm_disttag = .el7aos
diff --git a/.tito/tito.props b/.tito/tito.props
new file mode 100644
index 000000000..eab3f190d
--- /dev/null
+++ b/.tito/tito.props
@@ -0,0 +1,5 @@
+[buildconfig]
+builder = tito.builder.Builder
+tagger = tito.tagger.VersionTagger
+changelog_do_not_remove_cherrypick = 0
+changelog_format = %s (%ae)
diff --git a/README.md b/README.md
index 489f9b8e9..635df36a0 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,6 @@
-#Openshift and Atomic Enterprise Ansible
+#OpenShift and Atomic Enterprise Ansible
-This repo contains Ansible code for Openshift and Atomic Enterprise.
+This repo contains Ansible code for OpenShift and Atomic Enterprise.
##Setup
- Install base dependencies:
diff --git a/README_AEP.md b/README_AEP.md
index e29888617..83e575ebe 100644
--- a/README_AEP.md
+++ b/README_AEP.md
@@ -76,39 +76,30 @@ ansible_ssh_user=root
# If ansible_ssh_user is not root, ansible_sudo must be set to true
#ansible_sudo=true
-# To deploy origin, change deployment_type to origin
-deployment_type=enterprise
+# See DEPLOYMENT_TYPES.md
+deployment_type=atomic-enterprise
-# Pre-release registry URL
-oreg_url=docker-buildvm-rhose.usersys.redhat.com:5000/openshift3/ose-${component}:${version}
+# Pre-release registry URL; note that in the future these images
+# may have an atomicenterprise/aep- prefix or so.
+oreg_url=rcm-img-docker01.build.eng.bos.redhat.com:5001/openshift3/ose-${component}:${version}
# Pre-release additional repo
-openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel',
-'baseurl':
-'http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterprise/3.0/latest/RH7-RHOSE-3.0/$basearch/os',
-'enabled': 1, 'gpgcheck': 0}]
-
-# Origin copr repo
-#openshift_additional_repos=[{'id': 'openshift-origin-copr', 'name':
-'OpenShift Origin COPR', 'baseurl':
-'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/',
-'enabled': 1, 'gpgcheck': 1, gpgkey:
-'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg'}]
+openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'baseurl': 'http://buildvm-devops.usersys.redhat.com/puddle/build/AtomicOpenShift/3.1/2015-10-27.1', 'enabled': 1, 'gpgcheck': 0}]
# host group for masters
[masters]
-ose3-master.example.com
+aep3-master.example.com
# host group for nodes
[nodes]
-ose3-node[1:2].example.com
+aep3-node[1:2].example.com
```
The hostnames above should resolve both from the hosts themselves and
the host where ansible is running (if different).
## Running the ansible playbooks
-From the atomic-enterprise-ansible checkout run:
+From the openshift-ansible checkout run:
```sh
ansible-playbook playbooks/byo/config.yml
```
@@ -120,16 +111,18 @@ inventory file use the -i option for ansible-playbook.
On the master host:
```sh
oadm router --create=true \
- --credentials=/etc/openshift/master/openshift-router.kubeconfig \
- --images='docker-buildvm-rhose.usersys.redhat.com:5000/openshift3/ose-${component}:${version}'
+ --service-account=router \
+ --credentials=/etc/origin/master/openshift-router.kubeconfig \
+ --images='rcm-img-docker01.build.eng.bos.redhat.com:5001/openshift3/ose-${component}:${version}'
```
#### Create the default docker-registry
On the master host:
```sh
oadm registry --create=true \
- --credentials=/etc/openshift/master/openshift-registry.kubeconfig \
- --images='docker-buildvm-rhose.usersys.redhat.com:5000/openshift3/ose-${component}:${version}' \
+ --service-account=registry \
+ --credentials=/etc/origin/master/openshift-registry.kubeconfig \
+ --images='rcm-img-docker01.build.eng.bos.redhat.com:5001/openshift3/ose-${component}:${version}' \
--mount-host=/var/lib/openshift/docker-registry
```
diff --git a/README_AWS.md b/README_AWS.md
index 3a5790eb3..6757e2892 100644
--- a/README_AWS.md
+++ b/README_AWS.md
@@ -38,8 +38,8 @@ You may also want to allow access from the outside world on the following ports:
• 80 - Web Apps
• 443 - Web Apps (https)
• 4789 - SDN / VXLAN
-• 8443 - Openshift Console
-• 10250 - kubelet
+• 8443 - OpenShift Console
+• 10250 - kubelet
```
diff --git a/README_GCE.md b/README_GCE.md
index f6c5138c1..50f8ade70 100644
--- a/README_GCE.md
+++ b/README_GCE.md
@@ -39,6 +39,13 @@ Create a gce.ini file for GCE
* gce_service_account_pem_file_path - Full path from previous steps
* gce_project_id - Found in "Projects", it lists all the gce projects you are associated with. The page lists their "Project Name" and "Project ID". You want the "Project ID"
+Mandatory customization variables (adjust the values to your tenant):
+* zone = europe-west1-d
+* network = default
+* gce_machine_type = n1-standard-2
+* gce_machine_image = preinstalled-slave-50g-v5
+
+
1. vi ~/.gce/gce.ini
1. make the contents look like this:
```
@@ -46,11 +53,15 @@ Create a gce.ini file for GCE
gce_service_account_email_address = long...@developer.gserviceaccount.com
gce_service_account_pem_file_path = /full/path/to/project_id-gce_key_hash.pem
gce_project_id = project_id
+zone = europe-west1-d
+network = default
+gce_machine_type = n1-standard-2
+gce_machine_image = preinstalled-slave-50g-v5
+
```
-1. Setup a sym link so that gce.py will pick it up (link must be in same dir as gce.py)
+1. Define the environment variable GCE_INI_PATH so gce.py can pick it up and bin/cluster can also read it
```
- cd openshift-ansible/inventory/gce
- ln -s ~/.gce/gce.ini gce.ini
+export GCE_INI_PATH=~/.gce/gce.ini
```
diff --git a/README_libvirt.md b/README_libvirt.md
index 1a710ff3b..fd0250781 100644
--- a/README_libvirt.md
+++ b/README_libvirt.md
@@ -8,16 +8,18 @@ This makes `libvirt` useful to develop, test and debug OpenShift and openshift-a
Install dependencies
--------------------
-1. Install [dnsmasq](http://www.thekelleys.org.uk/dnsmasq/doc.html)
-2. Install [ebtables](http://ebtables.netfilter.org/)
-3. Install [qemu](http://wiki.qemu.org/Main_Page)
-4. Install [libvirt](http://libvirt.org/)
-5. Enable and start the libvirt daemon, e.g:
+1. Install [ansible](http://www.ansible.com/)
+2. Install [dnsmasq](http://www.thekelleys.org.uk/dnsmasq/doc.html)
+3. Install [ebtables](http://ebtables.netfilter.org/)
+4. Install [qemu and qemu-system-x86](http://wiki.qemu.org/Main_Page)
+5. Install [libvirt-python and libvirt](http://libvirt.org/)
+6. Install [genisoimage](http://cdrkit.org/)
+7. Enable and start the libvirt daemon, e.g:
- `systemctl enable libvirtd`
- `systemctl start libvirtd`
-6. [Grant libvirt access to your user¹](https://libvirt.org/aclpolkit.html)
-7. Check that your `$HOME` is accessible to the qemu user²
-8. Configure dns resolution on the host³
+8. [Grant libvirt access to your user¹](https://libvirt.org/aclpolkit.html)
+9. Check that your `$HOME` is accessible to the qemu user²
+10. Configure dns resolution on the host³
#### ¹ Depending on your distribution, libvirt access may be denied by default or may require a password at each access.
@@ -75,7 +77,7 @@ In order to fix that issue, you have several possibilities:
* accessible by the qemu user.
* Grant the qemu user access to the storage pool.
-On Arch:
+On Arch or Fedora 22+:
```
setfacl -m g:kvm:--x ~
@@ -94,7 +96,8 @@ dns=dnsmasq
- Configure dnsmasq to use the Virtual Network router for example.com:
```sh
-sudo vi /etc/NetworkManager/dnsmasq.d/libvirt_dnsmasq.conf server=/example.com/192.168.55.1
+sudo vi /etc/NetworkManager/dnsmasq.d/libvirt_dnsmasq.conf
+server=/example.com/192.168.55.1
```
Test The Setup
diff --git a/bin/cluster b/bin/cluster
index 582327415..59a6755d3 100755
--- a/bin/cluster
+++ b/bin/cluster
@@ -5,6 +5,7 @@ import argparse
import ConfigParser
import os
import sys
+import subprocess
import traceback
@@ -53,7 +54,6 @@ class Cluster(object):
"""
Create an OpenShift cluster for given provider
:param args: command line arguments provided by user
- :return: exit status from run command
"""
env = {'cluster_id': args.cluster_id,
'deployment_type': self.get_deployment_type(args)}
@@ -65,65 +65,60 @@ class Cluster(object):
env['num_infra'] = args.infra
env['num_etcd'] = args.etcd
- return self.action(args, inventory, env, playbook)
+ self.action(args, inventory, env, playbook)
def terminate(self, args):
"""
Destroy OpenShift cluster
:param args: command line arguments provided by user
- :return: exit status from run command
"""
env = {'cluster_id': args.cluster_id,
'deployment_type': self.get_deployment_type(args)}
playbook = "playbooks/{}/openshift-cluster/terminate.yml".format(args.provider)
inventory = self.setup_provider(args.provider)
- return self.action(args, inventory, env, playbook)
+ self.action(args, inventory, env, playbook)
def list(self, args):
"""
List VMs in cluster
:param args: command line arguments provided by user
- :return: exit status from run command
"""
env = {'cluster_id': args.cluster_id,
'deployment_type': self.get_deployment_type(args)}
playbook = "playbooks/{}/openshift-cluster/list.yml".format(args.provider)
inventory = self.setup_provider(args.provider)
- return self.action(args, inventory, env, playbook)
+ self.action(args, inventory, env, playbook)
def config(self, args):
"""
Configure or reconfigure OpenShift across clustered VMs
:param args: command line arguments provided by user
- :return: exit status from run command
"""
env = {'cluster_id': args.cluster_id,
'deployment_type': self.get_deployment_type(args)}
playbook = "playbooks/{}/openshift-cluster/config.yml".format(args.provider)
inventory = self.setup_provider(args.provider)
- return self.action(args, inventory, env, playbook)
+ self.action(args, inventory, env, playbook)
def update(self, args):
"""
Update to latest OpenShift across clustered VMs
:param args: command line arguments provided by user
- :return: exit status from run command
"""
env = {'cluster_id': args.cluster_id,
'deployment_type': self.get_deployment_type(args)}
playbook = "playbooks/{}/openshift-cluster/update.yml".format(args.provider)
inventory = self.setup_provider(args.provider)
- return self.action(args, inventory, env, playbook)
+ self.action(args, inventory, env, playbook)
def service(self, args):
"""
Make the same service call across all nodes in the cluster
:param args: command line arguments provided by user
- :return: exit status from run command
"""
env = {'cluster_id': args.cluster_id,
'deployment_type': self.get_deployment_type(args),
@@ -132,7 +127,7 @@ class Cluster(object):
playbook = "playbooks/{}/openshift-cluster/service.yml".format(args.provider)
inventory = self.setup_provider(args.provider)
- return self.action(args, inventory, env, playbook)
+ self.action(args, inventory, env, playbook)
def setup_provider(self, provider):
"""
@@ -142,10 +137,14 @@ class Cluster(object):
"""
config = ConfigParser.ConfigParser()
if 'gce' == provider:
- config.readfp(open('inventory/gce/hosts/gce.ini'))
+ gce_ini_default_path = os.path.join(
+ 'inventory/gce/hosts/gce.ini')
+ gce_ini_path = os.environ.get('GCE_INI_PATH', gce_ini_default_path)
+ if os.path.exists(gce_ini_path):
+ config.readfp(open(gce_ini_path))
- for key in config.options('gce'):
- os.environ[key] = config.get('gce', key)
+ for key in config.options('gce'):
+ os.environ[key] = config.get('gce', key)
inventory = '-i inventory/gce/hosts'
elif 'aws' == provider:
@@ -183,7 +182,6 @@ class Cluster(object):
:param inventory: derived provider library
:param env: environment variables for kubernetes
:param playbook: ansible playbook to execute
- :return: exit status from ansible-playbook command
"""
verbose = ''
@@ -213,7 +211,18 @@ class Cluster(object):
sys.stderr.write('RUN [{}]\n'.format(command))
sys.stderr.flush()
- return os.system(command)
+ try:
+ subprocess.check_call(command, shell=True)
+ except subprocess.CalledProcessError as exc:
+ raise ActionFailed("ACTION [{}] failed: {}"
+ .format(args.action, exc))
+
+
+class ActionFailed(Exception):
+ """
+ Raised when action failed.
+ """
+ pass
if __name__ == '__main__':
@@ -328,14 +337,11 @@ if __name__ == '__main__':
sys.stderr.write('\nACTION [update] aborted by user!\n')
exit(1)
- status = 1
try:
- status = args.func(args)
- if status != 0:
- sys.stderr.write("ACTION [{}] failed with exit status {}\n".format(args.action, status))
- except Exception, e:
+ args.func(args)
+ except Exception as exc:
if args.verbose:
traceback.print_exc(file=sys.stderr)
else:
- sys.stderr.write("{}\n".format(e))
- exit(status)
+ print >>sys.stderr, exc
+ exit(1)
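With os.system() replaced by subprocess.check_call(), a failing playbook now surfaces as an exception instead of a hand-threaded exit status, which is why the action methods above no longer return anything. A minimal sketch of the pattern (the command string is just an example):

    import subprocess

    try:
        # check_call raises CalledProcessError on any non-zero exit status
        subprocess.check_call("ansible-playbook --version", shell=True)
    except subprocess.CalledProcessError as exc:
        print("ACTION [example] failed: {}".format(exc))
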
diff --git a/bin/openshift-ansible-bin.spec b/bin/openshift-ansible-bin.spec
deleted file mode 100644
index d90810bc3..000000000
--- a/bin/openshift-ansible-bin.spec
+++ /dev/null
@@ -1,122 +0,0 @@
-Summary: OpenShift Ansible Scripts for working with metadata hosts
-Name: openshift-ansible-bin
-Version: 0.0.19
-Release: 1%{?dist}
-License: ASL 2.0
-URL: https://github.com/openshift/openshift-ansible
-Source0: %{name}-%{version}.tar.gz
-Requires: python2, openshift-ansible-inventory
-BuildRequires: python2-devel
-BuildArch: noarch
-
-%description
-Scripts to make it nicer when working with hosts that are defined only by metadata.
-
-%prep
-%setup -q
-
-%build
-
-%install
-mkdir -p %{buildroot}%{_bindir}
-mkdir -p %{buildroot}%{python_sitelib}/openshift_ansible
-mkdir -p %{buildroot}/etc/bash_completion.d
-mkdir -p %{buildroot}/etc/openshift_ansible
-
-cp -p ossh oscp opssh opscp ohi %{buildroot}%{_bindir}
-cp -pP openshift_ansible/* %{buildroot}%{python_sitelib}/openshift_ansible
-
-# Make it so we can load multi_ec2.py as a library.
-rm %{buildroot}%{python_sitelib}/openshift_ansible/multi_ec2.py*
-ln -sf /usr/share/ansible/inventory/multi_ec2.py %{buildroot}%{python_sitelib}/openshift_ansible/multi_ec2.py
-ln -sf /usr/share/ansible/inventory/multi_ec2.pyc %{buildroot}%{python_sitelib}/openshift_ansible/multi_ec2.pyc
-
-cp -p ossh_bash_completion %{buildroot}/etc/bash_completion.d
-
-cp -p openshift_ansible.conf.example %{buildroot}/etc/openshift_ansible/openshift_ansible.conf
-
-%files
-%{_bindir}/*
-%{python_sitelib}/openshift_ansible/
-/etc/bash_completion.d/*
-%config(noreplace) /etc/openshift_ansible/
-
-%changelog
-* Thu Aug 20 2015 Kenny Woodson <kwoodson@redhat.com> 0.0.19-1
-- Updated to show private ips when doing a list (kwoodson@redhat.com)
-- Updated to read config first and default to users home dir
- (kwoodson@redhat.com)
-- Prevent Ansible from serializing tasks (lhuard@amadeus.com)
-- Infra node support (whearn@redhat.com)
-- Playbook updates for clustered etcd (jdetiber@redhat.com)
-- bin/cluster supports boto credentials as well as env variables
- (jdetiber@redhat.com)
-- Merge pull request #291 from lhuard1A/profile
- (twiest@users.noreply.github.com)
-- Add a generic mechanism for passing options (lhuard@amadeus.com)
-- Infrastructure - Validate AWS environment before calling playbooks
- (jhonce@redhat.com)
-- Add a --profile option to spot which task takes more time
- (lhuard@amadeus.com)
-- changed Openshift to OpenShift (twiest@redhat.com)
-
-* Tue Jun 09 2015 Kenny Woodson <kwoodson@redhat.com> 0.0.18-1
-- Implement OpenStack provider (lhuard@amadeus.com)
-- * Update defaults and examples to track core concepts guide
- (jhonce@redhat.com)
-- Issue 119 - Add support for ~/.openshift-ansible (jhonce@redhat.com)
-- Infrastructure - Add service action to bin/cluster (jhonce@redhat.com)
-
-* Fri May 15 2015 Thomas Wiest <twiest@redhat.com> 0.0.17-1
-- fixed the openshift-ansible-bin build (twiest@redhat.com)
-
-* Fri May 15 2015 Thomas Wiest <twiest@redhat.com> 0.0.14-1
-- Command line tools import multi_ec2 as lib (kwoodson@redhat.com)
-- Adding cache location for multi ec2 (kwoodson@redhat.com)
-* Thu May 07 2015 Thomas Wiest <twiest@redhat.com> 0.0.13-1
-- added '-e all' to ohi and fixed pylint errors. (twiest@redhat.com)
-
-* Tue May 05 2015 Thomas Wiest <twiest@redhat.com> 0.0.12-1
-- fixed opssh and opscp to allow just environment or just host-type.
- (twiest@redhat.com)
-
-* Mon May 04 2015 Thomas Wiest <twiest@redhat.com> 0.0.11-1
-- changed opssh to a bash script using ohi to make it easier to maintain, and
- to expose all of the pssh features directly. (twiest@redhat.com)
-- Added --user option to ohi to pre-pend the username in the hostlist output.
- (twiest@redhat.com)
-- Added utils.py that contains a normalize_dnsname function good for sorting
- dns names to a human readable list. (twiest@redhat.com)
-
-* Thu Apr 30 2015 Thomas Wiest <twiest@redhat.com> 0.0.10-1
-- added --list-host-types option to opscp (twiest@redhat.com)
-
-* Thu Apr 30 2015 Thomas Wiest <twiest@redhat.com> 0.0.9-1
-- added opscp (twiest@redhat.com)
-* Mon Apr 13 2015 Thomas Wiest <twiest@redhat.com> 0.0.8-1
-- fixed bug in opssh where it wouldn't actually run pssh (twiest@redhat.com)
-
-* Mon Apr 13 2015 Thomas Wiest <twiest@redhat.com> 0.0.7-1
-- added the ability to run opssh and ohi on all hosts in an environment, as
- well as all hosts of the same host-type regardless of environment
- (twiest@redhat.com)
-- added ohi (twiest@redhat.com)
-* Thu Apr 09 2015 Thomas Wiest <twiest@redhat.com> 0.0.6-1
-- fixed bug where opssh would throw an exception if pssh returned a non-zero
- exit code (twiest@redhat.com)
-
-* Wed Apr 08 2015 Thomas Wiest <twiest@redhat.com> 0.0.5-1
-- fixed the opssh default output behavior to be consistent with pssh. Also
- fixed a bug in how directories are named for --outdir and --errdir.
- (twiest@redhat.com)
-* Tue Mar 31 2015 Thomas Wiest <twiest@redhat.com> 0.0.4-1
-- Fixed when tag was missing and added opssh completion (kwoodson@redhat.com)
-
-* Mon Mar 30 2015 Thomas Wiest <twiest@redhat.com> 0.0.3-1
-- created a python package named openshift_ansible (twiest@redhat.com)
-
-* Mon Mar 30 2015 Thomas Wiest <twiest@redhat.com> 0.0.2-1
-- added config file support to opssh, ossh, and oscp (twiest@redhat.com)
-* Tue Mar 24 2015 Thomas Wiest <twiest@redhat.com> 0.0.1-1
-- new package built with tito
-
diff --git a/inventory/byo/hosts.example b/inventory/byo/hosts.example
index c2c5090f9..ad19fe116 100644
--- a/inventory/byo/hosts.example
+++ b/inventory/byo/hosts.example
@@ -41,9 +41,22 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Allow all auth
#openshift_master_identity_providers=[{'name': 'allow_all', 'login': 'true', 'challenge': 'true', 'kind': 'AllowAllPasswordIdentityProvider'}]
+# Project Configuration
+#osm_project_request_message=''
+#osm_project_request_template=''
+#osm_mcs_allocator_range='s0:/2'
+#osm_mcs_labels_per_project=5
+#osm_uid_allocator_range='1000000000-1999999999/10000'
+
# Configure Fluentd
#use_fluentd=true
+# Enable cockpit
+#osm_use_cockpit=true
+#
+# Set cockpit plugins
+#osm_cockpit_plugins=['cockpit-kubernetes']
+
# master cluster ha variables using pacemaker or RHEL HA
#openshift_master_cluster_password=openshift_cluster
#openshift_master_cluster_vip=192.168.133.25
@@ -69,6 +82,20 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# default project node selector
#osm_default_node_selector='region=primary'
+# default storage plugin dependencies to install, by default the ceph and
+# glusterfs plugin dependencies will be installed, if available.
+#osn_storage_plugin_deps=['ceph','glusterfs']
+
+# default selectors for router and registry services
+# openshift_router_selector='region=infra'
+# openshift_registry_selector='region=infra'
+
+# Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet')
+# os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant'
+
+# Disable the OpenShift SDN plugin
+# openshift_use_openshift_sdn=False
+
# set RPM version for debugging purposes
#openshift_pkg_version=-3.0.0.0
diff --git a/inventory/gce/hosts/gce.py b/inventory/gce/hosts/gce.py
index 3403f735e..6ed12e011 100755
--- a/inventory/gce/hosts/gce.py
+++ b/inventory/gce/hosts/gce.py
@@ -120,6 +120,7 @@ class GceInventory(object):
os.path.dirname(os.path.realpath(__file__)), "gce.ini")
gce_ini_path = os.environ.get('GCE_INI_PATH', gce_ini_default_path)
+
# Create a ConfigParser.
# This provides empty defaults to each key, so that environment
# variable configuration (as opposed to INI configuration) is able
@@ -173,6 +174,7 @@ class GceInventory(object):
args[1] = os.environ.get('GCE_PEM_FILE_PATH', args[1])
kwargs['project'] = os.environ.get('GCE_PROJECT', kwargs['project'])
+
# Retrieve and return the GCE driver.
gce = get_driver(Provider.GCE)(*args, **kwargs)
gce.connection.user_agent_append(
@@ -211,7 +213,8 @@ class GceInventory(object):
'gce_image': inst.image,
'gce_machine_type': inst.size,
'gce_private_ip': inst.private_ips[0],
- 'gce_public_ip': inst.public_ips[0],
+ # Hosts don't always have a public IP name
+ #'gce_public_ip': inst.public_ips[0],
'gce_name': inst.name,
'gce_description': inst.extra['description'],
'gce_status': inst.extra['status'],
@@ -219,8 +222,8 @@ class GceInventory(object):
'gce_tags': inst.extra['tags'],
'gce_metadata': md,
'gce_network': net,
- # Hosts don't have a public name, so we add an IP
- 'ansible_ssh_host': inst.public_ips[0]
+ # Hosts don't always have a public IP name
+ #'ansible_ssh_host': inst.public_ips[0]
}
def get_instance(self, instance_name):
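The two public-IP fields are commented out because libcloud instances do not always carry a public address, so indexing public_ips[0] unconditionally can raise IndexError. A guarded alternative (a sketch only, not what this commit does; inst stands in for the libcloud Node objects gce.py iterates over):

    def node_public_ip(inst):
        # Return the first public IP when present, else None.
        return inst.public_ips[0] if inst.public_ips else None
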
diff --git a/inventory/multi_ec2.py b/inventory/multi_ec2.py
index 2cbf33473..98dde3f3c 100755
--- a/inventory/multi_ec2.py
+++ b/inventory/multi_ec2.py
@@ -239,22 +239,34 @@ class MultiEc2(object):
def apply_account_config(self, acc_config):
''' Apply account config settings
'''
- if not acc_config.has_key('hostvars') and not acc_config.has_key('all_group'):
- return
-
results = self.all_ec2_results[acc_config['name']]
- # Update each hostvar with the newly desired key: value
- for host_property, value in acc_config['hostvars'].items():
+
+ # Update each hostvar with the newly desired key: value from extra_*
+ for _extra in ['extra_groups', 'extra_vars']:
+ for new_var, value in acc_config.get(_extra, {}).items():
+ # Verify the account results look sane
+ # by checking for these keys ('_meta' and 'hostvars' exist)
+ if results.has_key('_meta') and results['_meta'].has_key('hostvars'):
+ for data in results['_meta']['hostvars'].values():
+ data[str(new_var)] = str(value)
+
+ # Add this group
+ if _extra == 'extra_groups' and results.has_key(acc_config['all_group']):
+ results["%s_%s" % (new_var, value)] = \
+ copy.copy(results[acc_config['all_group']])
+
+ # Clone groups goes here
+ for to_name, from_name in acc_config.get('clone_groups', {}).items():
+ if results.has_key(from_name):
+ results[to_name] = copy.copy(results[from_name])
+
+ # Clone vars goes here
+ for to_name, from_name in acc_config.get('clone_vars', {}).items():
# Verify the account results look sane
# by checking for these keys ('_meta' and 'hostvars' exist)
if results.has_key('_meta') and results['_meta'].has_key('hostvars'):
for data in results['_meta']['hostvars'].values():
- data[str(host_property)] = str(value)
-
- # Add this group
- if results.has_key(acc_config['all_group']):
- results["%s_%s" % (host_property, value)] = \
- copy.copy(results[acc_config['all_group']])
+ data[str(to_name)] = data.get(str(from_name), 'nil')
# store the results back into all_ec2_results
self.all_ec2_results[acc_config['name']] = results
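Roughly, the four account options now behave as follows on a merged inventory (toy data; the real code walks self.all_ec2_results and first sanity-checks that '_meta' and 'hostvars' exist):

    import copy

    results = {
        '_meta': {'hostvars': {'host1': {'oo_env': 'prod'}}},
        'ec2': ['host1'],  # the account's all_group
    }

    # extra_vars / extra_groups: stamp a var onto every host...
    for data in results['_meta']['hostvars'].values():
        data['cloud'] = 'aws'
    # ...and, for extra_groups only, also create a "<var>_<value>" group
    results['cloud_aws'] = copy.copy(results['ec2'])

    # clone_groups: copy an existing group under a new name
    results['ec2_clone'] = copy.copy(results['ec2'])

    # clone_vars: copy each host's value of one var into another ('nil' default)
    for data in results['_meta']['hostvars'].values():
        data['env'] = data.get('oo_env', 'nil')
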
diff --git a/inventory/multi_ec2.yaml.example b/inventory/multi_ec2.yaml.example
index 99f157b11..bbd81ad20 100644
--- a/inventory/multi_ec2.yaml.example
+++ b/inventory/multi_ec2.yaml.example
@@ -18,7 +18,7 @@ accounts:
AWS_ACCESS_KEY_ID: XXXXXXXXXXXXXXXXXXXX
AWS_SECRET_ACCESS_KEY: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
all_group: ec2
- hostvars:
+ extra_vars:
cloud: aws
account: aws1
diff --git a/inventory/openshift-ansible-inventory.spec b/inventory/openshift-ansible-inventory.spec
deleted file mode 100644
index f163f865a..000000000
--- a/inventory/openshift-ansible-inventory.spec
+++ /dev/null
@@ -1,108 +0,0 @@
-Summary: OpenShift Ansible Inventories
-Name: openshift-ansible-inventory
-Version: 0.0.9
-Release: 1%{?dist}
-License: ASL 2.0
-URL: https://github.com/openshift/openshift-ansible
-Source0: %{name}-%{version}.tar.gz
-Requires: python2
-BuildRequires: python2-devel
-BuildArch: noarch
-
-%description
-Ansible Inventories used with the openshift-ansible scripts and playbooks.
-
-%prep
-%setup -q
-
-%build
-
-%install
-mkdir -p %{buildroot}/etc/ansible
-mkdir -p %{buildroot}/usr/share/ansible/inventory
-mkdir -p %{buildroot}/usr/share/ansible/inventory/aws
-mkdir -p %{buildroot}/usr/share/ansible/inventory/gce
-
-cp -p multi_ec2.py %{buildroot}/usr/share/ansible/inventory
-cp -p multi_ec2.yaml.example %{buildroot}/etc/ansible/multi_ec2.yaml
-cp -p aws/hosts/ec2.py %{buildroot}/usr/share/ansible/inventory/aws
-cp -p gce/hosts/gce.py %{buildroot}/usr/share/ansible/inventory/gce
-
-%files
-%config(noreplace) /etc/ansible/*
-%dir /usr/share/ansible/inventory
-/usr/share/ansible/inventory/multi_ec2.py*
-/usr/share/ansible/inventory/aws/ec2.py*
-/usr/share/ansible/inventory/gce/gce.py*
-
-%changelog
-* Thu Aug 20 2015 Kenny Woodson <kwoodson@redhat.com> 0.0.9-1
-- Merge pull request #408 from sdodson/docker-buildvm (bleanhar@redhat.com)
-- Merge pull request #428 from jtslear/issue-383
- (twiest@users.noreply.github.com)
-- Merge pull request #407 from aveshagarwal/ae-ansible-merge-auth
- (bleanhar@redhat.com)
-- Enable htpasswd by default in the example hosts file. (avagarwa@redhat.com)
-- Add support for setting default node selector (jdetiber@redhat.com)
-- Merge pull request #429 from spinolacastro/custom_cors (bleanhar@redhat.com)
-- Updated to read config first and default to users home dir
- (kwoodson@redhat.com)
-- Fix Custom Cors (spinolacastro@gmail.com)
-- Revert "namespace the byo inventory so the group names aren't so generic"
- (sdodson@redhat.com)
-- Removes hardcoded python2 (jtslear@gmail.com)
-- namespace the byo inventory so the group names aren't so generic
- (admiller@redhat.com)
-- docker-buildvm-rhose is dead (sdodson@redhat.com)
-- Add support for setting routingConfig:subdomain (jdetiber@redhat.com)
-- Initial HA master (jdetiber@redhat.com)
-- Make it clear that the byo inventory file is just an example
- (jdetiber@redhat.com)
-- Playbook updates for clustered etcd (jdetiber@redhat.com)
-- Update for RC2 changes (sdodson@redhat.com)
-- Templatize configs and 0.5.2 changes (jdetiber@redhat.com)
-
-* Tue Jun 09 2015 Kenny Woodson <kwoodson@redhat.com> 0.0.8-1
-- Added more verbosity when error happens. Also fixed a bug.
- (kwoodson@redhat.com)
-- Implement OpenStack provider (lhuard@amadeus.com)
-- * rename openshift_registry_url oreg_url * rename option_images to
- _{oreg|ortr}_images (jhonce@redhat.com)
-- Fix the remaining pylint warnings (lhuard@amadeus.com)
-- Fix some of the pylint warnings (lhuard@amadeus.com)
-- [libvirt cluster] Use net-dhcp-leases to find VMs’ IPs (lhuard@amadeus.com)
-- fixed the openshift-ansible-bin build (twiest@redhat.com)
-
-* Fri May 15 2015 Kenny Woodson <kwoodson@redhat.com> 0.0.7-1
-- Making multi_ec2 into a library (kwoodson@redhat.com)
-
-* Wed May 13 2015 Thomas Wiest <twiest@redhat.com> 0.0.6-1
-- Added support for grouping and a bug fix. (kwoodson@redhat.com)
-
-* Tue May 12 2015 Thomas Wiest <twiest@redhat.com> 0.0.5-1
-- removed ec2.ini from the openshift-ansible-inventory.spec file so that we're
- not dictating what the ec2.ini file should look like. (twiest@redhat.com)
-- Added capability to pass in ec2.ini file. (kwoodson@redhat.com)
-
-* Thu May 07 2015 Thomas Wiest <twiest@redhat.com> 0.0.4-1
-- Fixed a bug due to renaming of variables. (kwoodson@redhat.com)
-
-* Thu May 07 2015 Thomas Wiest <twiest@redhat.com> 0.0.3-1
-- fixed build problems with openshift-ansible-inventory.spec
- (twiest@redhat.com)
-- Allow option in multi_ec2 to set cache location. (kwoodson@redhat.com)
-- Add ansible_connection=local to localhost in inventory (jdetiber@redhat.com)
-- Adding refresh-cache option and cleanup for pylint. Also updated for
- aws/hosts/ being added. (kwoodson@redhat.com)
-
-* Thu Mar 26 2015 Thomas Wiest <twiest@redhat.com> 0.0.2-1
-- added the ability to have a config file in /etc/openshift_ansible to
- multi_ec2.py. (twiest@redhat.com)
-- Merge pull request #97 from jwhonce/wip/cluster (jhonce@redhat.com)
-- gce inventory/playbook updates for node registration changes
- (jdetiber@redhat.com)
-- Various fixes (jdetiber@redhat.com)
-
-* Tue Mar 24 2015 Thomas Wiest <twiest@redhat.com> 0.0.1-1
-- new package built with tito
-
diff --git a/inventory/openstack/hosts/nova.py b/inventory/openstack/hosts/nova.py
index d5bd8d1ee..3197a57bc 100755
--- a/inventory/openstack/hosts/nova.py
+++ b/inventory/openstack/hosts/nova.py
@@ -34,7 +34,7 @@ except ImportError:
# executed with no parameters, return the list of
# all groups and hosts
-NOVA_CONFIG_FILES = [os.getcwd() + "/nova.ini",
+NOVA_CONFIG_FILES = [os.path.join(os.path.dirname(os.path.realpath(__file__)), "nova.ini"),
os.path.expanduser(os.environ.get('ANSIBLE_CONFIG', "~/nova.ini")),
"/etc/ansible/nova.ini"]
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
new file mode 100644
index 000000000..8d79c80c6
--- /dev/null
+++ b/openshift-ansible.spec
@@ -0,0 +1,315 @@
+# %commit is intended to be set by tito custom builders provided
+# in the .tito/lib directory. The values in this spec file will not be kept up to date.
+%{!?commit:
+%global commit c64d09e528ca433832c6b6e6f5c7734a9cc8ee6f
+}
+
+Name: openshift-ansible
+Version: 3.0.6
+Release: 1%{?dist}
+Summary: Openshift and Atomic Enterprise Ansible
+License: ASL 2.0
+URL: https://github.com/openshift/openshift-ansible
+Source0: https://github.com/openshift/openshift-ansible/archive/%{commit}/%{name}-%{version}.tar.gz
+BuildArch: noarch
+
+Requires: ansible
+
+%description
+Openshift and Atomic Enterprise Ansible
+
+This repo contains Ansible code and playbooks
+for Openshift and Atomic Enterprise.
+
+%prep
+%setup -q
+
+%build
+
+# atomic-openshift-utils install
+pushd utils
+%{__python} setup.py build
+popd
+
+%install
+# Base openshift-ansible install
+mkdir -p %{buildroot}%{_datadir}/%{name}
+mkdir -p %{buildroot}%{_datadir}/ansible/%{name}
+mkdir -p %{buildroot}%{_datadir}/ansible_plugins
+
+# openshift-ansible-bin install
+mkdir -p %{buildroot}%{_bindir}
+mkdir -p %{buildroot}%{python_sitelib}/openshift_ansible
+mkdir -p %{buildroot}/etc/bash_completion.d
+mkdir -p %{buildroot}/etc/openshift_ansible
+cp -p bin/{ossh,oscp,opssh,opscp,ohi} %{buildroot}%{_bindir}
+cp -pP bin/openshift_ansible/* %{buildroot}%{python_sitelib}/openshift_ansible
+cp -p bin/ossh_bash_completion %{buildroot}/etc/bash_completion.d
+cp -p bin/openshift_ansible.conf.example %{buildroot}/etc/openshift_ansible/openshift_ansible.conf
+# Fix links
+rm -f %{buildroot}%{python_sitelib}/openshift_ansible/multi_ec2.py
+rm -f %{buildroot}%{python_sitelib}/openshift_ansible/aws
+ln -sf %{_datadir}/ansible/inventory/multi_ec2.py %{buildroot}%{python_sitelib}/openshift_ansible/multi_ec2.py
+ln -sf %{_datadir}/ansible/inventory/aws %{buildroot}%{python_sitelib}/openshift_ansible/aws
+
+# openshift-ansible-docs install
+# -docs are currently just %doc, no install needed
+
+# openshift-ansible-inventory install
+mkdir -p %{buildroot}/etc/ansible
+mkdir -p %{buildroot}%{_datadir}/ansible/inventory
+mkdir -p %{buildroot}%{_datadir}/ansible/inventory/aws
+mkdir -p %{buildroot}%{_datadir}/ansible/inventory/gce
+cp -p inventory/multi_ec2.py %{buildroot}%{_datadir}/ansible/inventory
+cp -p inventory/multi_ec2.yaml.example %{buildroot}/etc/ansible/multi_ec2.yaml
+cp -p inventory/aws/hosts/ec2.py %{buildroot}%{_datadir}/ansible/inventory/aws
+cp -p inventory/gce/hosts/gce.py %{buildroot}%{_datadir}/ansible/inventory/gce
+
+# openshift-ansible-playbooks install
+cp -rp playbooks %{buildroot}%{_datadir}/ansible/%{name}/
+
+# openshift-ansible-roles install
+cp -rp roles %{buildroot}%{_datadir}/ansible/%{name}/
+
+# openshift-ansible-filter-plugins install
+cp -rp filter_plugins %{buildroot}%{_datadir}/ansible_plugins/
+
+# openshift-ansible-lookup-plugins install
+cp -rp lookup_plugins %{buildroot}%{_datadir}/ansible_plugins/
+
+# atomic-openshift-utils install
+pushd utils
+%{__python} setup.py install --skip-build --root %{buildroot}
+# Remove this line once the name change has happened
+mv -f %{buildroot}%{_bindir}/oo-install %{buildroot}%{_bindir}/atomic-openshift-installer
+popd
+
+# Base openshift-ansible files
+%files
+%doc LICENSE.md README*
+%dir %{_datadir}/ansible/%{name}
+
+# ----------------------------------------------------------------------------------
+# openshift-ansible-bin subpackage
+# ----------------------------------------------------------------------------------
+%package bin
+Summary: Openshift and Atomic Enterprise Ansible Scripts for working with metadata hosts
+Requires: %{name}-inventory
+Requires: python2
+BuildRequires: python2-devel
+BuildArch: noarch
+
+%description bin
+Scripts to make it nicer when working with hosts that are defined only by metadata.
+
+%files bin
+%{_bindir}/*
+%{python_sitelib}/openshift_ansible/
+/etc/bash_completion.d/*
+%config(noreplace) /etc/openshift_ansible/
+
+
+# ----------------------------------------------------------------------------------
+# openshift-ansible-docs subpackage
+# ----------------------------------------------------------------------------------
+%package docs
+Summary: Openshift and Atomic Enterprise Ansible documents
+Requires: %{name}
+BuildArch: noarch
+
+%description docs
+%{summary}.
+
+%files docs
+%doc docs
+
+# ----------------------------------------------------------------------------------
+# openshift-ansible-inventory subpackage
+# ----------------------------------------------------------------------------------
+%package inventory
+Summary: Openshift and Atomic Enterprise Ansible Inventories
+Requires: python2
+BuildArch: noarch
+
+%description inventory
+Ansible Inventories used with the openshift-ansible scripts and playbooks.
+
+%files inventory
+%config(noreplace) /etc/ansible/*
+%dir %{_datadir}/ansible/inventory
+%{_datadir}/ansible/inventory/multi_ec2.py*
+
+%package inventory-aws
+Summary: Openshift and Atomic Enterprise Ansible Inventories for AWS
+Requires: %{name}-inventory
+Requires: python-boto
+BuildArch: noarch
+
+%description inventory-aws
+Ansible Inventories for AWS used with the openshift-ansible scripts and playbooks.
+
+%files inventory-aws
+%{_datadir}/ansible/inventory/aws/ec2.py*
+
+%package inventory-gce
+Summary: Openshift and Atomic Enterprise Ansible Inventories for GCE
+Requires: %{name}-inventory
+Requires: python-libcloud >= 0.13
+BuildArch: noarch
+
+%description inventory-gce
+Ansible Inventories for GCE used with the openshift-ansible scripts and playbooks.
+
+%files inventory-gce
+%{_datadir}/ansible/inventory/gce/gce.py*
+
+
+# ----------------------------------------------------------------------------------
+# openshift-ansible-playbooks subpackage
+# ----------------------------------------------------------------------------------
+%package playbooks
+Summary: Openshift and Atomic Enterprise Ansible Playbooks
+Requires: %{name}
+BuildArch: noarch
+
+%description playbooks
+%{summary}.
+
+%files playbooks
+%{_datadir}/ansible/%{name}/playbooks
+
+
+# ----------------------------------------------------------------------------------
+# openshift-ansible-roles subpackage
+# ----------------------------------------------------------------------------------
+%package roles
+Summary: Openshift and Atomic Enterprise Ansible roles
+Requires: %{name}
+BuildArch: noarch
+
+%description roles
+%{summary}.
+
+%files roles
+%{_datadir}/ansible/%{name}/roles
+
+
+# ----------------------------------------------------------------------------------
+# openshift-ansible-filter-plugins subpackage
+# ----------------------------------------------------------------------------------
+%package filter-plugins
+Summary: Openshift and Atomic Enterprise Ansible filter plugins
+Requires: %{name}
+BuildArch: noarch
+
+%description filter-plugins
+%{summary}.
+
+%files filter-plugins
+%{_datadir}/ansible_plugins/filter_plugins
+
+
+# ----------------------------------------------------------------------------------
+# openshift-ansible-lookup-plugins subpackage
+# ----------------------------------------------------------------------------------
+%package lookup-plugins
+Summary: Openshift and Atomic Enterprise Ansible lookup plugins
+Requires: %{name}
+BuildArch: noarch
+
+%description lookup-plugins
+%{summary}.
+
+%files lookup-plugins
+%{_datadir}/ansible_plugins/lookup_plugins
+
+# ----------------------------------------------------------------------------------
+# atomic-openshift-utils subpackage
+# ----------------------------------------------------------------------------------
+
+%package -n atomic-openshift-utils
+Summary: Atomic OpenShift Utilities
+BuildRequires: python-setuptools
+Requires: ansible
+Requires: python-click
+Requires: python-setuptools
+Requires: PyYAML
+BuildArch: noarch
+
+%description -n atomic-openshift-utils
+Atomic OpenShift Utilities includes
+ - atomic-openshift-installer
+ - other utilities
+
+%files -n atomic-openshift-utils
+%{python_sitelib}/ooinstall*
+%{_bindir}/atomic-openshift-installer
+
+
+%changelog
+* Fri Oct 30 2015 Kenny Woodson <kwoodson@redhat.com> 3.0.6-1
+- Adding python-boto and python-libcloud to openshift-ansible-inventory
+ dependency (kwoodson@redhat.com)
+- Use more specific enterprise version for version_greater_than_3_1_or_1_1.
+ (abutcher@redhat.com)
+- Conditionalizing the support for the v1beta3 api (bleanhar@redhat.com)
+
+* Thu Oct 29 2015 Kenny Woodson <kwoodson@redhat.com> 3.0.5-1
+- Updating multi_ec2 to support extra_vars and extra_groups
+ (kwoodson@redhat.com)
+- Removing the template and doing to_nice_yaml instead (kwoodson@redhat.com)
+- README_AEP.md: update instructions for creating router and registry
+ (jlebon@redhat.com)
+- README_AEP: Various fixes (walters@verbum.org)
+- Fixing for extra_vars rename. (kwoodson@redhat.com)
+- make storage_plugin_deps conditional on deployment_type (jdetiber@redhat.com)
+- remove debugging pauses (jdetiber@redhat.com)
+- make storage plugin dependency installation more flexible
+ (jdetiber@redhat.com)
+- Install storage plugin dependencies (jdetiber@redhat.com)
+
+* Wed Oct 28 2015 Kenny Woodson <kwoodson@redhat.com> 3.0.4-1
+- Removing spec files. (kwoodson@redhat.com)
+- Updated example (kwoodson@redhat.com)
+- Automatic commit of package [openshift-ansible-inventory] release [0.0.11-1].
+ (kwoodson@redhat.com)
+- Automatic commit of package [openshift-ansible-bin] release [0.0.21-1].
+ (kwoodson@redhat.com)
+- Automatic commit of package [openshift-ansible-inventory] release [0.0.10-1].
+ (kwoodson@redhat.com)
+- Automatic commit of package [openshift-ansible-bin] release [0.0.20-1].
+ (kwoodson@redhat.com)
+- Adding tito releasers configuration (bleanhar@redhat.com)
+- Bug fixes for the uninstall playbook (bleanhar@redhat.com)
+- Adding clone vars and groups. Renamed hostvars to extra_vars.
+ (kwoodson@redhat.com)
+- Start tracking docker info execution time (jdiaz@redhat.com)
+- The uninstall playbook should remove the kubeconfig for non-root installs
+ (bleanhar@redhat.com)
+- Adding uninstall support for Atomic Host (bleanhar@redhat.com)
+- add examples for SDN configuration (jdetiber@redhat.com)
+
+* Tue Oct 27 2015 Troy Dawson <tdawson@redhat.com> 3.0.3-1
+- Pylint fixes and ignores for incoming oo-install code. (dgoodwin@redhat.com)
+- Pylint fixes (abutcher@redhat.com)
+- Adding zabbix type and fixing zabbix agent vars (kwoodson@redhat.com)
+- Add atomic-openshift-utils add atomic-openshift-utils to openshift-
+ ansible.spec file (tdawson@redhat.com)
+- Fix quotes (spinolacastro@gmail.com)
+- Use standard library for version comparison. (abutcher@redhat.com)
+- added docker info to the end of docker loop to direct lvm playbook.
+ (twiest@redhat.com)
+- Add missing quotes (spinolacastro@gmail.com)
+- Adding Docker Log Options capabilities (epo@jemba.net)
+- Move version greater_than_fact into openshift_facts (abutcher@redhat.com)
+- Don't include proxy client cert when <3.1 or <1.1 (abutcher@redhat.com)
+- Add proxy client certs to master config. (abutcher@redhat.com)
+- Update imagestreams and quickstarts from origin (sdodson@redhat.com)
+- Get default values from openshift_facts (spinolacastro@gmail.com)
+- Cleanup (spinolacastro@gmail.com)
+- Add missing inventory example (spinolacastro@gmail.com)
+- Custom Project Config (spinolacastro@gmail.com)
+
+* Mon Oct 19 2015 Troy Dawson <tdawson@redhat.com> 3.0.2-1
+- Initial Package
+
diff --git a/playbooks/adhoc/atomic_openshift_tutorial_reset.yml b/playbooks/adhoc/atomic_openshift_tutorial_reset.yml
index 54d3ea278..c14d08e87 100644
--- a/playbooks/adhoc/atomic_openshift_tutorial_reset.yml
+++ b/playbooks/adhoc/atomic_openshift_tutorial_reset.yml
@@ -1,6 +1,9 @@
# This deletes *ALL* Docker images, and uninstalls OpenShift and
# Atomic Enterprise RPMs. It is primarily intended for use
# with the tutorial as well as for developers to reset state.
+#
+---
+- include: uninstall.yml
- hosts:
- OSEv3:children
@@ -8,59 +11,6 @@
sudo: yes
tasks:
- - service: name={{ item }} state=stopped
- with_items:
- - openvswitch
- - origin-master
- - origin-node
- - atomic-openshift-master
- - atomic-openshift-node
- - openshift-master
- - openshift-node
- - atomic-enterprise-master
- - atomic-enterprise-node
- - etcd
-
- - yum: name={{ item }} state=absent
- with_items:
- - openvswitch
- - etcd
- - origin
- - origin-master
- - origin-node
- - origin-sdn-ovs
- - tuned-profiles-origin-node
- - atomic-openshift
- - atomic-openshift-master
- - atomic-openshift-node
- - atomic-openshift-sdn-ovs
- - tuned-profiles-atomic-openshift-node
- - atomic-enterprise
- - atomic-enterprise-master
- - atomic-enterprise-node
- - atomic-enterprise-sdn-ovs
- - tuned-profiles-atomic-enterprise-node
- - openshift
- - openshift-master
- - openshift-node
- - openshift-sdn-ovs
- - tuned-profiles-openshift-node
-
- - shell: systemctl reset-failed
- changed_when: False
-
- - shell: systemctl daemon-reload
- changed_when: False
-
- - shell: find /var/lib/origin/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true
- changed_when: False
-
- - shell: find /var/lib/atomic-enterprise/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true
- changed_when: False
-
- - shell: find /var/lib/openshift/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true
- changed_when: False
-
- shell: docker ps -a -q | xargs docker stop
changed_when: False
failed_when: False
@@ -73,27 +23,6 @@
changed_when: False
failed_when: False
- - file: path={{ item }} state=absent
- with_items:
- - /etc/openshift-sdn
- - /root/.kube
- - /etc/origin
- - /etc/atomic-enterprise
- - /etc/openshift
- - /var/lib/origin
- - /var/lib/openshift
- - /var/lib/atomic-enterprise
- - /etc/sysconfig/origin-master
- - /etc/sysconfig/origin-node
- - /etc/sysconfig/atomic-openshift-master
- - /etc/sysconfig/atomic-openshift-node
- - /etc/sysconfig/openshift-master
- - /etc/sysconfig/openshift-node
- - /etc/sysconfig/atomic-enterprise-master
- - /etc/sysconfig/atomic-enterprise-node
- - /etc/etcd
- - /var/lib/etcd
-
- user: name={{ item }} state=absent remove=yes
with_items:
- alice
diff --git a/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml b/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml
index c9ae923bb..b6a2d2f26 100644
--- a/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml
+++ b/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml
@@ -27,9 +27,8 @@
gather_facts: no
vars:
- cli_volume_type: io1
+ cli_volume_type: gp2
cli_volume_size: 30
- cli_volume_iops: "{{ 30 * cli_volume_size }}"
pre_tasks:
- fail:
@@ -104,7 +103,6 @@
volume_size: "{{ cli_volume_size | default(30, True)}}"
volume_type: "{{ cli_volume_type }}"
device_name: /dev/xvdb
- iops: "{{ 30 * cli_volume_size }}"
register: vol
- debug: var=vol
@@ -142,10 +140,3 @@
- debug: var=dockerstart
- - name: Wait for docker to stabilize
- pause:
- seconds: 30
-
- # leaving off the '-t' for docker exec. With it, it doesn't work with ansible and tty support
- - name: update zabbix docker items
- command: docker exec -i oso-rhel7-zagg-client /usr/local/bin/cron-send-docker-metrics.py
diff --git a/playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml b/playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml
new file mode 100755
index 000000000..72fcd77b3
--- /dev/null
+++ b/playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml
@@ -0,0 +1,115 @@
+#!/usr/bin/ansible-playbook
+---
+# This playbook converts docker from loopback to direct-lvm (the Red Hat recommended way to run docker).
+#
+# It requires the block device to be already provisioned and attached to the host. This is a generic playbook,
+# meant to be used for manual conversion. For AWS specific conversions, use the other playbook in this directory.
+#
+# To run:
+# ./ops-docker-loopback-to-direct-lvm.yml -e cli_host=<host to run on> -e cli_docker_device=<path to device>
+#
+# Example:
+# ./ops-docker-loopback-to-direct-lvm.yml -e cli_host=twiesttest-master-fd32 -e cli_docker_device=/dev/sdb
+#
+# Notes:
+# * This will remove /var/lib/docker!
+# * You may need to re-deploy docker images after this is run (like monitoring)
+
+- name: Fix docker to have a provisioned iops drive
+ hosts: "{{ cli_host }}"
+ user: root
+ connection: ssh
+ gather_facts: no
+
+ pre_tasks:
+ - fail:
+ msg: "This playbook requires {{item}} to be set."
+ when: "{{ item }} is not defined or {{ item }} == ''"
+ with_items:
+ - cli_docker_device
+
+ - name: start docker
+ service:
+ name: docker
+ state: started
+
+ - name: Determine if loopback
+ shell: docker info | grep 'Data file:.*loop'
+ register: loop_device_check
+ ignore_errors: yes
+
+ - debug:
+ var: loop_device_check
+
+ - name: fail if we don't detect loopback
+ fail:
+ msg: loopback not detected! Please investigate manually.
+ when: loop_device_check.rc == 1
+
+ - name: stop zagg client monitoring container
+ service:
+ name: oso-rhel7-zagg-client
+ state: stopped
+ ignore_errors: yes
+
+ - name: stop pcp client monitoring container
+ service:
+ name: oso-f22-host-monitoring
+ state: stopped
+ ignore_errors: yes
+
+ - name: "check to see if {{ cli_docker_device }} exists"
+ command: "test -e {{ cli_docker_device }}"
+ register: docker_dev_check
+ ignore_errors: yes
+
+ - debug: var=docker_dev_check
+
+ - name: "fail if {{ cli_docker_device }} doesn't exist"
+ fail:
+ msg: "{{ cli_docker_device }} doesn't exist. Please investigate"
+ when: docker_dev_check.rc != 0
+
+ - name: stop docker
+ service:
+ name: docker
+ state: stopped
+
+ - name: delete /var/lib/docker
+ command: rm -rf /var/lib/docker
+
+ - name: copy the docker-storage-setup config file
+ copy:
+ content: >
+ DEVS={{ cli_docker_device }}
+ VG=docker_vg
+ dest: /etc/sysconfig/docker-storage-setup
+ owner: root
+ group: root
+ mode: 0664
+
+ - name: docker storage setup
+ command: docker-storage-setup
+ register: setup_output
+
+ - debug: var=setup_output
+
+ - name: extend the vg
+ command: lvextend -l 90%VG /dev/docker_vg/docker-pool
+ register: extend_output
+
+ - debug: var=extend_output
+
+ - name: start docker
+ service:
+ name: docker
+ state: restarted
+
+ - name: docker info
+ command: docker info
+ register: dockerinfo
+
+ - debug: var=dockerinfo
diff --git a/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml b/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml
index ef9b45abd..63d473146 100644
--- a/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml
+++ b/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml
@@ -172,7 +172,7 @@
- name: pvmove onto new volume
command: "pvmove {{ docker_pv_name.stdout }} /dev/xvdc1"
- async: 3600
+ async: 43200
poll: 10
- name: Remove the old docker drive from the volume group
diff --git a/playbooks/adhoc/s3_registry/s3_registry.j2 b/playbooks/adhoc/s3_registry/s3_registry.j2
index 026b24456..acfa89515 100644
--- a/playbooks/adhoc/s3_registry/s3_registry.j2
+++ b/playbooks/adhoc/s3_registry/s3_registry.j2
@@ -7,8 +7,8 @@ storage:
cache:
layerinfo: inmemory
s3:
- accesskey: {{ accesskey }}
- secretkey: {{ secretkey }}
+ accesskey: {{ aws_access_key }}
+ secretkey: {{ aws_secret_key }}
region: us-east-1
bucket: {{ clusterid }}-docker
encrypt: true
diff --git a/playbooks/adhoc/s3_registry/s3_registry.yml b/playbooks/adhoc/s3_registry/s3_registry.yml
index 30b873db3..4dcef1a42 100644
--- a/playbooks/adhoc/s3_registry/s3_registry.yml
+++ b/playbooks/adhoc/s3_registry/s3_registry.yml
@@ -1,20 +1,38 @@
---
# This playbook creates an S3 bucket named after your cluster and configures the docker-registry service to use the bucket as its backend storage.
# Usage:
-# ansible-playbook s3_registry.yml -e accesskey="S3 aws access key" -e secretkey="S3 aws secret key" -e clusterid="mycluster"
+# ansible-playbook s3_registry.yml -e clusterid="mycluster"
#
# The AWS access/secret keys should be the keys of a separate user (not your main user), containing only the necessary S3 access role.
# The 'clusterid' is the short name of your cluster.
-- hosts: security_group_{{ clusterid }}_master
+- hosts: tag_env-host-type_{{ clusterid }}-openshift-master
remote_user: root
gather_facts: False
+ vars:
+ aws_access_key: "{{ lookup('env', 'S3_ACCESS_KEY_ID') }}"
+ aws_secret_key: "{{ lookup('env', 'S3_SECRET_ACCESS_KEY') }}"
+
tasks:
+ - name: Check for AWS creds
+ fail:
+ msg: "Couldn't find {{ item }} creds in ENV"
+ when: "{{ item }} == ''"
+ with_items:
+ - aws_access_key
+ - aws_secret_key
+
+ - name: Scale down registry
+ command: oc scale --replicas=0 dc/docker-registry
+
- name: Create S3 bucket
local_action:
- module: s3 bucket="{{ clusterid }}-docker" mode=create aws_access_key={{ accesskey|quote }} aws_secret_key={{ secretkey|quote }}
+ module: s3 bucket="{{ clusterid }}-docker" mode=create
+
+ - name: Set up registry environment variable
+ command: oc env dc/docker-registry REGISTRY_CONFIGURATION_PATH=/etc/registryconfig/config.yml
- name: Generate docker registry config
template: src="s3_registry.j2" dest="/root/config.yml" owner=root mode=0600
@@ -43,6 +61,9 @@
command: oc volume dc/docker-registry --add --name=dockersecrets -m /etc/registryconfig --type=secret --secret-name=dockerregistry
when: "'dockersecrets' not in dc.stdout"
+ - name: Wait for deployment config to take effect before scaling up
+ pause: seconds=30
+
- name: Scale up registry
command: oc scale --replicas=1 dc/docker-registry
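
With the keys now read from the environment, the invocation becomes S3_ACCESS_KEY_ID=... S3_SECRET_ACCESS_KEY=... ansible-playbook s3_registry.yml -e clusterid=mycluster. A minimal sketch, using the same vars as above, of an equivalent creds check that avoids templating expressions inside when:

    vars:
      aws_access_key: "{{ lookup('env', 'S3_ACCESS_KEY_ID') }}"
      aws_secret_key: "{{ lookup('env', 'S3_SECRET_ACCESS_KEY') }}"
    tasks:
    - name: Check for AWS creds
      fail:
        msg: "Couldn't find AWS creds in ENV"
      when: aws_access_key == '' or aws_secret_key == ''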
diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml
new file mode 100644
index 000000000..0503b7cd4
--- /dev/null
+++ b/playbooks/adhoc/uninstall.yml
@@ -0,0 +1,159 @@
+# This deletes *ALL* Origin, Atomic Enterprise Platform and OpenShift
+# Enterprise content installed by ansible. This includes:
+#
+# configuration
+# containers
+# example templates and imagestreams
+# images
+# RPMs
+---
+- hosts:
+ - OSEv3:children
+
+ sudo: yes
+
+ tasks:
+ - name: Detecting Operating System
+ shell: ls /run/ostree-booted
+ ignore_errors: yes
+ failed_when: false
+ register: ostree_output
+
+ - set_fact:
+ is_atomic: "{{ ostree_output.rc == 0 }}"
+
+ - name: Remove br0 interface
+ shell: ovs-vsctl del-br br0
+ changed_when: False
+ failed_when: False
+
+ - service: name={{ item }} state=stopped
+ with_items:
+ - atomic-enterprise-master
+ - atomic-enterprise-node
+ - atomic-openshift-master
+ - atomic-openshift-master-api
+ - atomic-openshift-master-controllers
+ - atomic-openshift-node
+ - etcd
+ - openshift-master
+ - openshift-master-api
+ - openshift-master-controllers
+ - openshift-node
+ - openvswitch
+ - origin-master
+ - origin-master-api
+ - origin-master-controllers
+ - origin-node
+
+ - yum: name={{ item }} state=absent
+ when: not is_atomic | bool
+ with_items:
+ - atomic-enterprise
+ - atomic-enterprise-master
+ - atomic-enterprise-node
+ - atomic-enterprise-sdn-ovs
+ - atomic-openshift
+ - atomic-openshift-clients
+ - atomic-openshift-master
+ - atomic-openshift-node
+ - atomic-openshift-sdn-ovs
+ - etcd
+ - openshift
+ - openshift-master
+ - openshift-node
+ - openshift-sdn
+ - openshift-sdn-ovs
+ - openvswitch
+ - origin
+ - origin-master
+ - origin-node
+ - origin-sdn-ovs
+ - tuned-profiles-atomic-enterprise-node
+ - tuned-profiles-atomic-openshift-node
+ - tuned-profiles-openshift-node
+ - tuned-profiles-origin-node
+
+ - name: Remove linux interfaces
+ shell: ip link del "{{ item }}"
+ changed_when: False
+ failed_when: False
+ with_items:
+ - lbr0
+ - vlinuxbr
+ - vovsbr
+
+ - shell: systemctl reset-failed
+ changed_when: False
+
+ - shell: systemctl daemon-reload
+ changed_when: False
+
+ - shell: find /var/lib/origin/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true
+ changed_when: False
+
+ - shell: find /var/lib/atomic-enterprise/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true
+ changed_when: False
+
+ - shell: find /var/lib/openshift/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true
+ changed_when: False
+
+ - shell: docker rm -f "{{ item }}"-master "{{ item }}"-node
+ changed_when: False
+ failed_when: False
+ with_items:
+ - openshift-enterprise
+ - atomic-enterprise
+ - origin
+
+ - shell: docker ps -a | grep Exited | grep "{{ item }}" | awk '{print $1}'
+ changed_when: False
+ failed_when: False
+ register: exited_containers_to_delete
+ with_items:
+ - aep3/aep
+ - openshift3/ose
+ - openshift/origin
+
+ - shell: "docker rm {{ item.stdout_lines | join(' ') }}"
+ changed_when: False
+ failed_when: False
+ with_items: "{{ exited_containers_to_delete.results }}"
+
+ - shell: docker images | grep {{ item }} | awk '{ print $3 }'
+ changed_when: False
+ failed_when: False
+ register: images_to_delete
+ with_items:
+ - registry.access.redhat.com/openshift3
+ - registry.access.redhat.com/aep3
+ - docker.io/openshift
+
+ - shell: "docker rmi -f {{ item.stdout_lines | join(' ') }}"
+ changed_when: False
+ failed_when: False
+ with_items: "{{ images_to_delete.results }}"
+
+ - file: path={{ item }} state=absent
+ with_items:
+ - /etc/ansible/facts.d/openshift.fact
+ - /etc/atomic-enterprise
+ - /etc/etcd
+ - /etc/openshift
+ - /etc/openshift-sdn
+ - /etc/origin
+ - /etc/sysconfig/atomic-enterprise-master
+ - /etc/sysconfig/atomic-enterprise-node
+ - /etc/sysconfig/atomic-openshift-master
+ - /etc/sysconfig/atomic-openshift-node
+ - /etc/sysconfig/openshift-master
+ - /etc/sysconfig/openshift-node
+ - /etc/sysconfig/origin-master
+ - /etc/sysconfig/origin-node
+ - /root/.kube
+ - "~{{ ansible_ssh_user }}/.kube"
+ - /usr/share/openshift/examples
+ - /var/lib/atomic-enterprise
+ - /var/lib/etcd
+ - /var/lib/openshift
+ - /var/lib/origin
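
The Atomic Host detection above shells out to ls and keys off the return code. A sketch of the same check using the stat module, assuming only that /run/ostree-booted exists on ostree-based hosts:

    - name: Detect Atomic Host
      stat:
        path: /run/ostree-booted
      register: ostree_booted

    - set_fact:
        is_atomic: "{{ ostree_booted.stat.exists }}"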
diff --git a/playbooks/adhoc/upgrades/upgrade.yml b/playbooks/adhoc/upgrades/upgrade.yml
index e666f0472..ae1d0127c 100644
--- a/playbooks/adhoc/upgrades/upgrade.yml
+++ b/playbooks/adhoc/upgrades/upgrade.yml
@@ -1,4 +1,14 @@
---
+- name: Upgrade base package on masters
+ hosts: masters
+ roles:
+ - openshift_facts
+ vars:
+ openshift_version: "{{ openshift_pkg_version | default('') }}"
+ tasks:
+ - name: Upgrade base package
+ yum: pkg={{ openshift.common.service_type }}{{ openshift_version }} state=latest
+
- name: Re-Run cluster configuration to apply latest configuration changes
include: ../../common/openshift-cluster/config.yml
vars:
@@ -40,7 +50,7 @@
hosts: oo_first_master
tasks:
fail: This playbook requires Origin 1.0.6 or Atomic OpenShift 3.0.2 or later
- when: _new_version.stdout < 1.0.6 or (_new_version.stdout >= 3.0 and _new_version.stdout < 3.0.2)
+ when: _new_version.stdout | version_compare('1.0.6','<') or ( _new_version.stdout | version_compare('3.0','>=') and _new_version.stdout | version_compare('3.0.2','<') )
- name: Update cluster policy
hosts: oo_first_master
@@ -50,6 +60,19 @@
{{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
policy reconcile-cluster-roles --confirm
+- name: Update cluster policy bindings
+ hosts: oo_first_master
+ tasks:
+ - name: oadm policy reconcile-cluster-role-bindings --confirm
+ command: >
+ {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ policy reconcile-cluster-role-bindings
+ --exclude-groups=system:authenticated
+ --exclude-groups=system:unauthenticated
+ --exclude-users=system:anonymous
+ --additive-only=true --confirm
+ when: ( _new_version.stdout | version_compare('1.0.6', '>') and _new_version.stdout | version_compare('3.0','<') ) or _new_version.stdout | version_compare('3.0.2','>')
+
- name: Upgrade default router
hosts: oo_first_master
vars:
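
The corrected conditions rely on the version_compare filter, which parses dotted versions instead of comparing a string against a bare float as the old expression did (lexically, '1.0.10' would sort before '1.0.6'). A small sketch of the filter in isolation, using the same _new_version fact:

    - debug:
        msg: "version gate passed"
      when: _new_version.stdout | version_compare('1.0.6', '>=')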
diff --git a/playbooks/aws/openshift-cluster/launch.yml b/playbooks/aws/openshift-cluster/launch.yml
index a89275597..786918929 100644
--- a/playbooks/aws/openshift-cluster/launch.yml
+++ b/playbooks/aws/openshift-cluster/launch.yml
@@ -55,9 +55,4 @@
when: master_names is defined and master_names.0 is defined
- include: update.yml
-
-- include: ../../common/openshift-cluster/create_services.yml
- vars:
- g_svc_master: "{{ service_master }}"
-
- include: list.yml
diff --git a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml
index b77bcdc1a..9c699120b 100644
--- a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml
+++ b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml
@@ -172,6 +172,7 @@
- rotate 7
- compress
- sharedscripts
+ - missingok
scripts:
postrotate: "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true"
diff --git a/playbooks/common/openshift-cluster/create_services.yml b/playbooks/common/openshift-cluster/create_services.yml
deleted file mode 100644
index e70709d19..000000000
--- a/playbooks/common/openshift-cluster/create_services.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-- name: Deploy OpenShift Services
- hosts: "{{ g_svc_master }}"
- connection: ssh
- gather_facts: yes
- roles:
- - openshift_registry
- - openshift_router
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
index 64cf7a65b..1dec923fc 100644
--- a/playbooks/common/openshift-master/config.yml
+++ b/playbooks/common/openshift-master/config.yml
@@ -2,6 +2,21 @@
- name: Set master facts and determine if external etcd certs need to be generated
hosts: oo_masters_to_config
pre_tasks:
+ - name: Check for RPM generated config marker file .config_managed
+ stat:
+ path: /etc/origin/.config_managed
+ register: rpmgenerated_config
+
+ - name: Remove RPM generated config files if present
+ file:
+ path: "/etc/origin/{{ item }}"
+ state: absent
+ when: rpmgenerated_config.stat.exists == true and deployment_type in ['openshift-enterprise', 'atomic-enterprise']
+ with_items:
+ - master
+ - node
+ - .config_managed
+
- set_fact:
openshift_master_etcd_port: "{{ (etcd_client_port | default('2379')) if (groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config) else none }}"
openshift_master_etcd_hosts: "{{ hostvars
@@ -122,6 +137,7 @@
openshift_master_certs_no_etcd:
- admin.crt
- master.kubelet-client.crt
+ - "{{ 'master.proxy-client.crt' if openshift.common.version_greater_than_3_1_or_1_1 else omit }}"
- master.server.crt
- openshift-master.crt
- openshift-registry.crt
@@ -129,6 +145,7 @@
- etcd.server.crt
openshift_master_certs_etcd:
- master.etcd-client.crt
+
- set_fact:
openshift_master_certs: "{{ (openshift_master_certs_no_etcd | union(openshift_master_certs_etcd)) if (groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config) else openshift_master_certs_no_etcd }}"
@@ -138,9 +155,9 @@
with_items: openshift_master_certs
register: g_master_cert_stat_result
- set_fact:
- master_certs_missing: "{{ g_master_cert_stat_result.results
+ master_certs_missing: "{{ False in (g_master_cert_stat_result.results
| map(attribute='stat.exists')
- | list | intersect([false])}}"
+ | list ) }}"
master_cert_subdir: master-{{ openshift.common.hostname }}
master_cert_config_dir: "{{ openshift.common.config_base }}/master"
@@ -172,6 +189,7 @@
args:
creates: "{{ master_generated_certs_dir }}/{{ item.master_cert_subdir }}.tgz"
with_items: masters_needing_certs
+
- name: Retrieve the master cert tarball from the master
fetch:
src: "{{ master_generated_certs_dir }}/{{ item.master_cert_subdir }}.tgz"
@@ -216,11 +234,19 @@
roles:
- role: openshift_master_cluster
when: openshift_master_ha | bool
- - role: openshift_examples
- when: deployment_type in ['enterprise','openshift-enterprise','origin']
+ - openshift_examples
- role: openshift_cluster_metrics
when: openshift.common.use_cluster_metrics | bool
+- name: Enable cockpit
+ hosts: oo_first_master
+ vars:
+ cockpit_plugins: "{{ osm_cockpit_plugins | default(['cockpit-kubernetes']) }}"
+ roles:
+ - role: cockpit
+ when: ( deployment_type in ['atomic-enterprise','openshift-enterprise'] ) and
+ (osm_use_cockpit | bool or osm_use_cockpit is undefined )
+
# Additional instance config for online deployments
- name: Additional instance config
hosts: oo_masters_deployment_type_online
@@ -245,3 +271,10 @@
roles:
- openshift_serviceaccounts
+
+- name: Create services
+ hosts: oo_first_master
+ roles:
+ - role: openshift_router
+ when: openshift.master.infra_nodes is defined
+ #- role: openshift_registry
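
The cockpit play above defaults to enabled for the enterprise deployment types and is gated on osm_use_cockpit. A hypothetical group_vars snippet showing the two knobs it consumes:

    # group_vars for the cluster (hypothetical values)
    osm_use_cockpit: true
    osm_cockpit_plugins:
    - cockpit-kubernetes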
diff --git a/playbooks/gce/openshift-cluster/config.yml b/playbooks/gce/openshift-cluster/config.yml
index fd5dfcc72..6ca4f7395 100644
--- a/playbooks/gce/openshift-cluster/config.yml
+++ b/playbooks/gce/openshift-cluster/config.yml
@@ -10,6 +10,8 @@
- set_fact:
g_ssh_user_tmp: "{{ deployment_vars[deployment_type].ssh_user }}"
g_sudo_tmp: "{{ deployment_vars[deployment_type].sudo }}"
+ use_sdn: "{{ do_we_use_openshift_sdn }}"
+ sdn_plugin: "{{ sdn_network_plugin }}"
- include: ../../common/openshift-cluster/config.yml
vars:
@@ -18,7 +20,10 @@
g_nodes_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-node' }}"
g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}"
g_sudo: "{{ hostvars.localhost.g_sudo_tmp }}"
+ g_nodeonmaster: true
openshift_cluster_id: "{{ cluster_id }}"
openshift_debug_level: 2
openshift_deployment_type: "{{ deployment_type }}"
openshift_hostname: "{{ gce_private_ip }}"
+ openshift_use_openshift_sdn: "{{ hostvars.localhost.use_sdn }}"
+ os_sdn_network_plugin_name: "{{ hostvars.localhost.sdn_plugin }}"
diff --git a/playbooks/gce/openshift-cluster/join_node.yml b/playbooks/gce/openshift-cluster/join_node.yml
new file mode 100644
index 000000000..0dfa3e9d7
--- /dev/null
+++ b/playbooks/gce/openshift-cluster/join_node.yml
@@ -0,0 +1,49 @@
+---
+- name: Populate oo_hosts_to_update group
+ hosts: localhost
+ gather_facts: no
+ vars_files:
+ - vars.yml
+ tasks:
+ - name: Evaluate oo_hosts_to_update
+ add_host:
+ name: "{{ node_ip }}"
+ groups: oo_hosts_to_update
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+
+- include: ../../common/openshift-cluster/update_repos_and_packages.yml
+
+- name: Populate oo_masters_to_config host group
+ hosts: localhost
+ gather_facts: no
+ vars_files:
+ - vars.yml
+ tasks:
+ - name: Evaluate oo_nodes_to_config
+ add_host:
+ name: "{{ node_ip }}"
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ groups: oo_nodes_to_config
+
+ - name: Evaluate oo_first_master
+ add_host:
+ name: "{{ groups['tag_env-host-type-' ~ cluster_id ~ '-openshift-master'][0] }}"
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ groups: oo_first_master
+ when: "'tag_env-host-type-{{ cluster_id }}-openshift-master' in groups"
+
+#- include: config.yml
+- include: ../../common/openshift-node/config.yml
+ vars:
+ openshift_cluster_id: "{{ cluster_id }}"
+ openshift_debug_level: 4
+ openshift_deployment_type: "{{ deployment_type }}"
+ openshift_hostname: "{{ ansible_default_ipv4.address }}"
+ openshift_use_openshift_sdn: true
+ openshift_node_labels: "{{ lookup('oo_option', 'openshift_node_labels') }}"
+ os_sdn_network_plugin_name: "redhat/openshift-ovs-subnet"
+ osn_cluster_dns_domain: "{{ hostvars[groups.oo_first_master.0].openshift.dns.domain }}"
+ osn_cluster_dns_ip: "{{ hostvars[groups.oo_first_master.0].openshift.dns.ip }}"
diff --git a/playbooks/gce/openshift-cluster/launch.yml b/playbooks/gce/openshift-cluster/launch.yml
index 7a3b80da0..c22b897d5 100644
--- a/playbooks/gce/openshift-cluster/launch.yml
+++ b/playbooks/gce/openshift-cluster/launch.yml
@@ -34,27 +34,28 @@
count: "{{ num_infra }}"
- include: tasks/launch_instances.yml
vars:
- instances: "{{ infra_names }}"
+ instances: "{{ node_names }}"
cluster: "{{ cluster_id }}"
type: "{{ k8s_type }}"
g_sub_host_type: "{{ sub_host_type }}"
- - set_fact:
- a_infra: "{{ infra_names[0] }}"
- - add_host: name={{ a_infra }} groups=service_master
+ - add_host:
+ name: "{{ master_names.0 }}"
+ groups: service_master
+ when: master_names is defined and master_names.0 is defined
- include: update.yml
-
-- name: Deploy OpenShift Services
- hosts: service_master
- connection: ssh
- gather_facts: yes
- roles:
- - openshift_registry
- - openshift_router
-
-- include: ../../common/openshift-cluster/create_services.yml
- vars:
- g_svc_master: "{{ service_master }}"
+#
+#- name: Deploy OpenShift Services
+# hosts: service_master
+# connection: ssh
+# gather_facts: yes
+# roles:
+# - openshift_registry
+# - openshift_router
+#
+#- include: ../../common/openshift-cluster/create_services.yml
+# vars:
+# g_svc_master: "{{ service_master }}"
- include: list.yml
diff --git a/playbooks/gce/openshift-cluster/list.yml b/playbooks/gce/openshift-cluster/list.yml
index 5ba0f5a48..53b2b9a5e 100644
--- a/playbooks/gce/openshift-cluster/list.yml
+++ b/playbooks/gce/openshift-cluster/list.yml
@@ -14,11 +14,11 @@
groups: oo_list_hosts
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
- with_items: groups[scratch_group] | default([]) | difference(['localhost']) | difference(groups.status_terminated)
+ with_items: groups[scratch_group] | default([], true) | difference(['localhost']) | difference(groups.status_terminated | default([], true))
- name: List instance(s)
hosts: oo_list_hosts
gather_facts: no
tasks:
- debug:
- msg: "public ip:{{ hostvars[inventory_hostname].gce_public_ip }} private ip:{{ hostvars[inventory_hostname].gce_private_ip }}"
+ msg: "private ip:{{ hostvars[inventory_hostname].gce_private_ip }}"
diff --git a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml
index 6307ecc27..c428cb465 100644
--- a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml
+++ b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml
@@ -10,14 +10,33 @@
service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
project_id: "{{ lookup('env', 'gce_project_id') }}"
+ zone: "{{ lookup('env', 'zone') }}"
+ network: "{{ lookup('env', 'network') }}"
+# unsupported in Ansible 1.9+
+ #service_account_permissions: "datastore,logging-write"
tags:
- created-by-{{ lookup('env', 'LOGNAME') |default(cluster, true) }}
- env-{{ cluster }}
- host-type-{{ type }}
- - sub-host-type-{{ sub_host_type }}
+ - sub-host-type-{{ g_sub_host_type }}
- env-host-type-{{ cluster }}-openshift-{{ type }}
+ when: instances |length > 0
register: gce
+- set_fact:
+ node_label:
+ # There doesn't seem to be a way to get the region directly, so parse it out of the zone.
+ region: "{{ gce.zone | regex_replace('^(.*)-.*$', '\\\\1') }}"
+ type: "{{ g_sub_host_type }}"
+ when: instances |length > 0 and type == "node"
+
+- set_fact:
+ node_label:
+ # There doesn't seem to be a way to get the region directly, so parse it out of the zone.
+ region: "{{ gce.zone | regex_replace('^(.*)-.*$', '\\\\1') }}"
+ type: "{{ type }}"
+ when: instances |length > 0 and type != "node"
+
- name: Add new instances to groups and set variables needed
add_host:
hostname: "{{ item.name }}"
@@ -27,16 +46,17 @@
groups: "{{ item.tags | oo_prepend_strings_in_list('tag_') | join(',') }}"
gce_public_ip: "{{ item.public_ip }}"
gce_private_ip: "{{ item.private_ip }}"
- with_items: gce.instance_data
+ openshift_node_labels: "{{ node_label }}"
+ with_items: gce.instance_data | default([], true)
- name: Wait for ssh
wait_for: port=22 host={{ item.public_ip }}
- with_items: gce.instance_data
+ with_items: gce.instance_data | default([], true)
- name: Wait for user setup
command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null {{ hostvars[item.name].ansible_ssh_user }}@{{ item.public_ip }} echo {{ hostvars[item.name].ansible_ssh_user }} user is setup"
register: result
until: result.rc == 0
- retries: 20
- delay: 10
- with_items: gce.instance_data
+ retries: 30
+ delay: 5
+ with_items: gce.instance_data | default([], true)
diff --git a/playbooks/gce/openshift-cluster/terminate.yml b/playbooks/gce/openshift-cluster/terminate.yml
index 098b0df73..e20e0a8bc 100644
--- a/playbooks/gce/openshift-cluster/terminate.yml
+++ b/playbooks/gce/openshift-cluster/terminate.yml
@@ -1,25 +1,18 @@
---
- name: Terminate instance(s)
hosts: localhost
+ connection: local
gather_facts: no
vars_files:
- vars.yml
tasks:
- - set_fact: scratch_group=tag_env-host-type-{{ cluster_id }}-openshift-node
+ - set_fact: scratch_group=tag_env-{{ cluster_id }}
- add_host:
name: "{{ item }}"
- groups: oo_hosts_to_terminate, oo_nodes_to_terminate
+ groups: oo_hosts_to_terminate
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
- with_items: groups[scratch_group] | default([]) | difference(['localhost']) | difference(groups.status_terminated)
-
- - set_fact: scratch_group=tag_env-host-type-{{ cluster_id }}-openshift-master
- - add_host:
- name: "{{ item }}"
- groups: oo_hosts_to_terminate, oo_masters_to_terminate
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
- with_items: groups[scratch_group] | default([]) | difference(['localhost']) | difference(groups.status_terminated)
+ with_items: groups[scratch_group] | default([], true) | difference(['localhost']) | difference(groups.status_terminated | default([], true))
- name: Unsubscribe VMs
hosts: oo_hosts_to_terminate
@@ -32,14 +25,34 @@
lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) |
default('no', True) | lower in ['no', 'false']
-- include: ../openshift-node/terminate.yml
- vars:
- gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
- gce_pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
- gce_project_id: "{{ lookup('env', 'gce_project_id') }}"
+- name: Terminate instances(s)
+ hosts: localhost
+ connection: local
+ gather_facts: no
+ vars_files:
+ - vars.yml
+ tasks:
+
+ - name: Terminate instances that were previously launched
+ local_action:
+ module: gce
+ state: 'absent'
+ name: "{{ item }}"
+ service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
+ pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
+ project_id: "{{ lookup('env', 'gce_project_id') }}"
+ zone: "{{ lookup('env', 'zone') }}"
+ with_items: groups['oo_hosts_to_terminate'] | default([], true)
+ when: item is defined
-- include: ../openshift-master/terminate.yml
- vars:
- gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
- gce_pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
- gce_project_id: "{{ lookup('env', 'gce_project_id') }}"
+#- include: ../openshift-node/terminate.yml
+# vars:
+# gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
+# gce_pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
+# gce_project_id: "{{ lookup('env', 'gce_project_id') }}"
+#
+#- include: ../openshift-master/terminate.yml
+# vars:
+# gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
+# gce_pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
+# gce_project_id: "{{ lookup('env', 'gce_project_id') }}"
diff --git a/playbooks/gce/openshift-cluster/vars.yml b/playbooks/gce/openshift-cluster/vars.yml
index ae33083b9..6de007807 100644
--- a/playbooks/gce/openshift-cluster/vars.yml
+++ b/playbooks/gce/openshift-cluster/vars.yml
@@ -1,8 +1,11 @@
---
+do_we_use_openshift_sdn: true
+sdn_network_plugin: redhat/openshift-ovs-subnet
+# os_sdn_network_plugin_name can be ovssubnet or multitenant, see https://docs.openshift.org/latest/architecture/additional_concepts/sdn.html#ovssubnet-plugin-operation
deployment_vars:
origin:
- image: centos-7
- ssh_user:
+ image: preinstalled-slave-50g-v5
+ ssh_user: root
sudo: yes
online:
image: libra-rhel7
@@ -12,4 +15,3 @@ deployment_vars:
image: rhel-7
ssh_user:
sudo: yes
-
diff --git a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
index 2a0c90b46..4b91c6da8 100644
--- a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
+++ b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
@@ -64,7 +64,7 @@
register: nb_allocated_ips
until: nb_allocated_ips.stdout == '{{ instances | length }}'
retries: 60
- delay: 1
+ delay: 3
when: instances | length != 0
- name: Collect IP addresses of the VMs
diff --git a/playbooks/libvirt/openshift-cluster/templates/network.xml b/playbooks/libvirt/openshift-cluster/templates/network.xml
index 86dcd62bb..050bc7ab9 100644
--- a/playbooks/libvirt/openshift-cluster/templates/network.xml
+++ b/playbooks/libvirt/openshift-cluster/templates/network.xml
@@ -8,7 +8,7 @@
<!-- TODO: query for first available virbr interface available -->
<bridge name='virbr3' stp='on' delay='0'/>
<!-- TODO: make overridable -->
- <domain name='example.com'/>
+ <domain name='example.com' localOnly='yes' />
<dns>
<!-- TODO: automatically add host entries -->
</dns>
diff --git a/playbooks/libvirt/openshift-cluster/templates/user-data b/playbooks/libvirt/openshift-cluster/templates/user-data
index 77b788109..eacae7c7e 100644
--- a/playbooks/libvirt/openshift-cluster/templates/user-data
+++ b/playbooks/libvirt/openshift-cluster/templates/user-data
@@ -19,5 +19,5 @@ system_info:
ssh_authorized_keys:
- {{ lookup('file', '~/.ssh/id_rsa.pub') }}
-bootcmd:
+runcmd:
- NETWORK_CONFIG=/etc/sysconfig/network-scripts/ifcfg-eth0; if ! grep DHCP_HOSTNAME ${NETWORK_CONFIG}; then echo 'DHCP_HOSTNAME="{{ item[0] }}.example.com"' >> ${NETWORK_CONFIG}; fi; pkill -9 dhclient; service network restart
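
The switch from bootcmd to runcmd matters because cloud-init executes bootcmd early on every boot, before networking is fully up, while runcmd runs once on first boot after services start, which is what this DHCP_HOSTNAME fixup needs. A minimal cloud-config sketch of the same shape (hostname hypothetical):

    #cloud-config
    runcmd:
    - 'grep -q DHCP_HOSTNAME /etc/sysconfig/network-scripts/ifcfg-eth0 || echo DHCP_HOSTNAME="host.example.com" >> /etc/sysconfig/network-scripts/ifcfg-eth0'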
diff --git a/roles/cockpit/defaults/main.yml b/roles/cockpit/defaults/main.yml
new file mode 100644
index 000000000..ffd55f1dd
--- /dev/null
+++ b/roles/cockpit/defaults/main.yml
@@ -0,0 +1,5 @@
+---
+os_firewall_use_firewalld: false
+os_firewall_allow:
+- service: cockpit-ws
+ port: 9090/tcp
diff --git a/roles/cockpit/meta/main.yml b/roles/cockpit/meta/main.yml
new file mode 100644
index 000000000..1e3948b19
--- /dev/null
+++ b/roles/cockpit/meta/main.yml
@@ -0,0 +1,15 @@
+---
+galaxy_info:
+ author: Scott Dodson
+ description: Deploy and Enable cockpit-ws plus optional plugins
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 1.7
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+dependencies:
+ - { role: os_firewall }
diff --git a/roles/cockpit/tasks/main.yml b/roles/cockpit/tasks/main.yml
new file mode 100644
index 000000000..875cbad21
--- /dev/null
+++ b/roles/cockpit/tasks/main.yml
@@ -0,0 +1,16 @@
+---
+- name: Install cockpit-ws
+ yum:
+ name: "{{ item }}"
+ state: present
+ with_items:
+ - cockpit-ws
+ - cockpit-shell
+ - cockpit-bridge
+ - "{{ cockpit_plugins }}"
+
+- name: Enable cockpit-ws
+ service:
+ name: cockpit.socket
+ enabled: true
+ state: started
diff --git a/roles/etcd/README.md b/roles/etcd/README.md
index 49207c428..88e4ff874 100644
--- a/roles/etcd/README.md
+++ b/roles/etcd/README.md
@@ -17,7 +17,7 @@ TODO
Dependencies
------------
-None
+etcd-common
Example Playbook
----------------
diff --git a/roles/etcd/defaults/main.yaml b/roles/etcd/defaults/main.yaml
index 0f216b84e..0fd3de585 100644
--- a/roles/etcd/defaults/main.yaml
+++ b/roles/etcd/defaults/main.yaml
@@ -2,16 +2,8 @@
etcd_interface: "{{ ansible_default_ipv4.interface }}"
etcd_client_port: 2379
etcd_peer_port: 2380
-etcd_peers_group: etcd
etcd_url_scheme: http
etcd_peer_url_scheme: http
-etcd_conf_dir: /etc/etcd
-etcd_ca_file: "{{ etcd_conf_dir }}/ca.crt"
-etcd_cert_file: "{{ etcd_conf_dir }}/server.crt"
-etcd_key_file: "{{ etcd_conf_dir }}/server.key"
-etcd_peer_ca_file: "{{ etcd_conf_dir }}/ca.crt"
-etcd_peer_cert_file: "{{ etcd_conf_dir }}/peer.crt"
-etcd_peer_key_file: "{{ etcd_conf_dir }}/peer.key"
etcd_initial_cluster_state: new
etcd_initial_cluster_token: etcd-cluster-1
diff --git a/roles/etcd/handlers/main.yml b/roles/etcd/handlers/main.yml
index b897913f9..4c0efb97b 100644
--- a/roles/etcd/handlers/main.yml
+++ b/roles/etcd/handlers/main.yml
@@ -1,3 +1,4 @@
---
- name: restart etcd
service: name=etcd state=restarted
+ when: not etcd_service_status_changed | default(false)
diff --git a/roles/etcd/meta/main.yml b/roles/etcd/meta/main.yml
index 92d44ef4d..a71b36237 100644
--- a/roles/etcd/meta/main.yml
+++ b/roles/etcd/meta/main.yml
@@ -17,4 +17,4 @@ galaxy_info:
- system
dependencies:
- { role: os_firewall }
-- { role: openshift_repos }
+- { role: etcd_common }
diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml
index 656901409..fcbdecd37 100644
--- a/roles/etcd/tasks/main.yml
+++ b/roles/etcd/tasks/main.yml
@@ -1,4 +1,12 @@
---
+- fail:
+ msg: Interface {{ etcd_interface }} not found
+ when: "'ansible_' ~ etcd_interface not in hostvars[inventory_hostname]"
+
+- fail:
+ msg: IPv4 address not found for {{ etcd_interface }}
+ when: "'ipv4' not in hostvars[inventory_hostname]['ansible_' ~ etcd_interface] or 'address' not in hostvars[inventory_hostname]['ansible_' ~ etcd_interface].ipv4"
+
- name: Install etcd
yum: pkg=etcd-2.* state=present
@@ -49,5 +57,5 @@
enabled: yes
register: start_result
-- pause: seconds=30
- when: start_result | changed
+- set_fact:
+ etcd_service_status_changed: "{{ start_result | changed }}"
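
Together with the handler change earlier in this commit, this set_fact implements a restart-suppression pattern: if this run just started etcd for the first time, the restart handler becomes a no-op instead of bouncing the freshly started service. A sketch of the two halves side by side:

    # tasks/main.yml
    - name: Start and enable etcd
      service: name=etcd state=started enabled=yes
      register: start_result

    - set_fact:
        etcd_service_status_changed: "{{ start_result | changed }}"

    # handlers/main.yml
    - name: restart etcd
      service: name=etcd state=restarted
      when: not etcd_service_status_changed | default(false)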
diff --git a/roles/etcd/templates/etcd.conf.j2 b/roles/etcd/templates/etcd.conf.j2
index 9ac23b1dd..32577c96c 100644
--- a/roles/etcd/templates/etcd.conf.j2
+++ b/roles/etcd/templates/etcd.conf.j2
@@ -1,9 +1,9 @@
{% macro initial_cluster() -%}
{% for host in groups[etcd_peers_group] -%}
{% if loop.last -%}
-{{ host }}={{ etcd_peer_url_scheme }}://{{ hostvars[host]['ansible_' + etcd_interface]['ipv4']['address'] }}:{{ etcd_peer_port }}
+{{ host }}={{ etcd_peer_url_scheme }}://{{ etcd_host_int_map[host].interface.ipv4.address }}:{{ etcd_peer_port }}
{%- else -%}
-{{ host }}={{ etcd_peer_url_scheme }}://{{ hostvars[host]['ansible_' + etcd_interface]['ipv4']['address'] }}:{{ etcd_peer_port }},
+{{ host }}={{ etcd_peer_url_scheme }}://{{ etcd_host_int_map[host].interface.ipv4.address }}:{{ etcd_peer_port }},
{%- endif -%}
{% endfor -%}
{% endmacro -%}
diff --git a/roles/etcd_ca/meta/main.yml b/roles/etcd_ca/meta/main.yml
index fb9280c9e..d02456ca3 100644
--- a/roles/etcd_ca/meta/main.yml
+++ b/roles/etcd_ca/meta/main.yml
@@ -13,4 +13,4 @@ galaxy_info:
- cloud
- system
dependencies:
-- { role: openshift_repos }
+- { role: etcd_common }
diff --git a/roles/etcd_ca/tasks/main.yml b/roles/etcd_ca/tasks/main.yml
index 625756867..d32f5e48c 100644
--- a/roles/etcd_ca/tasks/main.yml
+++ b/roles/etcd_ca/tasks/main.yml
@@ -1,14 +1,14 @@
---
- file:
- path: "{{ etcd_ca_dir }}/{{ item }}"
+ path: "{{ item }}"
state: directory
mode: 0700
owner: root
group: root
with_items:
- - certs
- - crl
- - fragments
+ - "{{ etcd_ca_new_certs_dir }}"
+ - "{{ etcd_ca_crl_dir }}"
+ - "{{ etcd_ca_dir }}/fragments"
- command: cp /etc/pki/tls/openssl.cnf ./
args:
@@ -22,25 +22,25 @@
- assemble:
src: "{{ etcd_ca_dir }}/fragments"
- dest: "{{ etcd_ca_dir }}/openssl.cnf"
+ dest: "{{ etcd_openssl_conf }}"
-- command: touch index.txt
+- command: touch {{ etcd_ca_db }}
args:
- chdir: "{{ etcd_ca_dir }}"
- creates: "{{ etcd_ca_dir }}/index.txt"
+ creates: "{{ etcd_ca_db }}"
- copy:
- dest: "{{ etcd_ca_dir }}/serial"
+ dest: "{{ etcd_ca_serial }}"
content: "01"
force: no
- command: >
- openssl req -config openssl.cnf -newkey rsa:4096
- -keyout ca.key -new -out ca.crt -x509 -extensions etcd_v3_ca_self
- -batch -nodes -subj /CN=etcd-signer@{{ ansible_date_time.epoch }}
- -days 365
+ openssl req -config {{ etcd_openssl_conf }} -newkey rsa:4096
+ -keyout {{ etcd_ca_key }} -new -out {{ etcd_ca_cert }}
+ -x509 -extensions {{ etcd_ca_exts_self }} -batch -nodes
+ -days {{ etcd_ca_default_days }}
+ -subj /CN=etcd-signer@{{ ansible_date_time.epoch }}
args:
chdir: "{{ etcd_ca_dir }}"
- creates: "{{ etcd_ca_dir }}/ca.crt"
+ creates: "{{ etcd_ca_cert }}"
environment:
- SAN: ''
+ SAN: 'etcd-signer'
diff --git a/roles/etcd_ca/templates/openssl_append.j2 b/roles/etcd_ca/templates/openssl_append.j2
index de2adaead..f28316fc2 100644
--- a/roles/etcd_ca/templates/openssl_append.j2
+++ b/roles/etcd_ca/templates/openssl_append.j2
@@ -1,20 +1,20 @@
-[ etcd_v3_req ]
+[ {{ etcd_req_ext }} ]
basicConstraints = critical,CA:FALSE
keyUsage = digitalSignature,keyEncipherment
subjectAltName = ${ENV::SAN}
-[ etcd_ca ]
+[ {{ etcd_ca_name }} ]
dir = {{ etcd_ca_dir }}
-crl_dir = $dir/crl
-database = $dir/index.txt
-new_certs_dir = $dir/certs
-certificate = $dir/ca.crt
-serial = $dir/serial
-private_key = $dir/ca.key
-crl_number = $dir/crlnumber
-x509_extensions = etcd_v3_ca_client
-default_days = 365
+crl_dir = {{ etcd_ca_crl_dir }}
+database = {{ etcd_ca_db }}
+new_certs_dir = {{ etcd_ca_new_certs_dir }}
+certificate = {{ etcd_ca_cert }}
+serial = {{ etcd_ca_serial }}
+private_key = {{ etcd_ca_key }}
+crl_number = {{ etcd_ca_crl_number }}
+x509_extensions = {{ etcd_ca_exts_client }}
+default_days = {{ etcd_ca_default_days }}
default_md = sha256
preserve = no
name_opt = ca_default
@@ -23,27 +23,27 @@ policy = policy_anything
unique_subject = no
copy_extensions = copy
-[ etcd_v3_ca_self ]
+[ {{ etcd_ca_exts_self }} ]
authorityKeyIdentifier = keyid,issuer
basicConstraints = critical,CA:TRUE,pathlen:0
keyUsage = critical,digitalSignature,keyEncipherment,keyCertSign
subjectKeyIdentifier = hash
-[ etcd_v3_ca_peer ]
+[ {{ etcd_ca_exts_peer }} ]
authorityKeyIdentifier = keyid,issuer:always
basicConstraints = critical,CA:FALSE
extendedKeyUsage = clientAuth,serverAuth
keyUsage = digitalSignature,keyEncipherment
subjectKeyIdentifier = hash
-[ etcd_v3_ca_server ]
+[ {{ etcd_ca_exts_server }} ]
authorityKeyIdentifier = keyid,issuer:always
basicConstraints = critical,CA:FALSE
extendedKeyUsage = serverAuth
keyUsage = digitalSignature,keyEncipherment
subjectKeyIdentifier = hash
-[ etcd_v3_ca_client ]
+[ {{ etcd_ca_exts_client }} ]
authorityKeyIdentifier = keyid,issuer:always
basicConstraints = critical,CA:FALSE
extendedKeyUsage = clientAuth
diff --git a/roles/etcd_ca/vars/main.yml b/roles/etcd_ca/vars/main.yml
deleted file mode 100644
index 901e95027..000000000
--- a/roles/etcd_ca/vars/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-etcd_conf_dir: /etc/etcd
-etcd_ca_dir: /etc/etcd/ca
diff --git a/roles/etcd_certificates/tasks/client.yml b/roles/etcd_certificates/tasks/client.yml
index 28f33f442..6aa4883e0 100644
--- a/roles/etcd_certificates/tasks/client.yml
+++ b/roles/etcd_certificates/tasks/client.yml
@@ -32,7 +32,7 @@
creates: "{{ etcd_generated_certs_dir ~ '/' ~ item.etcd_cert_subdir ~ '/'
~ item.etcd_cert_prefix ~ 'client.crt' }}"
environment:
- SAN: ''
+ SAN: "IP:{{ item.openshift.common.ip }}"
with_items: etcd_needing_client_certs
- file:
diff --git a/roles/etcd_certificates/tasks/main.yml b/roles/etcd_certificates/tasks/main.yml
index da875e8ea..3bb715943 100644
--- a/roles/etcd_certificates/tasks/main.yml
+++ b/roles/etcd_certificates/tasks/main.yml
@@ -4,6 +4,3 @@
- include: server.yml
when: etcd_needing_server_certs is defined and etcd_needing_server_certs
-
-
-
diff --git a/roles/etcd_certificates/tasks/server.yml b/roles/etcd_certificates/tasks/server.yml
index 727b7fa2c..3499dcbef 100644
--- a/roles/etcd_certificates/tasks/server.yml
+++ b/roles/etcd_certificates/tasks/server.yml
@@ -18,7 +18,7 @@
creates: "{{ etcd_generated_certs_dir ~ '/' ~ item.etcd_cert_subdir ~ '/'
~ item.etcd_cert_prefix ~ 'server.csr' }}"
environment:
- SAN: "IP:{{ item.openshift.common.ip }}"
+ SAN: "IP:{{ etcd_host_int_map[item.inventory_hostname].interface.ipv4.address }}"
with_items: etcd_needing_server_certs
- name: Sign and create the server crt
@@ -32,7 +32,7 @@
creates: "{{ etcd_generated_certs_dir ~ '/' ~ item.etcd_cert_subdir ~ '/'
~ item.etcd_cert_prefix ~ 'server.crt' }}"
environment:
- SAN: ''
+ SAN: "IP:{{ etcd_host_int_map[item.inventory_hostname].interface.ipv4.address }}"
with_items: etcd_needing_server_certs
- name: Create the peer csr
@@ -47,7 +47,7 @@
creates: "{{ etcd_generated_certs_dir ~ '/' ~ item.etcd_cert_subdir ~ '/'
~ item.etcd_cert_prefix ~ 'peer.csr' }}"
environment:
- SAN: "IP:{{ item.openshift.common.ip }}"
+ SAN: "IP:{{ etcd_host_int_map[item.inventory_hostname].interface.ipv4.address }}"
with_items: etcd_needing_server_certs
- name: Sign and create the peer crt
@@ -61,7 +61,7 @@
creates: "{{ etcd_generated_certs_dir ~ '/' ~ item.etcd_cert_subdir ~ '/'
~ item.etcd_cert_prefix ~ 'peer.crt' }}"
environment:
- SAN: ''
+ SAN: "IP:{{ etcd_host_int_map[item.inventory_hostname].interface.ipv4.address }}"
with_items: etcd_needing_server_certs
- file:
@@ -69,5 +69,3 @@
dest: "{{ etcd_generated_certs_dir}}/{{ item.etcd_cert_subdir }}/{{ item.etcd_cert_prefix }}ca.crt"
state: hard
with_items: etcd_needing_server_certs
-
-
diff --git a/roles/etcd_certificates/vars/main.yml b/roles/etcd_certificates/vars/main.yml
deleted file mode 100644
index 0eaeeb82b..000000000
--- a/roles/etcd_certificates/vars/main.yml
+++ /dev/null
@@ -1,11 +0,0 @@
----
-etcd_conf_dir: /etc/etcd
-etcd_ca_dir: /etc/etcd/ca
-etcd_generated_certs_dir: /etc/etcd/generated_certs
-etcd_ca_cert: "{{ etcd_ca_dir }}/ca.crt"
-etcd_ca_key: "{{ etcd_ca_dir }}/ca.key"
-etcd_openssl_conf: "{{ etcd_ca_dir }}/openssl.cnf"
-etcd_ca_name: etcd_ca
-etcd_req_ext: etcd_v3_req
-etcd_ca_exts_peer: etcd_v3_ca_peer
-etcd_ca_exts_server: etcd_v3_ca_server
diff --git a/roles/etcd_common/README.md b/roles/etcd_common/README.md
new file mode 100644
index 000000000..131a01490
--- /dev/null
+++ b/roles/etcd_common/README.md
@@ -0,0 +1,34 @@
+etcd_common
+========================
+
+TODO
+
+Requirements
+------------
+
+TODO
+
+Role Variables
+--------------
+
+TODO
+
+Dependencies
+------------
+
+openshift-repos
+
+Example Playbook
+----------------
+
+TODO
+
+License
+-------
+
+Apache License Version 2.0
+
+Author Information
+------------------
+
+Jason DeTiberus (jdetiber@redhat.com)
diff --git a/roles/etcd_common/defaults/main.yml b/roles/etcd_common/defaults/main.yml
new file mode 100644
index 000000000..96f4b63af
--- /dev/null
+++ b/roles/etcd_common/defaults/main.yml
@@ -0,0 +1,30 @@
+---
+etcd_peers_group: etcd
+
+# etcd server vars
+etcd_conf_dir: /etc/etcd
+etcd_ca_file: "{{ etcd_conf_dir }}/ca.crt"
+etcd_cert_file: "{{ etcd_conf_dir }}/server.crt"
+etcd_key_file: "{{ etcd_conf_dir }}/server.key"
+etcd_peer_ca_file: "{{ etcd_conf_dir }}/ca.crt"
+etcd_peer_cert_file: "{{ etcd_conf_dir }}/peer.crt"
+etcd_peer_key_file: "{{ etcd_conf_dir }}/peer.key"
+
+# etcd ca vars
+etcd_ca_dir: "{{ etcd_conf_dir}}/ca"
+etcd_generated_certs_dir: "{{ etcd_conf_dir }}/generated_certs"
+etcd_ca_cert: "{{ etcd_ca_dir }}/ca.crt"
+etcd_ca_key: "{{ etcd_ca_dir }}/ca.key"
+etcd_openssl_conf: "{{ etcd_ca_dir }}/openssl.cnf"
+etcd_ca_name: etcd_ca
+etcd_req_ext: etcd_v3_req
+etcd_ca_exts_peer: etcd_v3_ca_peer
+etcd_ca_exts_server: etcd_v3_ca_server
+etcd_ca_exts_self: etcd_v3_ca_self
+etcd_ca_exts_client: etcd_v3_ca_client
+etcd_ca_crl_dir: "{{ etcd_ca_dir }}/crl"
+etcd_ca_new_certs_dir: "{{ etcd_ca_dir }}/certs"
+etcd_ca_db: "{{ etcd_ca_dir }}/index.txt"
+etcd_ca_serial: "{{ etcd_ca_dir }}/serial"
+etcd_ca_crl_number: "{{ etcd_ca_dir }}/crlnumber"
+etcd_ca_default_days: 365
diff --git a/roles/etcd_common/meta/main.yml b/roles/etcd_common/meta/main.yml
new file mode 100644
index 000000000..fb9280c9e
--- /dev/null
+++ b/roles/etcd_common/meta/main.yml
@@ -0,0 +1,16 @@
+---
+galaxy_info:
+ author: Jason DeTiberus
+ description:
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 1.9
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+ - system
+dependencies:
+- { role: openshift_repos }
diff --git a/roles/etcd_common/tasks/main.yml b/roles/etcd_common/tasks/main.yml
new file mode 100644
index 000000000..cd108495d
--- /dev/null
+++ b/roles/etcd_common/tasks/main.yml
@@ -0,0 +1,13 @@
+---
+- set_fact:
+ etcd_host_int_map: "{{ lookup('template', '../templates/host_int_map.j2') | from_yaml }}"
+
+- fail:
+ msg: "Interface {{ item.value.etcd_interface }} not found on host {{ item.key }}"
+ when: "'etcd_interface' in item.value and 'interface' not in item.value"
+ with_dict: etcd_host_int_map
+
+- fail:
+ msg: IPv4 address not found for {{ item.value.interface.device }} on host {{ item.key }}
+ when: "'ipv4' not in item.value.interface or 'address' not in item.value.interface.ipv4"
+ with_dict: etcd_host_int_map
diff --git a/roles/etcd_common/templates/host_int_map.j2 b/roles/etcd_common/templates/host_int_map.j2
new file mode 100644
index 000000000..9c9c76413
--- /dev/null
+++ b/roles/etcd_common/templates/host_int_map.j2
@@ -0,0 +1,13 @@
+---
+{% for host in groups[etcd_peers_group] %}
+{% set entry=hostvars[host] %}
+{{ entry.inventory_hostname }}:
+{% if 'etcd_interface' in entry %}
+ etcd_interface: {{ entry.etcd_interface }}
+{% if entry.etcd_interface in entry.ansible_interfaces %}
+ interface: {{ entry['ansible_' ~ entry.etcd_interface] | to_json }}
+{% endif %}
+{% else %}
+ interface: {{ entry['ansible_' ~ entry.ansible_default_ipv4.interface] | to_json }}
+{% endif %}
+{% endfor %}
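
host_int_map.j2 renders a per-host YAML map that etcd_common loads with from_yaml; etcd.conf.j2 then reads each peer's address as etcd_host_int_map[host].interface.ipv4.address. A hypothetical rendering for two peers, one with an explicit etcd_interface:

    etcd1.example.com:
      etcd_interface: eth1
      interface: {"device": "eth1", "ipv4": {"address": "10.0.0.1"}}
    etcd2.example.com:
      interface: {"device": "eth0", "ipv4": {"address": "10.0.0.2"}}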
diff --git a/roles/lib_zabbix/library/zbx_item.py b/roles/lib_zabbix/library/zbx_item.py
index 2ccc21292..5dc3cff9b 100644
--- a/roles/lib_zabbix/library/zbx_item.py
+++ b/roles/lib_zabbix/library/zbx_item.py
@@ -53,6 +53,8 @@ def get_value_type(value_type):
vtype = 0
if 'int' in value_type:
vtype = 3
+ elif 'log' in value_type:
+ vtype = 2
elif 'char' in value_type:
vtype = 1
elif 'str' in value_type:
@@ -105,6 +107,39 @@ def get_multiplier(inval):
return rval, 0
+def get_zabbix_type(ztype):
+ '''
+ Map a zabbix_type name onto its numeric item type
+ '''
+ _types = {'agent': 0,
+ 'SNMPv1': 1,
+ 'trapper': 2,
+ 'simple': 3,
+ 'SNMPv2': 4,
+ 'internal': 5,
+ 'SNMPv3': 6,
+ 'active': 7,
+ 'aggregate': 8,
+ 'web': 9,
+ 'external': 10,
+ 'database monitor': 11,
+ 'ipmi': 12,
+ 'ssh': 13,
+ 'telnet': 14,
+ 'calculated': 15,
+ 'JMX': 16,
+ 'SNMP trap': 17,
+ }
+
+ for typ in _types.keys():
+ if ztype in typ or ztype == typ:
+ _vtype = _types[typ]
+ break
+ else:
+ _vtype = 2
+
+ return _vtype
+
# The branches are needed for CRUD and error handling
# pylint: disable=too-many-branches
def main():
@@ -121,8 +156,10 @@ def main():
name=dict(default=None, type='str'),
key=dict(default=None, type='str'),
template_name=dict(default=None, type='str'),
- zabbix_type=dict(default=2, type='int'),
+ zabbix_type=dict(default='trapper', type='str'),
value_type=dict(default='int', type='str'),
+ interval=dict(default=60, type='int'),
+ delta=dict(default=0, type='int'),
multiplier=dict(default=None, type='str'),
description=dict(default=None, type='str'),
units=dict(default=None, type='str'),
@@ -180,13 +217,15 @@ def main():
params = {'name': module.params.get('name', module.params['key']),
'key_': module.params['key'],
'hostid': templateid[0],
- 'type': module.params['zabbix_type'],
+ 'type': get_zabbix_type(module.params['zabbix_type']),
'value_type': get_value_type(module.params['value_type']),
'applications': get_app_ids(module.params['applications'], app_name_ids),
'formula': formula,
'multiplier': use_multiplier,
'description': module.params['description'],
'units': module.params['units'],
+ 'delay': module.params['interval'],
+ 'delta': module.params['delta'],
}
# Remove any None valued params
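
With zabbix_type now accepting names, interval mapping onto delay, and delta passed through, a task using this module might look like the following sketch (server credentials, template, and key names are hypothetical):

    - name: Create a trapper item
      zbx_item:
        zbx_server: "{{ server }}"
        zbx_user: "{{ user }}"
        zbx_password: "{{ password }}"
        name: disk.used
        key: disk.used
        template_name: Template Example
        applications:
        - Disk
        zabbix_type: trapper
        value_type: int
        interval: 60
        delta: 0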
diff --git a/roles/lib_zabbix/library/zbx_itemprototype.py b/roles/lib_zabbix/library/zbx_itemprototype.py
index 4ec1b8e02..e7fd6fa21 100644
--- a/roles/lib_zabbix/library/zbx_itemprototype.py
+++ b/roles/lib_zabbix/library/zbx_itemprototype.py
@@ -128,12 +128,12 @@ def get_status(status):
return _status
-def get_app_ids(zapi, application_names):
+def get_app_ids(zapi, application_names, templateid):
''' get application ids from names
'''
app_ids = []
for app_name in application_names:
- content = zapi.get_content('application', 'get', {'search': {'name': app_name}})
+ content = zapi.get_content('application', 'get', {'filter': {'name': app_name}, 'templateids': templateid})
if content.has_key('result'):
app_ids.append(content['result'][0]['applicationid'])
return app_ids
@@ -212,7 +212,7 @@ def main():
'ruleid': get_rule_id(zapi, module.params['discoveryrule_key'], template['templateid']),
'type': get_type(module.params['ztype']),
'value_type': get_value_type(module.params['value_type']),
- 'applications': get_app_ids(zapi, module.params['applications']),
+ 'applications': get_app_ids(zapi, module.params['applications'], template['templateid']),
'description': module.params['description'],
}
diff --git a/roles/lib_zabbix/library/zbx_trigger.py b/roles/lib_zabbix/library/zbx_trigger.py
index 21d0fcfd2..ab7731faa 100644
--- a/roles/lib_zabbix/library/zbx_trigger.py
+++ b/roles/lib_zabbix/library/zbx_trigger.py
@@ -86,6 +86,24 @@ def get_trigger_status(inc_status):
return r_status
+def get_template_id(zapi, template_name):
+ '''
+ get related templates
+ '''
+ template_ids = []
+ app_ids = {}
+ # Fetch templates by name
+ content = zapi.get_content('template',
+ 'get',
+ {'search': {'host': template_name},
+ 'selectApplications': ['applicationid', 'name']})
+ if content.has_key('result'):
+ template_ids.append(content['result'][0]['templateid'])
+ for app in content['result'][0]['applications']:
+ app_ids[app['name']] = app['applicationid']
+
+ return template_ids, app_ids
+
def main():
'''
Create a trigger in zabbix
@@ -117,6 +135,7 @@ def main():
url=dict(default=None, type='str'),
status=dict(default=None, type='str'),
state=dict(default='present', type='str'),
+ template_name=dict(default=None, type='str'),
),
#supports_check_mode=True
)
@@ -132,11 +151,16 @@ def main():
state = module.params['state']
tname = module.params['name']
+ templateid = None
+ if module.params['template_name']:
+ templateid, _ = get_template_id(zapi, module.params['template_name'])
+
content = zapi.get_content(zbx_class_name,
'get',
{'filter': {'description': tname},
'expandExpression': True,
'selectDependencies': 'triggerid',
+ 'templateids': templateid,
})
# Get
diff --git a/roles/lib_zabbix/library/zbx_user_media.py b/roles/lib_zabbix/library/zbx_user_media.py
index 9ed838f81..8895c78c3 100644
--- a/roles/lib_zabbix/library/zbx_user_media.py
+++ b/roles/lib_zabbix/library/zbx_user_media.py
@@ -260,6 +260,9 @@ def main():
for user in params['users']:
diff['users']['userid'] = user['userid']
+ # Medias have no real unique key, so mirror the incoming user's medias in the update request
+ diff['medias'] = medias
+
# We have differences and need to update
content = zapi.get_content(zbx_class_name, 'updatemedia', diff)
diff --git a/roles/lib_zabbix/tasks/create_template.yml b/roles/lib_zabbix/tasks/create_template.yml
index 62259b680..ac9cf756b 100644
--- a/roles/lib_zabbix/tasks/create_template.yml
+++ b/roles/lib_zabbix/tasks/create_template.yml
@@ -9,7 +9,8 @@
- set_fact:
- lzbx_applications: "{{ template.zitems | oo_select_keys_from_list(['applications']) | oo_flatten | unique }}"
+ lzbx_item_applications: "{{ template.zitems | default([], True) | oo_select_keys_from_list(['applications']) | oo_flatten | unique }}"
+ lzbx_itemprototype_applications: "{{ template.zitemprototypes | default([], True) | oo_select_keys_from_list(['applications']) | oo_flatten | unique }}"
- name: Create Application
zbx_application:
@@ -18,9 +19,11 @@
zbx_password: "{{ password }}"
name: "{{ item }}"
template_name: "{{ template.name }}"
- with_items: lzbx_applications
+ with_items:
+ - "{{ lzbx_item_applications }}"
+ - "{{ lzbx_itemprototype_applications }}"
register: created_application
- when: template.zitems is defined
+ when: template.zitems is defined or template.zitemprototypes is defined
- name: Create Items
zbx_item:
@@ -35,6 +38,9 @@
units: "{{ item.units | default('', True) }}"
template_name: "{{ template.name }}"
applications: "{{ item.applications }}"
+ zabbix_type: "{{ item.zabbix_type | default('trapper') }}"
+ interval: "{{ item.interval | default(60, True) }}"
+ delta: "{{ item.delta | default(0, True) }}"
with_items: template.zitems
register: created_items
when: template.zitems is defined
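
The template data structure these tasks iterate over now carries the new per-item fields; a hypothetical template var showing where zabbix_type, interval, and delta sit:

    g_template_example:
      name: Template Example
      zitems:
      - key: disk.used
        applications:
        - Disk
        value_type: int
        zabbix_type: trapper
        interval: 60
        delta: 0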
diff --git a/roles/openshift_ansible_inventory/tasks/main.yml b/roles/openshift_ansible_inventory/tasks/main.yml
index 5fe77e38b..9cc15c0a8 100644
--- a/roles/openshift_ansible_inventory/tasks/main.yml
+++ b/roles/openshift_ansible_inventory/tasks/main.yml
@@ -3,8 +3,9 @@
name: openshift-ansible-inventory
state: present
-- template:
- src: multi_ec2.yaml.j2
+- name: Write the multi_ec2 inventory configuration
+ copy:
+ content: "{{ oo_inventory_accounts | to_nice_yaml }}"
dest: /etc/ansible/multi_ec2.yaml
group: "{{ oo_inventory_group }}"
owner: "{{ oo_inventory_owner }}"
@@ -39,5 +40,5 @@
owner: root
group: libra_ops
recurse: yes
- mode: '2750'
+ mode: '2770'
when: oo_inventory_cache_location is defined
diff --git a/roles/openshift_ansible_inventory/templates/multi_ec2.yaml.j2 b/roles/openshift_ansible_inventory/templates/multi_ec2.yaml.j2
deleted file mode 100644
index 8228ab915..000000000
--- a/roles/openshift_ansible_inventory/templates/multi_ec2.yaml.j2
+++ /dev/null
@@ -1,26 +0,0 @@
-# multi ec2 inventory configs
-cache_max_age: {{ oo_inventory_cache_max_age }}
-cache_location: {{ oo_inventory_cache_location | default('~/.ansible/tmp/multi_ec2_inventory.cache') }}
-accounts:
-{% for account in oo_inventory_accounts %}
- - name: {{ account.name }}
- provider: {{ account.provider }}
- provider_config:
-{% for section, items in account.provider_config.items() %}
- {{ section }}:
-{% for property, value in items.items() %}
- {{ property }}: {{ value }}
-{% endfor %}
-{% endfor %}
- env_vars:
- AWS_ACCESS_KEY_ID: {{ account.env_vars.AWS_ACCESS_KEY_ID }}
- AWS_SECRET_ACCESS_KEY: {{ account.env_vars.AWS_SECRET_ACCESS_KEY }}
-{% if account.all_group is defined and account.hostvars is defined%}
- all_group: {{ account.all_group }}
- hostvars:
-{% for property, value in account.hostvars.items() %}
- {{ property }}: {{ value }}
-{% endfor %}
-{% endif %}
-
-{% endfor %}
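
The deleted template above documents the shape of oo_inventory_accounts, which the role now serializes directly with to_nice_yaml. A hypothetical accounts entry matching that shape:

    oo_inventory_accounts:
    - name: example-account
      provider: aws
      provider_config:
        ec2:
          regions: us-east-1
      env_vars:
        AWS_ACCESS_KEY_ID: XXXXXXXX
        AWS_SECRET_ACCESS_KEY: XXXXXXXX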
diff --git a/roles/openshift_examples/defaults/main.yml b/roles/openshift_examples/defaults/main.yml
index 7d4f100e3..2043985ec 100644
--- a/roles/openshift_examples/defaults/main.yml
+++ b/roles/openshift_examples/defaults/main.yml
@@ -1,9 +1,9 @@
---
# By default install rhel and xpaas streams on enterprise installs
-openshift_examples_load_centos: "{{ openshift_deployment_type != 'enterprise' }}"
-openshift_examples_load_rhel: "{{ openshift_deployment_type == 'enterprise' }}"
+openshift_examples_load_centos: "{{ openshift_deployment_type not in ['enterprise','openshift-enterprise','atomic-enterprise','online'] }}"
+openshift_examples_load_rhel: "{{ openshift_deployment_type in ['enterprise','openshift-enterprise','atomic-enterprise','online'] }}"
openshift_examples_load_db_templates: true
-openshift_examples_load_xpaas: "{{ openshift_deployment_type == 'enterprise' }}"
+openshift_examples_load_xpaas: "{{ openshift_deployment_type in ['enterprise','openshift-enterprise','atomic-enterprise','online'] }}"
openshift_examples_load_quickstarts: true
examples_base: /usr/share/openshift/examples
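
These defaults key entirely off openshift_deployment_type; a hypothetical inventory override for an enterprise install that also wants the CentOS streams:

    openshift_examples_load_centos: true
    openshift_examples_load_xpaas: true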
diff --git a/roles/openshift_examples/examples-sync.sh b/roles/openshift_examples/examples-sync.sh
index 3047a3add..21137e31b 100755
--- a/roles/openshift_examples/examples-sync.sh
+++ b/roles/openshift_examples/examples-sync.sh
@@ -15,7 +15,7 @@ wget https://github.com/openshift/rails-ex/archive/master.zip -O rails-ex-master
wget https://github.com/openshift/nodejs-ex/archive/master.zip -O nodejs-ex-master.zip
wget https://github.com/openshift/dancer-ex/archive/master.zip -O dancer-ex-master.zip
wget https://github.com/openshift/cakephp-ex/archive/master.zip -O cakephp-ex-master.zip
-wget https://github.com/jboss-openshift/application-templates/archive/master.zip -O application-templates-master.zip
+wget https://github.com/jboss-openshift/application-templates/archive/ose-v1.0.2.zip -O application-templates-master.zip
unzip origin-master.zip
unzip django-ex-master.zip
unzip rails-ex-master.zip
diff --git a/roles/openshift_examples/files/examples/image-streams/image-streams-centos7.json b/roles/openshift_examples/files/examples/image-streams/image-streams-centos7.json
index f213d99ca..268d680f4 100644
--- a/roles/openshift_examples/files/examples/image-streams/image-streams-centos7.json
+++ b/roles/openshift_examples/files/examples/image-streams/image-streams-centos7.json
@@ -23,7 +23,8 @@
"iconClass": "icon-ruby",
"tags": "builder,ruby",
"supports": "ruby:2.0,ruby",
- "version": "2.0"
+ "version": "2.0",
+ "sampleRepo": "https://github.com/openshift/ruby-ex.git"
},
"from": {
"Kind": "ImageStreamTag",
@@ -53,7 +54,8 @@
"iconClass": "icon-nodejs",
"tags": "builder,nodejs",
"supports":"nodejs:0.10,nodejs:0.1,nodejs",
- "version": "0.10"
+ "version": "0.10",
+ "sampleRepo": "https://github.com/openshift/nodejs-ex.git"
},
"from": {
"Kind": "ImageStreamTag",
@@ -83,7 +85,8 @@
"iconClass": "icon-perl",
"tags": "builder,perl",
"supports":"perl:5.16,perl",
- "version": "5.16"
+ "version": "5.16",
+ "sampleRepo": "https://github.com/openshift/dancer-ex.git"
},
"from": {
"Kind": "ImageStreamTag",
@@ -113,7 +116,8 @@
"iconClass": "icon-php",
"tags": "builder,php",
"supports":"php:5.5,php",
- "version": "5.5"
+ "version": "5.5",
+ "sampleRepo": "https://github.com/openshift/cakephp-ex.git"
},
"from": {
"Kind": "ImageStreamTag",
@@ -143,7 +147,8 @@
"iconClass": "icon-python",
"tags": "builder,python",
"supports":"python:3.3,python",
- "version": "3.3"
+ "version": "3.3",
+ "sampleRepo": "https://github.com/openshift/django-ex.git"
},
"from": {
"Kind": "ImageStreamTag",
@@ -173,7 +178,8 @@
"iconClass": "icon-wildfly",
"tags": "builder,wildfly,java",
"supports":"wildfly:8.1,jee,java",
- "version": "8.1"
+ "version": "8.1",
+ "sampleRepo": "https://github.com/bparees/openshift-jee-sample.git"
},
"from": {
"Kind": "ImageStreamTag",
diff --git a/roles/openshift_examples/files/examples/image-streams/image-streams-rhel7.json b/roles/openshift_examples/files/examples/image-streams/image-streams-rhel7.json
index 8c125f76a..aa62ebd53 100644
--- a/roles/openshift_examples/files/examples/image-streams/image-streams-rhel7.json
+++ b/roles/openshift_examples/files/examples/image-streams/image-streams-rhel7.json
@@ -23,7 +23,8 @@
"iconClass": "icon-ruby",
"tags": "builder,ruby",
"supports": "ruby:2.0,ruby",
- "version": "2.0"
+ "version": "2.0",
+ "sampleRepo": "https://github.com/openshift/ruby-ex.git"
},
"from": {
"Kind": "ImageStreamTag",
@@ -53,7 +54,8 @@
"iconClass": "icon-nodejs",
"tags": "builder,nodejs",
"supports":"nodejs:0.10,nodejs:0.1,nodejs",
- "version": "0.10"
+ "version": "0.10",
+ "sampleRepo": "https://github.com/openshift/nodejs-ex.git"
},
"from": {
"Kind": "ImageStreamTag",
@@ -83,7 +85,8 @@
"iconClass": "icon-perl",
"tags": "builder,perl",
"supports":"perl:5.16,perl",
- "version": "5.16"
+ "version": "5.16",
+ "sampleRepo": "https://github.com/openshift/dancer-ex.git"
},
"from": {
"Kind": "ImageStreamTag",
@@ -113,7 +116,8 @@
"iconClass": "icon-php",
"tags": "builder,php",
"supports":"php:5.5,php",
- "version": "5.5"
+ "version": "5.5",
+ "sampleRepo": "https://github.com/openshift/cakephp-ex.git"
},
"from": {
"Kind": "ImageStreamTag",
@@ -143,7 +147,8 @@
"iconClass": "icon-python",
"tags": "builder,python",
"supports":"python:3.3,python",
- "version": "3.3"
+ "version": "3.3",
+ "sampleRepo": "https://github.com/openshift/django-ex.git"
},
"from": {
"Kind": "ImageStreamTag",
diff --git a/roles/openshift_examples/files/examples/quickstart-templates/cakephp-mysql.json b/roles/openshift_examples/files/examples/quickstart-templates/cakephp-mysql.json
index deac2010f..da5679444 100644
--- a/roles/openshift_examples/files/examples/quickstart-templates/cakephp-mysql.json
+++ b/roles/openshift_examples/files/examples/quickstart-templates/cakephp-mysql.json
@@ -201,6 +201,10 @@
{
"name": "CAKEPHP_SECURITY_CIPHER_SEED",
"value": "${CAKEPHP_SECURITY_CIPHER_SEED}"
+ },
+ {
+ "name": "OPCACHE_REVALIDATE_FREQ",
+ "value": "${OPCACHE_REVALIDATE_FREQ}"
}
]
}
@@ -364,6 +368,11 @@
"description": "Security cipher seed for session hash",
"generate": "expression",
"from": "[0-9]{30}"
+ },
+ {
+ "name": "OPCACHE_REVALIDATE_FREQ",
+ "description": "The How often to check script timestamps for updates, in seconds. 0 will result in OPcache checking for updates on every request.",
+ "value": "2"
}
]
}
diff --git a/roles/openshift_examples/files/examples/quickstart-templates/cakephp.json b/roles/openshift_examples/files/examples/quickstart-templates/cakephp.json
index ec556ea13..f426e1dd6 100644
--- a/roles/openshift_examples/files/examples/quickstart-templates/cakephp.json
+++ b/roles/openshift_examples/files/examples/quickstart-templates/cakephp.json
@@ -190,6 +190,10 @@
{
"name": "CAKEPHP_SECURITY_CIPHER_SEED",
"value": "${CAKEPHP_SECURITY_CIPHER_SEED}"
+ },
+ {
+ "name": "OPCACHE_REVALIDATE_FREQ",
+ "value": "${OPCACHE_REVALIDATE_FREQ}"
}
]
}
@@ -261,6 +265,11 @@
"description": "Security cipher seed for session hash",
"generate": "expression",
"from": "[0-9]{30}"
+ },
+ {
+ "name": "OPCACHE_REVALIDATE_FREQ",
+ "description": "The How often to check script timestamps for updates, in seconds. 0 will result in OPcache checking for updates on every request.",
+ "value": "2"
}
]
}
diff --git a/roles/openshift_examples/files/examples/quickstart-templates/dancer-mysql.json b/roles/openshift_examples/files/examples/quickstart-templates/dancer-mysql.json
index 2cbcc0889..55f655102 100644
--- a/roles/openshift_examples/files/examples/quickstart-templates/dancer-mysql.json
+++ b/roles/openshift_examples/files/examples/quickstart-templates/dancer-mysql.json
@@ -175,6 +175,10 @@
{
"name": "SECRET_KEY_BASE",
"value": "${SECRET_KEY_BASE}"
+ },
+ {
+ "name": "PERL_APACHE2_RELOAD",
+ "value": "${PERL_APACHE2_RELOAD}"
}
]
}
@@ -330,6 +334,11 @@
"value": "openshift/mysql-55-centos7"
},
{
+ "name": "PERL_APACHE2_RELOAD",
+ "description": "Set this to \"true\" to enable automatic reloading of modified Perl modules",
+ "value": ""
+ },
+ {
"name": "SECRET_KEY_BASE",
"description": "Your secret key for verifying the integrity of signed cookies",
"generate": "expression",
diff --git a/roles/openshift_examples/files/examples/quickstart-templates/dancer.json b/roles/openshift_examples/files/examples/quickstart-templates/dancer.json
index 43271dfc5..3ee19be83 100644
--- a/roles/openshift_examples/files/examples/quickstart-templates/dancer.json
+++ b/roles/openshift_examples/files/examples/quickstart-templates/dancer.json
@@ -157,6 +157,12 @@
{
"containerPort": 8080
}
+ ],
+ "env": [
+ {
+ "name": "PERL_APACHE2_RELOAD",
+ "value": "${PERL_APACHE2_RELOAD}"
+ }
]
}
]
@@ -195,6 +201,11 @@
"description": "Your secret key for verifying the integrity of signed cookies",
"generate": "expression",
"from": "[a-z0-9]{127}"
+ },
+ {
+ "name": "PERL_APACHE2_RELOAD",
+ "description": "Set this to \"true\" to enable automatic reloading of modified Perl modules",
+ "value": ""
}
]
}
diff --git a/roles/openshift_examples/files/examples/quickstart-templates/nodejs-mongodb.json b/roles/openshift_examples/files/examples/quickstart-templates/nodejs-mongodb.json
index 017b5be19..8760b074c 100644
--- a/roles/openshift_examples/files/examples/quickstart-templates/nodejs-mongodb.json
+++ b/roles/openshift_examples/files/examples/quickstart-templates/nodejs-mongodb.json
@@ -102,6 +102,12 @@
"github": {
"secret": "${GITHUB_WEBHOOK_SECRET}"
}
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
}
]
}
@@ -298,6 +304,12 @@
"from": "[a-zA-Z0-9]{40}"
},
{
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "description": "A secret string used to configure the Generic webhook",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
"name": "DATABASE_SERVICE_NAME",
"description": "Database service name",
"value": "mongodb"
@@ -328,7 +340,7 @@
{
"name": "MONGODB_IMAGE",
"description": "Image to use for mongodb",
- "value": "openshift/mongodb-24-centos7"
+ "value": "openshift/mongodb-24-centos7"
}
]
}
diff --git a/roles/openshift_examples/files/examples/quickstart-templates/nodejs.json b/roles/openshift_examples/files/examples/quickstart-templates/nodejs.json
index 55488ab41..e047266e3 100644
--- a/roles/openshift_examples/files/examples/quickstart-templates/nodejs.json
+++ b/roles/openshift_examples/files/examples/quickstart-templates/nodejs.json
@@ -102,6 +102,12 @@
"github": {
"secret": "${GITHUB_WEBHOOK_SECRET}"
}
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
}
]
}
@@ -213,6 +219,12 @@
"from": "[a-zA-Z0-9]{40}"
},
{
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "description": "A secret string used to configure the Generic webhook",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
"name": "DATABASE_SERVICE_NAME",
"description": "Database service name"
},
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/eap6-https-sti.json b/roles/openshift_examples/files/examples/xpaas-templates/eap6-https-sti.json
index 0497e6824..5df36ccc2 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/eap6-https-sti.json
+++ b/roles/openshift_examples/files/examples/xpaas-templates/eap6-https-sti.json
@@ -6,10 +6,10 @@
"iconClass" : "icon-jboss",
"description": "Application template for EAP 6 applications built using STI."
},
- "name": "eap6-basic-sti"
+ "name": "eap6-https-sti"
},
"labels": {
- "template": "eap6-basic-sti"
+ "template": "eap6-https-sti"
},
"parameters": [
{
diff --git a/roles/openshift_examples/tasks/main.yml b/roles/openshift_examples/tasks/main.yml
index 3a829a4c6..40b7a5d6e 100644
--- a/roles/openshift_examples/tasks/main.yml
+++ b/roles/openshift_examples/tasks/main.yml
@@ -32,7 +32,7 @@
- name: Import quickstart-templates
command: >
{{ openshift.common.client_binary }} {{ openshift_examples_import_command }} -n openshift -f {{ quickstarts_base }}
- when: openshift_examples_load_quickstarts
+ when: openshift_examples_load_quickstarts | bool
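+ # the "| bool" cast matters here: a bare string value such as "false"
+ # (e.g. passed with -e on the CLI) would otherwise evaluate as truthy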
register: oex_import_quickstarts
failed_when: "'already exists' not in oex_import_quickstarts.stderr and oex_import_quickstarts.rc != 0"
changed_when: false
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index f708f9bac..e5aeb9244 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -1,6 +1,10 @@
#!/usr/bin/python
+# pylint: disable=too-many-lines
# -*- coding: utf-8 -*-
# vim: expandtab:tabstop=4:shiftwidth=4
+# Reason: Disable pylint too-many-lines because we don't want to split up this file.
+# Status: Permanently disabled to keep this module as self-contained as possible.
+
"""Ansible module for retrieving and setting openshift related facts"""
DOCUMENTATION = '''
@@ -17,6 +21,7 @@ import ConfigParser
import copy
import os
from distutils.util import strtobool
+from distutils.version import LooseVersion
def hostname_valid(hostname):
@@ -296,9 +301,8 @@ def set_fluentd_facts_if_unset(facts):
"""
if 'common' in facts:
- deployment_type = facts['common']['deployment_type']
if 'use_fluentd' not in facts['common']:
- use_fluentd = True if deployment_type == 'online' else False
+ use_fluentd = False
facts['common']['use_fluentd'] = use_fluentd
return facts
@@ -319,6 +323,29 @@ def set_node_schedulability(facts):
facts['node']['schedulable'] = True
return facts
+def set_master_selectors(facts):
+ """ Set selectors facts if not already present in facts dict
+ Args:
+ facts (dict): existing facts
+ Returns:
+ dict: the facts dict updated with the generated selectors
+ facts if they were not already present
+
+ """
+ if 'master' in facts:
+ if 'infra_nodes' in facts['master']:
+ deployment_type = facts['common']['deployment_type']
+ if deployment_type == 'online':
+ selector = "type=infra"
+ else:
+ selector = "region=infra"
+
+ if 'router_selector' not in facts['master']:
+ facts['master']['router_selector'] = selector
+ if 'registry_selector' not in facts['master']:
+ facts['master']['registry_selector'] = selector
+ return facts
+
def set_metrics_facts_if_unset(facts):
""" Set cluster metrics facts if not already present in facts dict
dict: the facts dict updated with the generated cluster metrics facts if
@@ -336,6 +363,33 @@ def set_metrics_facts_if_unset(facts):
facts['common']['use_cluster_metrics'] = use_cluster_metrics
return facts
+def set_project_cfg_facts_if_unset(facts):
+ """ Set Project Configuration facts if not already present in facts dict
+ Args:
+ facts (dict): existing facts
+ Returns:
+ dict: the facts dict updated with the generated Project Configuration
+ facts if they were not already present
+
+ """
+
+ config = {
+ 'default_node_selector': '',
+ 'project_request_message': '',
+ 'project_request_template': '',
+ 'mcs_allocator_range': 's0:/2',
+ 'mcs_labels_per_project': 5,
+ 'uid_allocator_range': '1000000000-1999999999/10000'
+ }
+
+ if 'master' in facts:
+ for key, value in config.items():
+ if key not in facts['master']:
+ facts['master'][key] = value
+
+ return facts
+
def set_identity_providers_if_unset(facts):
""" Set identity_providers fact if not already present in facts dict
@@ -446,7 +500,7 @@ def set_aggregate_facts(facts):
def set_deployment_facts_if_unset(facts):
""" Set Facts that vary based on deployment_type. This currently
includes common.service_type, common.config_base, master.registry_url,
- node.registry_url
+ node.registry_url, node.storage_plugin_deps
Args:
facts (dict): existing facts
@@ -454,8 +508,9 @@ def set_deployment_facts_if_unset(facts):
dict: the facts dict updated with the generated deployment_type
facts
"""
- # Perhaps re-factor this as a map?
- # pylint: disable=too-many-branches
+ # disabled to avoid breaking up facts related to deployment type into
+ # multiple methods for now.
+ # pylint: disable=too-many-statements, too-many-branches
if 'common' in facts:
deployment_type = facts['common']['deployment_type']
if 'service_type' not in facts['common']:
@@ -475,18 +530,46 @@ def set_deployment_facts_if_unset(facts):
if deployment_type in ['enterprise', 'online']:
data_dir = '/var/lib/openshift'
facts['common']['data_dir'] = data_dir
+ facts['common']['version'] = version = get_openshift_version()
+ if version is not None:
+ if deployment_type == 'origin':
+ version_gt_3_1_or_1_1 = LooseVersion(version) > LooseVersion('1.0.6')
+ else:
+ version_gt_3_1_or_1_1 = LooseVersion(version) > LooseVersion('3.0.2.900')
+ else:
+ version_gt_3_1_or_1_1 = True
+ facts['common']['version_greater_than_3_1_or_1_1'] = version_gt_3_1_or_1_1
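+ # worked example (values assumed): for an origin host reporting '1.0.6',
+ # LooseVersion('1.0.6') > LooseVersion('1.0.6') is False, so only releases
+ # strictly newer than 1.0.6 are flagged as 1.1-or-later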
for role in ('master', 'node'):
if role in facts:
deployment_type = facts['common']['deployment_type']
if 'registry_url' not in facts[role]:
- registry_url = 'aos3/aos-${component}:${version}'
- if deployment_type in ['enterprise', 'online']:
+ registry_url = 'openshift/origin-${component}:${version}'
+ if deployment_type in ['enterprise', 'online', 'openshift-enterprise']:
registry_url = 'openshift3/ose-${component}:${version}'
- elif deployment_type == 'origin':
- registry_url = 'openshift/origin-${component}:${version}'
+ elif deployment_type == 'atomic-enterprise':
+ registry_url = 'aep3/aep-${component}:${version}'
facts[role]['registry_url'] = registry_url
+ if 'master' in facts:
+ deployment_type = facts['common']['deployment_type']
+ openshift_features = ['Builder', 'S2IBuilder', 'WebConsole']
+ if 'disabled_features' in facts['master']:
+ if deployment_type == 'atomic-enterprise':
+ curr_disabled_features = set(facts['master']['disabled_features'])
+ facts['master']['disabled_features'] = list(curr_disabled_features.union(openshift_features))
+ else:
+ if deployment_type == 'atomic-enterprise':
+ facts['master']['disabled_features'] = openshift_features
+
+ if 'node' in facts:
+ deployment_type = facts['common']['deployment_type']
+ if 'storage_plugin_deps' not in facts['node']:
+ if deployment_type in ['openshift-enterprise', 'atomic-enterprise']:
+ facts['node']['storage_plugin_deps'] = ['ceph', 'glusterfs']
+ else:
+ facts['node']['storage_plugin_deps'] = []
+
return facts
@@ -599,6 +682,21 @@ def get_current_config(facts):
return current_config
+def get_openshift_version():
+ """ Get current version of openshift on the host
+
+ Returns:
+ version: the current openshift version
+ """
+ version = None
+
+ if os.path.isfile('/usr/bin/openshift'):
+ _, output, _ = module.run_command(['/usr/bin/openshift', 'version'])
+ versions = dict(e.split(' v') for e in output.splitlines() if ' v' in e)
+ version = versions.get('openshift', '')
+
+ # TODO: acknowledge the possibility of a containerized install
+ return version
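+# Sketch of the parsing above, assuming `openshift version` prints lines like:
+#   openshift v1.0.6
+#   kubernetes v1.1.0-origin
+# each matching line splits on ' v' into a (name, version) pair, so versions
+# would be {'openshift': '1.0.6', 'kubernetes': '1.1.0-origin'} and '1.0.6'
+# would be returned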
def apply_provider_facts(facts, provider_facts):
""" Apply provider facts to supplied facts dict
@@ -765,8 +863,10 @@ class OpenShiftFacts(object):
facts = merge_facts(facts, local_facts)
facts['current_config'] = get_current_config(facts)
facts = set_url_facts_if_unset(facts)
+ facts = set_project_cfg_facts_if_unset(facts)
facts = set_fluentd_facts_if_unset(facts)
facts = set_node_schedulability(facts)
+ facts = set_master_selectors(facts)
facts = set_metrics_facts_if_unset(facts)
facts = set_identity_providers_if_unset(facts)
facts = set_sdn_facts_if_unset(facts)
diff --git a/roles/openshift_facts/tasks/main.yml b/roles/openshift_facts/tasks/main.yml
index fd3d20800..6301d4fc0 100644
--- a/roles/openshift_facts/tasks/main.yml
+++ b/roles/openshift_facts/tasks/main.yml
@@ -1,5 +1,5 @@
---
-- name: Verify Ansible version is greater than 1.8.0 and not 1.9.0
+- name: Verify Ansible version is greater than 1.8.0 and not 1.9.0 and not 1.9.0.1
assert:
that:
- ansible_version | version_compare('1.8.0', 'ge')
diff --git a/roles/openshift_manage_node/tasks/main.yml b/roles/openshift_manage_node/tasks/main.yml
index 7c4f45ce6..637e494ea 100644
--- a/roles/openshift_manage_node/tasks/main.yml
+++ b/roles/openshift_manage_node/tasks/main.yml
@@ -1,21 +1,21 @@
- name: Wait for Node Registration
command: >
- {{ openshift.common.client_binary }} get node {{ item }}
+ {{ openshift.common.client_binary }} get node {{ item | lower }}
register: omd_get_node
until: omd_get_node.rc == 0
- retries: 10
+ retries: 20
delay: 5
with_items: openshift_nodes
- name: Set node schedulability
command: >
- {{ openshift.common.admin_binary }} manage-node {{ item.openshift.common.hostname }} --schedulable={{ 'true' if item.openshift.node.schedulable | bool else 'false' }}
+ {{ openshift.common.admin_binary }} manage-node {{ item.openshift.common.hostname | lower }} --schedulable={{ 'true' if item.openshift.node.schedulable | bool else 'false' }}
with_items:
- "{{ openshift_node_vars }}"
- name: Label nodes
command: >
- {{ openshift.common.client_binary }} label --overwrite node {{ item.openshift.common.hostname }} {{ item.openshift.node.labels | oo_combine_dict }}
+ {{ openshift.common.client_binary }} label --overwrite node {{ item.openshift.common.hostname | lower }} {{ item.openshift.node.labels | oo_combine_dict }}
with_items:
- "{{ openshift_node_vars }}"
when: "'labels' in item.openshift.node and item.openshift.node.labels != {}"
diff --git a/roles/openshift_master/handlers/main.yml b/roles/openshift_master/handlers/main.yml
index 2981979e0..37028e0f6 100644
--- a/roles/openshift_master/handlers/main.yml
+++ b/roles/openshift_master/handlers/main.yml
@@ -1,4 +1,4 @@
---
- name: restart master
service: name={{ openshift.common.service_type }}-master state=restarted
- when: not openshift_master_ha | bool
+ when: (not openshift_master_ha | bool) and (not master_service_status_changed | default(false))
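+# master_service_status_changed is set from the start task's result in
+# tasks/main.yml; skipping the handler avoids restarting a freshly started service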
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index fa12005ab..3a886935f 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -52,27 +52,22 @@
default_subdomain: "{{ osm_default_subdomain | default(None) }}"
custom_cors_origins: "{{ osm_custom_cors_origins | default(None) }}"
default_node_selector: "{{ osm_default_node_selector | default(None) }}"
+ project_request_message: "{{ osm_project_request_message | default(None) }}"
+ project_request_template: "{{ osm_project_request_template | default(None) }}"
+ mcs_allocator_range: "{{ osm_mcs_allocator_range | default(None) }}"
+ mcs_labels_per_project: "{{ osm_mcs_labels_per_project | default(None) }}"
+ uid_allocator_range: "{{ osm_uid_allocator_range | default(None) }}"
+ router_selector: "{{ openshift_router_selector | default(None) }}"
+ registry_selector: "{{ openshift_registry_selector | default(None) }}"
api_server_args: "{{ osm_api_server_args | default(None) }}"
controller_args: "{{ osm_controller_args | default(None) }}"
+ infra_nodes: "{{ num_infra | default(None) }}"
+ disabled_features: "{{ osm_disabled_features | default(None) }}"
- name: Install Master package
yum: pkg={{ openshift.common.service_type }}-master{{ openshift_version }} state=present
register: install_result
-- name: Check for RPM generated config marker file /etc/origin/.config_managed
- stat: path=/etc/origin/.rpmgenerated
- register: rpmgenerated_config
-
-- name: Remove RPM generated config files
- file:
- path: "{{ item }}"
- state: absent
- when: openshift.common.service_type in ['atomic-enterprise','openshift-enterprise'] and rpmgenerated_config.stat.exists == true
- with_items:
- - "{{ openshift.common.config_base }}/master"
- - "{{ openshift.common.config_base }}/node"
- - "{{ openshift.common.config_base }}/.rpmgenerated"
-
# TODO: These values need to be configurable
- name: Set dns facts
openshift_facts:
@@ -152,9 +147,8 @@
when: not openshift_master_ha | bool
register: start_result
-- name: pause to prevent service restart from interfering with bootstrapping
- pause: seconds=30
- when: start_result | changed
+- set_fact:
+ master_service_status_changed: "{{ start_result | changed }}"
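+ # consumed by the "restart master" handler to skip a redundant restart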
- name: Install cluster packages
yum: pkg=pcs state=present
@@ -169,13 +163,17 @@
shell: echo {{ openshift_master_cluster_password | quote }} | passwd --stdin hacluster
when: install_result | changed
+- name: Lookup default group for ansible_ssh_user
+ command: "/usr/bin/id -g {{ ansible_ssh_user }}"
+ register: _ansible_ssh_user_gid
+
- name: Create the client config dir(s)
file:
path: "~{{ item }}/.kube"
state: directory
mode: 0700
owner: "{{ item }}"
- group: "{{ item }}"
+ group: "{{ 'root' if item == 'root' else _ansible_ssh_user_gid.stdout }}"
with_items:
- root
- "{{ ansible_ssh_user }}"
@@ -196,7 +194,7 @@
state: file
mode: 0700
owner: "{{ item }}"
- group: "{{ item }}"
+ group: "{{ 'root' if item == 'root' else _ansible_ssh_user_gid.stdout }}"
with_items:
- root
- "{{ ansible_ssh_user }}"
diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2
index 500690523..73a0bc6cc 100644
--- a/roles/openshift_master/templates/master.yaml.v1.j2
+++ b/roles/openshift_master/templates/master.yaml.v1.j2
@@ -1,5 +1,7 @@
apiLevels:
+{% if openshift.common.deployment_type == "enterprise" %}
- v1beta3
+{% endif %}
- v1
apiVersion: v1
assetConfig:
@@ -20,6 +22,9 @@ corsAllowedOrigins:
{% for custom_origin in openshift.master.custom_cors_origins | default("") %}
- {{ custom_origin }}
{% endfor %}
+{% if 'disabled_features' in openshift.master %}
+disabledFeatures: {{ openshift.master.disabled_features | to_json }}
+{% endif %}
{% if openshift.master.embedded_dns | bool %}
dnsConfig:
bindAddress: {{ openshift.master.bind_addr }}:{{ openshift.master.dns_port }}
@@ -66,7 +71,9 @@ kubeletClientInfo:
{% if openshift.master.embedded_kube | bool %}
kubernetesMasterConfig:
apiLevels:
+{% if openshift.common.deployment_type == "enterprise" %}
- v1beta3
+{% endif %}
- v1
apiServerArguments: {{ api_server_args if api_server_args is defined else 'null' }}
controllerArguments: {{ controller_args if controller_args is defined else 'null' }}
@@ -74,6 +81,9 @@ kubernetesMasterConfig:
masterCount: 1
masterIP: ""
podEvictionTimeout: ""
+ proxyClientInfo:
+ certFile: master.proxy-client.crt
+ keyFile: master.proxy-client.key
schedulerConfigFile: {{ openshift_master_scheduler_conf }}
servicesNodePortRange: ""
servicesSubnet: {{ openshift.master.portal_net }}
@@ -87,7 +97,9 @@ masterPublicURL: {{ openshift.master.public_api_url }}
networkConfig:
clusterNetworkCIDR: {{ openshift.master.sdn_cluster_network_cidr }}
hostSubnetLength: {{ openshift.master.sdn_host_subnet_length }}
+{% if openshift.common.use_openshift_sdn %}
networkPluginName: {{ openshift.common.sdn_network_plugin_name }}
+{% endif %}
# serviceNetworkCIDR must match kubernetesMasterConfig.servicesSubnet
serviceNetworkCIDR: {{ openshift.master.portal_net }}
{% include 'v1_partials/oauthConfig.j2' %}
@@ -95,15 +107,14 @@ policyConfig:
bootstrapPolicyFile: {{ openshift_master_policy }}
openshiftInfrastructureNamespace: openshift-infra
openshiftSharedResourcesNamespace: openshift
-{# TODO: Allow users to override projectConfig items #}
projectConfig:
- defaultNodeSelector: "{{ openshift.master.default_node_selector | default("") }}"
- projectRequestMessage: ""
- projectRequestTemplate: ""
+ defaultNodeSelector: "{{ openshift.master.default_node_selector }}"
+ projectRequestMessage: "{{ openshift.master.project_request_message }}"
+ projectRequestTemplate: "{{ openshift.master.project_request_template }}"
securityAllocator:
- mcsAllocatorRange: s0:/2
- mcsLabelsPerProject: 5
- uidAllocatorRange: 1000000000-1999999999/10000
+ mcsAllocatorRange: "{{ openshift.master.mcs_allocator_range }}"
+ mcsLabelsPerProject: {{ openshift.master.mcs_labels_per_project }}
+ uidAllocatorRange: "{{ openshift.master.uid_allocator_range }}"
routingConfig:
subdomain: "{{ openshift.master.default_subdomain | default("") }}"
serviceAccountConfig:
diff --git a/roles/openshift_master/templates/scheduler.json.j2 b/roles/openshift_master/templates/scheduler.json.j2
index 835f2383e..cb5f43bb2 100644
--- a/roles/openshift_master/templates/scheduler.json.j2
+++ b/roles/openshift_master/templates/scheduler.json.j2
@@ -1,4 +1,6 @@
{
+ "kind": "Policy",
+ "apiVersion": "v1",
"predicates": [
{"name": "MatchNodeSelector"},
{"name": "PodFitsResources"},
diff --git a/roles/openshift_master/templates/v1_partials/oauthConfig.j2 b/roles/openshift_master/templates/v1_partials/oauthConfig.j2
index 72889bc29..8a4f5a746 100644
--- a/roles/openshift_master/templates/v1_partials/oauthConfig.j2
+++ b/roles/openshift_master/templates/v1_partials/oauthConfig.j2
@@ -80,6 +80,7 @@ oauthConfig:
provider:
{{ identity_provider_config(identity_provider) }}
{%- endfor %}
+ masterCA: ca.crt
masterPublicURL: {{ openshift.master.public_api_url }}
masterURL: {{ openshift.master.api_url }}
sessionConfig:
diff --git a/roles/openshift_master_ca/tasks/main.yml b/roles/openshift_master_ca/tasks/main.yml
index 5c9639ea5..cfd1ceabf 100644
--- a/roles/openshift_master_ca/tasks/main.yml
+++ b/roles/openshift_master_ca/tasks/main.yml
@@ -18,5 +18,4 @@
--master={{ openshift.master.api_url }}
--public-master={{ openshift.master.public_api_url }}
--cert-dir={{ openshift_master_config_dir }} --overwrite=false
- args:
- creates: "{{ openshift_master_config_dir }}/master.server.key"
+ when: master_certs_missing
diff --git a/roles/openshift_master_certificates/tasks/main.yml b/roles/openshift_master_certificates/tasks/main.yml
index 0d75a9eb3..e4602337e 100644
--- a/roles/openshift_master_certificates/tasks/main.yml
+++ b/roles/openshift_master_certificates/tasks/main.yml
@@ -20,6 +20,8 @@
- admin.kubeconfig
- master.kubelet-client.crt
- master.kubelet-client.key
+ - "{{ 'master.proxy-client.crt' if openshift.common.version_greater_than_3_1_or_1_1 else omit }}"
+ - "{{ 'master.proxy-client.key' if openshift.common.version_greater_than_3_1_or_1_1 else omit }}"
- openshift-master.crt
- openshift-master.key
- openshift-master.kubeconfig
@@ -41,6 +43,5 @@
--public-master={{ item.openshift.master.public_api_url }}
--cert-dir={{ openshift_generated_configs_dir }}/{{ item.master_cert_subdir }}
--overwrite=false
- args:
- creates: "{{ openshift_generated_configs_dir }}/{{ item.master_cert_subdir }}/master.server.crt"
+ when: master_certs_missing
with_items: masters_needing_certs
diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml
index c4abf9d7c..fffbf2994 100644
--- a/roles/openshift_node/defaults/main.yml
+++ b/roles/openshift_node/defaults/main.yml
@@ -10,3 +10,6 @@ os_firewall_allow:
port: 10255/tcp
- service: Openshift kubelet ReadOnlyPort udp
port: 10255/udp
+- service: OpenShift OVS sdn
+ port: 4789/udp
+ when: openshift.node.use_openshift_sdn | bool
diff --git a/roles/openshift_node/handlers/main.yml b/roles/openshift_node/handlers/main.yml
index 633f3ed13..447ca85f3 100644
--- a/roles/openshift_node/handlers/main.yml
+++ b/roles/openshift_node/handlers/main.yml
@@ -1,6 +1,7 @@
---
- name: restart node
service: name={{ openshift.common.service_type }}-node state=restarted
+ when: not node_service_status_changed | default(false)
- name: restart docker
service: name=docker state=restarted
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index e8cc499c0..aea60b75c 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -22,7 +22,7 @@
deployment_type: "{{ openshift_deployment_type }}"
- role: node
local_facts:
- labels: "{{ openshift_node_labels | default(none) }}"
+ labels: "{{ lookup('oo_option', 'openshift_node_labels') | default( openshift_node_labels | default(none), true) }}"
annotations: "{{ openshift_node_annotations | default(none) }}"
registry_url: "{{ oreg_url | default(none) }}"
debug_level: "{{ openshift_node_debug_level | default(openshift.common.debug_level) }}"
@@ -30,6 +30,9 @@
kubelet_args: "{{ openshift_node_kubelet_args | default(None) }}"
sdn_mtu: "{{ openshift_node_sdn_mtu | default(None) }}"
schedulable: "{{ openshift_schedulable | default(openshift_scheduleable) | default(None) }}"
+ docker_log_driver: "{{ lookup( 'oo_option' , 'docker_log_driver' ) | default('',True) }}"
+ docker_log_options: "{{ lookup( 'oo_option' , 'docker_log_options' ) | default('',True) }}"
+ storage_plugin_deps: "{{ osn_storage_plugin_deps | default(None) }}"
# We have to add tuned-profiles in the same transaction otherwise we run into depsolving
# problems because the rpms don't pin the version properly.
@@ -68,12 +71,14 @@
register: docker_check
# TODO: Enable secure registry when code available in origin
-- name: Secure Registry
+- name: Secure Registry and Log Options
lineinfile:
dest: /etc/sysconfig/docker
regexp: '^OPTIONS=.*$'
line: "OPTIONS='--insecure-registry={{ openshift.node.portal_net }} \
-{% if ansible_selinux and ansible_selinux.status == '''enabled''' %}--selinux-enabled{% endif %}'"
+{% if ansible_selinux and ansible_selinux.status == '''enabled''' %}--selinux-enabled{% endif %} \
+{% if openshift.node.docker_log_driver is defined %} --log-driver {{ openshift.node.docker_log_driver }} {% endif %} \
+{% if openshift.node.docker_log_options is defined %} {{ openshift.node.docker_log_options | oo_split() | oo_prepend_strings_in_list('--log-opt ') | join(' ')}} {% endif %} '"
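+ # sketch of the rendered line, assuming hypothetical values
+ # portal_net=172.30.0.0/16, docker_log_driver=json-file and
+ # docker_log_options=max-size=50m,max-file=5:
+ # OPTIONS='--insecure-registry=172.30.0.0/16 --selinux-enabled --log-driver json-file --log-opt max-size=50m --log-opt max-file=5'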
when: docker_check.stat.isreg
notify:
- restart docker
@@ -120,14 +125,12 @@
notify:
- restart docker
-- name: Allow NFS access for VMs
- seboolean: name=virt_use_nfs state=yes persistent=yes
- when: ansible_selinux and ansible_selinux.status == "enabled"
+- name: Additional storage plugin configuration
+ include: storage_plugins/main.yml
- name: Start and enable node
service: name={{ openshift.common.service_type }}-node enabled=yes state=started
register: start_result
-- name: pause to prevent service restart from interfering with bootstrapping
- pause: seconds=30
- when: start_result | changed
+- set_fact:
+ node_service_status_changed: "{{ start_result | changed }}"
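+ # consumed by the "restart node" handler to skip a redundant restart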
diff --git a/roles/openshift_node/tasks/storage_plugins/ceph.yml b/roles/openshift_node/tasks/storage_plugins/ceph.yml
new file mode 100644
index 000000000..b6936618a
--- /dev/null
+++ b/roles/openshift_node/tasks/storage_plugins/ceph.yml
@@ -0,0 +1,5 @@
+---
+- name: Install Ceph storage plugin dependencies
+ yum:
+ pkg: ceph-common
+ state: installed
diff --git a/roles/openshift_node/tasks/storage_plugins/glusterfs.yml b/roles/openshift_node/tasks/storage_plugins/glusterfs.yml
new file mode 100644
index 000000000..b812e81df
--- /dev/null
+++ b/roles/openshift_node/tasks/storage_plugins/glusterfs.yml
@@ -0,0 +1,12 @@
+---
+- name: Install GlusterFS storage plugin dependencies
+ yum:
+ pkg: glusterfs-fuse
+ state: installed
+
+- name: Set seboolean to allow gluster storage plugin access from containers
+ seboolean:
+ name: virt_use_fusefs
+ state: yes
+ persistent: yes
+ when: ansible_selinux and ansible_selinux.status == "enabled"
diff --git a/roles/openshift_node/tasks/storage_plugins/main.yml b/roles/openshift_node/tasks/storage_plugins/main.yml
new file mode 100644
index 000000000..39c7b9390
--- /dev/null
+++ b/roles/openshift_node/tasks/storage_plugins/main.yml
@@ -0,0 +1,13 @@
+---
+# The NFS storage plugin is always enabled since it doesn't require any
+# additional package dependencies
+- name: NFS storage plugin configuration
+ include: nfs.yml
+
+- name: GlusterFS storage plugin configuration
+ include: glusterfs.yml
+ when: "'glusterfs' in openshift.node.storage_plugin_deps"
+
+- name: Ceph storage plugin configuration
+ include: ceph.yml
+ when: "'ceph' in openshift.node.storage_plugin_deps"
diff --git a/roles/openshift_node/tasks/storage_plugins/nfs.yml b/roles/openshift_node/tasks/storage_plugins/nfs.yml
new file mode 100644
index 000000000..1edf21d9b
--- /dev/null
+++ b/roles/openshift_node/tasks/storage_plugins/nfs.yml
@@ -0,0 +1,7 @@
+---
+- name: Set seboolean to allow nfs storage plugin access from containers
+ seboolean:
+ name: virt_use_nfs
+ state: yes
+ persistent: yes
+ when: ansible_selinux and ansible_selinux.status == "enabled"
diff --git a/roles/openshift_node/templates/node.yaml.v1.j2 b/roles/openshift_node/templates/node.yaml.v1.j2
index 07d80f99b..4931d127e 100644
--- a/roles/openshift_node/templates/node.yaml.v1.j2
+++ b/roles/openshift_node/templates/node.yaml.v1.j2
@@ -12,13 +12,17 @@ kind: NodeConfig
kubeletArguments: {{ openshift.node.kubelet_args | to_json }}
{% endif %}
masterKubeConfig: system:node:{{ openshift.common.hostname }}.kubeconfig
+{% if openshift.common.use_openshift_sdn %}
networkPluginName: {{ openshift.common.sdn_network_plugin_name }}
+{% endif %}
# networkConfig struct introduced in origin 1.0.6 and OSE 3.0.2 which
# deprecates networkPluginName above. The two should match.
networkConfig:
mtu: {{ openshift.node.sdn_mtu }}
+{% if openshift.common.use_openshift_sdn %}
networkPluginName: {{ openshift.common.sdn_network_plugin_name }}
-nodeName: {{ openshift.common.hostname }}
+{% endif %}
+nodeName: {{ openshift.common.hostname | lower }}
podManifestConfig:
servingInfo:
bindAddress: 0.0.0.0:10250
diff --git a/roles/openshift_registry/tasks/main.yml b/roles/openshift_registry/tasks/main.yml
index 29387d7d5..749eea5c0 100644
--- a/roles/openshift_registry/tasks/main.yml
+++ b/roles/openshift_registry/tasks/main.yml
@@ -1,11 +1,14 @@
---
-- set_fact: _oreg_images="--images={{ oreg_url|quote }}"
- when: oreg_url is defined
+# This role is unused until we add options for configuring the backend storage
+
+- set_fact: _oreg_images="--images='{{ openshift.master.registry_url }}'"
+
+- set_fact: _oreg_selector="--selector='{{ openshift.master.registry_selector }}'"
- name: Deploy OpenShift Registry
command: >
{{ openshift.common.admin_binary }} registry
- --create
- --credentials={{ openshift_master_config_dir }}/openshift-registry.kubeconfig {{ _oreg_images|default() }}
+ --create --service-account=registry {{ _oreg_selector }}
+ --credentials={{ openshift_master_config_dir }}/openshift-registry.kubeconfig {{ _oreg_images }}
register: _oreg_results
changed_when: "'service exists' not in _oreg_results.stdout"
diff --git a/roles/openshift_router/tasks/main.yml b/roles/openshift_router/tasks/main.yml
index 929177262..498a65127 100644
--- a/roles/openshift_router/tasks/main.yml
+++ b/roles/openshift_router/tasks/main.yml
@@ -1,11 +1,14 @@
---
-- set_fact: _ortr_images="--images={{ oreg_url|quote }}"
- when: oreg_url is defined
+
+- set_fact: _ortr_images="--images='{{ openshift.master.registry_url }}'"
+
+- set_fact: _ortr_selector="--selector='{{ openshift.master.router_selector }}'"
- name: Deploy OpenShift Router
command: >
{{ openshift.common.admin_binary }} router
- --create
- --credentials={{ openshift_master_config_dir }}/openshift-router.kubeconfig {{ _ortr_images|default() }}
+ --create --replicas={{ openshift.master.infra_nodes }}
+ --service-account=router {{ _ortr_selector }}
+ --credentials={{ openshift_master_config_dir }}/openshift-router.kubeconfig {{ _ortr_images }}
register: _ortr_results
changed_when: "'service exists' not in _ortr_results.stdout"
diff --git a/roles/os_zabbix/tasks/main.yml b/roles/os_zabbix/tasks/main.yml
index 8347e9a61..82bf78b57 100644
--- a/roles/os_zabbix/tasks/main.yml
+++ b/roles/os_zabbix/tasks/main.yml
@@ -12,6 +12,10 @@
- include_vars: template_docker.yml
- include_vars: template_openshift_master.yml
- include_vars: template_openshift_node.yml
+- include_vars: template_ops_tools.yml
+- include_vars: template_app_zabbix_server.yml
+- include_vars: template_app_zabbix_agent.yml
+- include_vars: template_performance_copilot.yml
- name: Include Template Heartbeat
include: ../../lib_zabbix/tasks/create_template.yml
@@ -52,3 +56,35 @@
server: "{{ ozb_server }}"
user: "{{ ozb_user }}"
password: "{{ ozb_password }}"
+
+- name: Include Template Ops Tools
+ include: ../../lib_zabbix/tasks/create_template.yml
+ vars:
+ template: "{{ g_template_ops_tools }}"
+ server: "{{ ozb_server }}"
+ user: "{{ ozb_user }}"
+ password: "{{ ozb_password }}"
+
+- name: Include Template App Zabbix Server
+ include: ../../lib_zabbix/tasks/create_template.yml
+ vars:
+ template: "{{ g_template_app_zabbix_server }}"
+ server: "{{ ozb_server }}"
+ user: "{{ ozb_user }}"
+ password: "{{ ozb_password }}"
+
+- name: Include Template App Zabbix Agent
+ include: ../../lib_zabbix/tasks/create_template.yml
+ vars:
+ template: "{{ g_template_app_zabbix_agent }}"
+ server: "{{ ozb_server }}"
+ user: "{{ ozb_user }}"
+ password: "{{ ozb_password }}"
+
+- name: Include Template Performance Copilot
+ include: ../../lib_zabbix/tasks/create_template.yml
+ vars:
+ template: "{{ g_template_performance_copilot }}"
+ server: "{{ ozb_server }}"
+ user: "{{ ozb_user }}"
+ password: "{{ ozb_password }}"
diff --git a/roles/os_zabbix/vars/template_app_zabbix_agent.yml b/roles/os_zabbix/vars/template_app_zabbix_agent.yml
new file mode 100644
index 000000000..d636d4822
--- /dev/null
+++ b/roles/os_zabbix/vars/template_app_zabbix_agent.yml
@@ -0,0 +1,23 @@
+---
+g_template_app_zabbix_agent:
+ name: Template App Zabbix Agent
+ zitems:
+ - key: agent.hostname
+ applications:
+ - Zabbix agent
+ value_type: character
+ zabbix_type: agent
+
+ - key: agent.ping
+ applications:
+ - Zabbix agent
+ description: The agent always returns 1 for this item. It can be used in combination with nodata() for an availability check.
+ value_type: int
+ zabbix_type: agent
+
+ ztriggers:
+ - name: '[Reboot] Zabbix agent on {HOST.NAME} is unreachable for 15 minutes'
+ description: Zabbix agent is unreachable for 15 minutes.
+ expression: '{Template App Zabbix Agent:agent.ping.nodata(15m)}=1'
+ priority: high
+ url: https://github.com/openshift/ops-sop/blob/master/Alerts/check_ping.asciidoc
diff --git a/roles/os_zabbix/vars/template_app_zabbix_server.yml b/roles/os_zabbix/vars/template_app_zabbix_server.yml
new file mode 100644
index 000000000..43517113b
--- /dev/null
+++ b/roles/os_zabbix/vars/template_app_zabbix_server.yml
@@ -0,0 +1,412 @@
+---
+g_template_app_zabbix_server:
+ name: Template App Zabbix Server
+ zitems:
+ - key: housekeeper_creates
+ applications:
+ - Zabbix server
+ description: A simple count of the number of partition creates output by the housekeeper script.
+ units: ''
+ value_type: int
+ zabbix_type: internal
+
+ - key: housekeeper_drops
+ applications:
+ - Zabbix server
+ description: A simple count of the number of partition drops output by the housekeeper script.
+ units: ''
+ value_type: int
+ zabbix_type: internal
+
+ - key: housekeeper_errors
+ applications:
+ - Zabbix server
+ description: A simple count of the number of errors output by the housekeeper script.
+ units: ''
+ value_type: int
+ zabbix_type: internal
+
+ - key: housekeeper_total
+ applications:
+ - Zabbix server
+ description: A simple count of the total number of lines output by the housekeeper
+ script.
+ units: ''
+ value_type: int
+ zabbix_type: internal
+
+ - key: zabbix[process,alerter,avg,busy]
+ applications:
+ - Zabbix server
+ description: ''
+ units: '%'
+ value_type: float
+ zabbix_type: internal
+
+ - key: zabbix[process,configuration syncer,avg,busy]
+ applications:
+ - Zabbix server
+ description: ''
+ units: '%'
+ value_type: float
+ zabbix_type: internal
+
+ - key: zabbix[process,db watchdog,avg,busy]
+ applications:
+ - Zabbix server
+ description: ''
+ units: '%'
+ value_type: float
+ zabbix_type: internal
+
+ - key: zabbix[process,discoverer,avg,busy]
+ applications:
+ - Zabbix server
+ description: ''
+ units: '%'
+ value_type: float
+ zabbix_type: internal
+
+ - key: zabbix[process,escalator,avg,busy]
+ applications:
+ - Zabbix server
+ description: ''
+ units: '%'
+ value_type: float
+ zabbix_type: internal
+
+ - key: zabbix[process,history syncer,avg,busy]
+ applications:
+ - Zabbix server
+ description: ''
+ units: '%'
+ value_type: float
+ zabbix_type: internal
+
+ - key: zabbix[process,housekeeper,avg,busy]
+ applications:
+ - Zabbix server
+ description: ''
+ units: '%'
+ value_type: float
+ zabbix_type: internal
+
+ - key: zabbix[process,http poller,avg,busy]
+ applications:
+ - Zabbix server
+ description: ''
+ units: '%'
+ value_type: float
+ zabbix_type: internal
+
+ - key: zabbix[process,icmp pinger,avg,busy]
+ applications:
+ - Zabbix server
+ description: ''
+ units: '%'
+ value_type: float
+ zabbix_type: internal
+
+ - key: zabbix[process,ipmi poller,avg,busy]
+ applications:
+ - Zabbix server
+ description: ''
+ units: '%'
+ value_type: float
+ zabbix_type: internal
+
+ - key: zabbix[process,java poller,avg,busy]
+ applications:
+ - Zabbix server
+ description: ''
+ units: '%'
+ value_type: float
+ zabbix_type: internal
+
+ - key: zabbix[process,node watcher,avg,busy]
+ applications:
+ - Zabbix server
+ description: ''
+ units: '%'
+ value_type: float
+ zabbix_type: internal
+
+ - key: zabbix[process,poller,avg,busy]
+ applications:
+ - Zabbix server
+ description: ''
+ units: '%'
+ value_type: float
+ zabbix_type: internal
+
+ - key: zabbix[process,proxy poller,avg,busy]
+ applications:
+ - Zabbix server
+ description: ''
+ units: '%'
+ value_type: float
+ zabbix_type: internal
+
+ - key: zabbix[process,self-monitoring,avg,busy]
+ applications:
+ - Zabbix server
+ description: ''
+ units: '%'
+ value_type: float
+ zabbix_type: internal
+
+ - key: zabbix[process,snmp trapper,avg,busy]
+ applications:
+ - Zabbix server
+ description: ''
+ units: '%'
+ value_type: float
+ zabbix_type: internal
+
+ - key: zabbix[process,timer,avg,busy]
+ applications:
+ - Zabbix server
+ description: ''
+ units: '%'
+ value_type: float
+ zabbix_type: internal
+
+ - key: zabbix[process,trapper,avg,busy]
+ applications:
+ - Zabbix server
+ description: ''
+ units: '%'
+ value_type: float
+ zabbix_type: internal
+
+ - key: zabbix[process,unreachable poller,avg,busy]
+ applications:
+ - Zabbix server
+ description: ''
+ units: '%'
+ value_type: float
+ zabbix_type: internal
+
+ - key: zabbix[queue,10m]
+ applications:
+ - Zabbix server
+ description: ''
+ units: ''
+ value_type: int
+ zabbix_type: internal
+ interval: 600
+
+ - key: zabbix[queue]
+ applications:
+ - Zabbix server
+ description: ''
+ units: ''
+ value_type: int
+ zabbix_type: internal
+ interval: 600
+
+ - key: zabbix[rcache,buffer,pfree]
+ applications:
+ - Zabbix server
+ description: ''
+ units: ''
+ value_type: float
+ zabbix_type: internal
+
+ - key: zabbix[wcache,history,pfree]
+ applications:
+ - Zabbix server
+ description: ''
+ units: ''
+ value_type: float
+ zabbix_type: internal
+
+ - key: zabbix[wcache,text,pfree]
+ applications:
+ - Zabbix server
+ description: ''
+ units: ''
+ value_type: float
+ zabbix_type: internal
+
+ - key: zabbix[wcache,trend,pfree]
+ applications:
+ - Zabbix server
+ description: ''
+ units: ''
+ value_type: float
+ zabbix_type: internal
+
+ - key: zabbix[wcache,values]
+ applications:
+ - Zabbix server
+ description: ''
+ units: ''
+ value_type: float
+ zabbix_type: internal
+ delta: 1 # speed per second
+
+ ztriggers:
+ - description: "There has been unexpected output while running the housekeeping script\
+ \ on the Zabbix server. There are only three kinds of lines we expect to see in the output,\
+ \ and we've gotten something new.\r\n\r\nCheck the script's output in /var/lib/zabbix/state\
+ \ for more details."
+ expression: '{Template App Zabbix Server:housekeeper_errors.last(0)}+{Template App Zabbix Server:housekeeper_creates.last(0)}+{Template App Zabbix Server:housekeeper_drops.last(0)}<>{Template App Zabbix Server:housekeeper_total.last(0)}'
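+ # i.e. the alert fires when creates + drops + errors no longer account for
+ # every line of housekeeper output, meaning an unrecognized line appeared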
+ name: Unexpected output in Zabbix DB Housekeeping
+ priority: avg
+ url: https://github.com/openshift/ops-sop/blob/master/Alerts/Zabbix_DB_Housekeeping.asciidoc
+
+ - description: An error occurred while running the housekeeping script on the Zabbix server. Check the script's output in /var/lib/zabbix/state for more details.
+ expression: '{Template App Zabbix Server:housekeeper_errors.last(0)}>0'
+ name: Errors during Zabbix DB Housekeeping
+ priority: high
+ url: https://github.com/openshift/ops-sop/blob/master/Alerts/Zabbix_state_check.asciidoc
+
+ - description: ''
+ expression: '{Template App Zabbix Server:zabbix[process,alerter,avg,busy].min(600)}>75'
+ name: Zabbix alerter processes more than 75% busy
+ priority: avg
+ url: https://github.com/openshift/ops-sop/blob/master/Alerts/Zabbix_state_check.asciidoc
+
+ - description: ''
+ expression: '{Template App Zabbix Server:zabbix[process,configuration syncer,avg,busy].min(600)}>75'
+ name: Zabbix configuration syncer processes more than 75% busy
+ priority: avg
+ url: https://github.com/openshift/ops-sop/blob/master/Alerts/Zabbix_state_check.asciidoc
+
+ - description: ''
+ expression: '{Template App Zabbix Server:zabbix[process,db watchdog,avg,busy].min(600)}>75'
+ name: Zabbix db watchdog processes more than 75% busy
+ priority: avg
+ url: https://github.com/openshift/ops-sop/blob/master/Alerts/Zabbix_state_check.asciidoc
+
+ - description: ''
+ expression: '{Template App Zabbix Server:zabbix[process,discoverer,avg,busy].min(600)}>75'
+ name: Zabbix discoverer processes more than 75% busy
+ priority: avg
+ url: https://github.com/openshift/ops-sop/blob/master/Alerts/Zabbix_state_check.asciidoc
+
+ - description: ''
+ expression: '{Template App Zabbix Server:zabbix[process,escalator,avg,busy].min(600)}>75'
+ name: Zabbix escalator processes more than 75% busy
+ priority: avg
+ url: https://github.com/openshift/ops-sop/blob/master/Alerts/Zabbix_state_check.asciidoc
+
+ - description: ''
+ expression: '{Template App Zabbix Server:zabbix[process,history syncer,avg,busy].min(600)}>75'
+ name: Zabbix history syncer processes more than 75% busy
+ priority: avg
+ url: https://github.com/openshift/ops-sop/blob/master/Alerts/Zabbix_state_check.asciidoc
+
+ - description: ''
+ expression: '{Template App Zabbix Server:zabbix[process,housekeeper,avg,busy].min(1800)}>75'
+ name: Zabbix housekeeper processes more than 75% busy
+ priority: avg
+ url: https://github.com/openshift/ops-sop/blob/master/Alerts/Zabbix_state_check.asciidoc
+
+ - description: ''
+ expression: '{Template App Zabbix Server:zabbix[process,http poller,avg,busy].min(600)}>75'
+ name: Zabbix http poller processes more than 75% busy
+ priority: avg
+ url: https://github.com/openshift/ops-sop/blob/master/Alerts/Zabbix_state_check.asciidoc
+
+ - description: ''
+ expression: '{Template App Zabbix Server:zabbix[process,icmp pinger,avg,busy].min(600)}>75'
+ name: Zabbix icmp pinger processes more than 75% busy
+ priority: avg
+ url: https://github.com/openshift/ops-sop/blob/master/Alerts/Zabbix_state_check.asciidoc
+
+ - description: ''
+ expression: '{Template App Zabbix Server:zabbix[process,ipmi poller,avg,busy].min(600)}>75'
+ name: Zabbix ipmi poller processes more than 75% busy
+ priority: avg
+ url: https://github.com/openshift/ops-sop/blob/master/Alerts/Zabbix_state_check.asciidoc
+
+ - description: ''
+ expression: '{Template App Zabbix Server:zabbix[process,java poller,avg,busy].min(600)}>75'
+ name: Zabbix java poller processes more than 75% busy
+ priority: avg
+ url: https://github.com/openshift/ops-sop/blob/master/Alerts/Zabbix_state_check.asciidoc
+
+ - description: ''
+ expression: '{Template App Zabbix Server:zabbix[process,node watcher,avg,busy].min(600)}>75'
+ name: Zabbix node watcher processes more than 75% busy
+ priority: avg
+ url: https://github.com/openshift/ops-sop/blob/master/Alerts/Zabbix_state_check.asciidoc
+
+ - description: ''
+ expression: '{Template App Zabbix Server:zabbix[process,poller,avg,busy].min(600)}>75'
+ name: Zabbix poller processes more than 75% busy
+ priority: high
+ url: https://github.com/openshift/ops-sop/blob/master/Alerts/Zabbix_state_check.asciidoc
+
+ - description: ''
+ expression: '{Template App Zabbix Server:zabbix[process,proxy poller,avg,busy].min(600)}>75'
+ name: Zabbix proxy poller processes more than 75% busy
+ priority: avg
+ url: https://github.com/openshift/ops-sop/blob/master/Alerts/Zabbix_state_check.asciidoc
+
+ - description: ''
+ expression: '{Template App Zabbix Server:zabbix[process,self-monitoring,avg,busy].min(600)}>75'
+ name: Zabbix self-monitoring processes more than 75% busy
+ priority: avg
+ url: https://github.com/openshift/ops-sop/blob/master/Alerts/Zabbix_state_check.asciidoc
+
+ - description: ''
+ expression: '{Template App Zabbix Server:zabbix[process,snmp trapper,avg,busy].min(600)}>75'
+ name: Zabbix snmp trapper processes more than 75% busy
+ priority: avg
+ url: https://github.com/openshift/ops-sop/blob/master/Alerts/Zabbix_state_check.asciidoc
+
+ - description: Timer processes are usually busy because they have to process time-based
+ trigger functions
+ expression: '{Template App Zabbix Server:zabbix[process,timer,avg,busy].min(600)}>75'
+ name: Zabbix timer processes more than 75% busy
+ priority: avg
+ url: https://github.com/openshift/ops-sop/blob/master/Alerts/Zabbix_state_check.asciidoc
+
+ - description: ''
+ expression: '{Template App Zabbix Server:zabbix[process,trapper,avg,busy].min(600)}>75'
+ name: Zabbix trapper processes more than 75% busy
+ priority: avg
+ url: https://github.com/openshift/ops-sop/blob/master/Alerts/Zabbix_state_check.asciidoc
+
+ - description: ''
+ expression: '{Template App Zabbix Server:zabbix[process,unreachable poller,avg,busy].min(600)}>75'
+ name: Zabbix unreachable poller processes more than 75% busy
+ priority: avg
+ url: https://github.com/openshift/ops-sop/blob/master/Alerts/Zabbix_state_check.asciidoc
+
+ - description: "This alert generally indicates a performance problem or a problem\
+ \ with the zabbix-server or proxy.\r\n\r\nThe first place to check for issues\
+ \ is Administration > Queue. Be sure to check the general view and the per-proxy\
+ \ view."
+ expression: '{Template App Zabbix Server:zabbix[queue,10m].min(600)}>1000'
+ name: More than 1000 items having missing data for more than 10 minutes
+ priority: high
+ url: https://github.com/openshift/ops-sop/blob/master/Alerts/data_lost_overview_plugin.asciidoc
+
+ - description: Consider increasing CacheSize in the zabbix_server.conf configuration
+ file
+ expression: '{Template App Zabbix Server:zabbix[rcache,buffer,pfree].min(600)}<5'
+ name: Less than 5% free in the configuration cache
+ priority: info
+ url: https://github.com/openshift/ops-sop/blob/master/Alerts/check_cache.asciidoc
+
+ - description: ''
+ expression: '{Template App Zabbix Server:zabbix[wcache,history,pfree].min(600)}<25'
+ name: Less than 25% free in the history cache
+ priority: avg
+ url: https://github.com/openshift/ops-sop/blob/master/Alerts/check_cache.asciidoc
+
+ - description: ''
+ expression: '{Template App Zabbix Server:zabbix[wcache,text,pfree].min(600)}<25'
+ name: Less than 25% free in the text history cache
+ priority: avg
+ url: https://github.com/openshift/ops-sop/blob/master/Alerts/check_cache.asciidoc
+
+ - description: ''
+ expression: '{Template App Zabbix Server:zabbix[wcache,trend,pfree].min(600)}<25'
+ name: Less than 25% free in the trends cache
+ priority: avg
+ url: https://github.com/openshift/ops-sop/blob/master/Alerts/check_cache.asciidoc
diff --git a/roles/os_zabbix/vars/template_docker.yml b/roles/os_zabbix/vars/template_docker.yml
index 395e054de..bfabf50c5 100644
--- a/roles/os_zabbix/vars/template_docker.yml
+++ b/roles/os_zabbix/vars/template_docker.yml
@@ -7,6 +7,11 @@ g_template_docker:
- Docker Daemon
value_type: int
+ - key: docker.info_elapsed_ms
+ applications:
+ - Docker Daemon
+ value_type: int
+
- key: docker.storage.is_loopback
applications:
- Docker Storage
diff --git a/roles/os_zabbix/vars/template_openshift_master.yml b/roles/os_zabbix/vars/template_openshift_master.yml
index c71e07910..cd702a814 100644
--- a/roles/os_zabbix/vars/template_openshift_master.yml
+++ b/roles/os_zabbix/vars/template_openshift_master.yml
@@ -13,6 +13,96 @@ g_template_openshift_master:
applications:
- Openshift Master
+ - key: openshift.master.user.count
+ description: Shows number of users in a cluster
+ type: int
+ applications:
+ - Openshift Master
+
+ - key: openshift.master.pod.running.count
+ description: Shows number of pods running
+ type: int
+ applications:
+ - Openshift Master
+
+ - key: openshift.project.counter
+ description: Shows number of projects on a cluster
+ type: int
+ applications:
+ - Openshift Master
+
+ - key: openshift.master.etcd.create.success
+ description: Show number of successful create actions
+ type: int
+ applications:
+ - Openshift Master
+
+ - key: openshift.master.etcd.create.fail
+ description: Show number of failed create actions
+ type: int
+ applications:
+ - Openshift Master
+
+ - key: openshift.master.etcd.delete.success
+ description: Show number of successful delete actions
+ type: int
+ applications:
+ - Openshift Master
+
+ - key: openshift.master.etcd.delete.fail
+ description: Show number of failed delete actions
+ type: int
+ applications:
+ - Openshift Master
+
+ - key: openshift.master.etcd.get.success
+ description: Show number of successful get actions
+ type: int
+ applications:
+ - Openshift Master
+
+ - key: openshift.master.etcd.get.fail
+ description: Show number of failed get actions
+ type: int
+ applications:
+ - Openshift Master
+
+ - key: openshift.master.etcd.set.success
+ description: Show number of successful set actions
+ type: int
+ applications:
+ - Openshift Master
+
+ - key: openshift.master.etcd.set.fail
+ description: Show number of failed set actions
+ type: int
+ applications:
+ - Openshift Master
+
+ - key: openshift.master.etcd.update.success
+ description: Show number of successful update actions
+ type: int
+ applications:
+ - Openshift Master
+
+ - key: openshift.master.etcd.update.fail
+ description: Show number of failed update actions
+ type: int
+ applications:
+ - Openshift Master
+
+ - key: openshift.master.etcd.watchers
+ description: Show number of etcd watchers
+ type: int
+ applications:
+ - Openshift Master
+
+ - key: openshift.master.etcd.ping
+ description: etcd ping
+ type: int
+ applications:
+ - Openshift Master
+
ztriggers:
- name: 'Application creation has failed on {HOST.NAME}'
expression: '{Template Openshift Master:create_app.last(#1)}=1 and {Template Openshift Master:create_app.last(#2)}=1'
@@ -28,3 +118,23 @@ g_template_openshift_master:
expression: '{Template Openshift Master:openshift.master.process.count.min(#3)}>1'
url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_master.asciidoc'
priority: high
+
+ - name: 'Number of users for Openshift Master is 0 on {HOST.NAME}'
+ expression: '{Template Openshift Master:openshift.master.user.count.last()}=0'
+ url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_master.asciidoc'
+ priority: info
+
+ - name: 'There are no projects running on {HOST.NAME}'
+ expression: '{Template Openshift Master:openshift.project.counter.last()}=0'
+ url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_master.asciidoc'
+ priority: info
+
+ - name: 'Low number of etcd watchers on {HOST.NAME}'
+ expression: '{Template Openshift Master:openshift.master.etcd.watchers.last(#1)}<10 and {Template Openshift Master:openshift.master.etcd.watchers.last(#2)}<10'
+ url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_etcd.asciidoc'
+ priority: avg
+
+ - name: 'Etcd ping failed on {HOST.NAME}'
+ expression: '{Template Openshift Master:openshift.master.etcd.ping.last(#1)}=0 and {Template Openshift Master:openshift.master.etcd.ping.last(#2)}=0'
+ url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_etcd.asciidoc'
+ priority: high
diff --git a/roles/os_zabbix/vars/template_openshift_node.yml b/roles/os_zabbix/vars/template_openshift_node.yml
index 36f9cc4a3..ce28b1048 100644
--- a/roles/os_zabbix/vars/template_openshift_node.yml
+++ b/roles/os_zabbix/vars/template_openshift_node.yml
@@ -8,13 +8,37 @@ g_template_openshift_node:
applications:
- Openshift Node
+ - key: openshift.node.ovs.pids.count
+ description: Shows number of ovs process ids running
+ type: int
+ applications:
+ - Openshift Node
+
+ - key: openshift.node.ovs.ports.count
+ description: Shows number of OVS ports defined
+ type: int
+ applications:
+ - Openshift Node
+
ztriggers:
- name: 'Openshift Node process not running on {HOST.NAME}'
expression: '{Template Openshift Node:openshift.node.process.count.max(#3)}<1'
- url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_node.asciidoc'
+ url: 'https://github.com/openshift/ops-sop/blob/node/V3/Alerts/openshift_node.asciidoc'
priority: high
- name: 'Too many Openshift Node processes running on {HOST.NAME}'
expression: '{Template Openshift Node:openshift.node.process.count.min(#3)}>1'
- url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_node.asciidoc'
+ url: 'https://github.com/openshift/ops-sop/blob/node/V3/Alerts/openshift_node.asciidoc'
+ priority: high
+
+ - name: 'OVS may not be running on {HOST.NAME}'
+ expression: '{Template Openshift Node:openshift.node.ovs.pids.count.last()}<>4'
+ url: 'https://github.com/openshift/ops-sop/blob/node/V3/Alerts/openshift_node.asciidoc'
priority: high
+
+ - name: 'Number of OVS ports is 0 on {HOST.NAME}'
+ expression: '{Template Openshift Node:openshift.node.ovs.ports.count.last()}=0'
+ url: 'https://github.com/openshift/ops-sop/blob/node/V3/Alerts/openshift_node.asciidoc'
+ priority: high
diff --git a/roles/os_zabbix/vars/template_ops_tools.yml b/roles/os_zabbix/vars/template_ops_tools.yml
new file mode 100644
index 000000000..d1b8a2514
--- /dev/null
+++ b/roles/os_zabbix/vars/template_ops_tools.yml
@@ -0,0 +1,23 @@
+---
+g_template_ops_tools:
+ name: Template Operations Tools
+ zdiscoveryrules:
+ - name: disc.ops.runner
+ key: disc.ops.runner
+ lifetime: 1
+ description: "Dynamically register operations runner items"
+
+ zitemprototypes:
+ - discoveryrule_key: disc.ops.runner
+ name: "Exit code of ops-runner[{#OSO_COMMAND}]"
+ key: "disc.ops.runner.command.exitcode[{#OSO_COMMAND}]"
+ value_type: int
+ description: "The exit code of the command run from ops-runner"
+ applications:
+ - Ops Runner
+
+ ztriggerprototypes:
+ - name: 'ops-runner[{#OSO_COMMAND}]: non-zero exit code on {HOST.NAME}'
+ expression: '{Template Operations Tools:disc.ops.runner.command.exitcode[{#OSO_COMMAND}].last()}<>0'
+ url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_ops_runner_command.asciidoc'
+ priority: average
diff --git a/roles/os_zabbix/vars/template_os_linux.yml b/roles/os_zabbix/vars/template_os_linux.yml
index 36c890da9..aeeec4b8d 100644
--- a/roles/os_zabbix/vars/template_os_linux.yml
+++ b/roles/os_zabbix/vars/template_os_linux.yml
@@ -188,28 +188,14 @@ g_template_os_linux:
multiplier: 1024
units: B
- # Disk items
- - key: filesys.full.xvda2
- applications:
- - Disk
- value_type: float
-
- - key: filesys.full.xvda3
- applications:
- - Disk
- value_type: float
-
-
zdiscoveryrules:
- name: disc.filesys
key: disc.filesys
lifetime: 1
- template_name: Template OS Linux
description: "Dynamically register the filesystems"
zitemprototypes:
- discoveryrule_key: disc.filesys
- template_name: Template OS Linux
name: "disc.filesys.full.{#OSO_FILESYS}"
key: "disc.filesys.full[{#OSO_FILESYS}]"
value_type: float
@@ -217,38 +203,36 @@ g_template_os_linux:
applications:
- Disk
- ztriggerprototypes:
- - name: 'Filesystem: {#OSO_FILESYS} has less than 10% free on {HOST.NAME}'
- expression: '{Template OS Linux:disc.filesys.full[{#OSO_FILESYS}].last()}>90'
- url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_filesys_full.asciidoc'
- priority: warn
-
- - name: 'Filesystem: {#OSO_FILESYS} has less than 5% free on {HOST.NAME}'
- expression: '{Template OS Linux:disc.filesys.full[{#OSO_FILESYS}].last()}>95'
- url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_filesys_full.asciidoc'
- priority: high
+ - discoveryrule_key: disc.filesys
+ name: "Percentage of used inodes on {#OSO_FILESYS}"
+ key: "disc.filesys.inodes.pused[{#OSO_FILESYS}]"
+ value_type: float
+ description: "PCP derived value of percentage of used inodes on a filesystem."
+ applications:
+ - Disk
- ztriggers:
- - name: 'Filesystem: / has less than 10% free on {HOST.NAME}'
- expression: '{Template OS Linux:filesys.full.xvda2.last()}>90'
+ ztriggerprototypes:
+ - name: 'Filesystem: {#OSO_FILESYS} has less than 15% free disk space on {HOST.NAME}'
+ expression: '{Template OS Linux:disc.filesys.full[{#OSO_FILESYS}].last()}>85'
url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_filesys_full.asciidoc'
priority: warn
- - name: 'Filesystem: / has less than 5% free on {HOST.NAME}'
- expression: '{Template OS Linux:filesys.full.xvda2.last()}>95'
+ - name: 'Filesystem: {#OSO_FILESYS} has less than 10% free disk space on {HOST.NAME}'
+ expression: '{Template OS Linux:disc.filesys.full[{#OSO_FILESYS}].last()}>90'
url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_filesys_full.asciidoc'
priority: high
- - name: 'Filesystem: /var has less than 10% free on {HOST.NAME}'
- expression: '{Template OS Linux:filesys.full.xvda3.last()}>90'
+ - name: 'Filesystem: {#OSO_FILESYS} has less than 10% free inodes on {HOST.NAME}'
+ expression: '{Template OS Linux:disc.filesys.inodes.pused[{#OSO_FILESYS}].last()}>90'
url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_filesys_full.asciidoc'
priority: warn
- - name: 'Filesystem: /var has less than 5% free on {HOST.NAME}'
- expression: '{Template OS Linux:filesys.full.xvda3.last()}>95'
+ - name: 'Filesystem: {#OSO_FILESYS} has less than 5% free inodes on {HOST.NAME}'
+ expression: '{Template OS Linux:disc.filesys.inodes.pused[{#OSO_FILESYS}].last()}>95'
url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_filesys_full.asciidoc'
priority: high
+ ztriggers:
- name: 'Too many TOTAL processes on {HOST.NAME}'
expression: '{Template OS Linux:proc.nprocs.last()}>5000'
url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_proc.asciidoc'
@@ -259,3 +243,18 @@ g_template_os_linux:
url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_memory.asciidoc'
priority: warn
description: 'Alert on less than 30MegaBytes. This is 30 Million Bytes. 30000 KB x 1024'
+
+ # CPU Utilization #
+ - name: 'CPU idle less than 5% on {HOST.NAME}'
+ expression: '{Template OS Linux:kernel.all.cpu.idle.max(#5)}<5'
+ url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_cpu_idle.asciidoc'
+ priority: average
+ description: 'CPU is less than 5% idle'
+
+ - name: 'CPU idle less than 10% on {HOST.NAME}'
+ expression: '{Template OS Linux:kernel.all.cpu.idle.max(#5)}<10'
+ url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_cpu_idle.asciidoc'
+ priority: average
+ description: 'CPU is less than 10% idle'
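+ # Zabbix suppresses a dependent trigger while its parent is in problem
+ # state, so this alert stays quiet once the 5% trigger has fired.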
+ dependencies:
+ - 'CPU idle less than 5% on {HOST.NAME}'
diff --git a/roles/os_zabbix/vars/template_performance_copilot.yml b/roles/os_zabbix/vars/template_performance_copilot.yml
new file mode 100644
index 000000000..b62fa0228
--- /dev/null
+++ b/roles/os_zabbix/vars/template_performance_copilot.yml
@@ -0,0 +1,14 @@
+---
+g_template_performance_copilot:
+ name: Template Performance Copilot
+ zitems:
+ - key: pcp.ping
+ applications:
+ - Performance Copilot
+ value_type: int
+
+ ztriggers:
+ - name: 'pcp.ping failed on {HOST.NAME}'
+ expression: '{Template Performance Copilot:pcp.ping.max(#3)}<1'
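+ # max(#3)<1 means the three most recent pcp.ping samples were all 0,
+ # i.e. three consecutive failed pings.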
+ url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_pcp_ping.asciidoc'
+ priority: average
diff --git a/utils/.gitignore b/utils/.gitignore
new file mode 100644
index 000000000..68759c0ba
--- /dev/null
+++ b/utils/.gitignore
@@ -0,0 +1,45 @@
+package/
+
+# Backup files
+*.~
+
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+
+# C extensions
+*.so
+
+# Distribution / packaging
+bin/
+build/
+develop-eggs/
+dist/
+eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+.tox/
+.coverage
+.cache
+.noseids
+nosetests.xml
+coverage.xml
+
+# Translations
+*.mo
+
+# Sphinx documentation
+docs/_build/
diff --git a/utils/README.txt b/utils/README.txt
new file mode 100644
index 000000000..6a6a1d24d
--- /dev/null
+++ b/utils/README.txt
@@ -0,0 +1,24 @@
+## Running From Source
+
+You will need to set up a virtualenv to run from source and execute the unit tests.
+
+$ virtualenv oo-install
+$ source ./oo-install/bin/activate
+$ virtualenv --relocatable ./oo-install/
+$ python setup.py install
+
+The virtualenv bin directory should now be at the start of your $PATH, and oo-install is ready to use from your shell.
+
+You can exit the virtualenv with:
+
+$ deactivate
+
+## Testing
+
+Install some testing libraries (we cannot do this via setuptools due to the setuptools version that virtualenv bundles):
+
+$ pip install mock nose
+
+Then run the tests with:
+
+$ oo-install/bin/nosetests
diff --git a/utils/docs/config.md b/utils/docs/config.md
new file mode 100644
index 000000000..9399409dd
--- /dev/null
+++ b/utils/docs/config.md
@@ -0,0 +1,72 @@
+# oo-install Supported Configuration File
+
+Upon completion oo-install will write out a configuration file representing the settings that were gathered and used. This configuration file, or one crafted by hand, can be used to run or re-run the installer and add additional hosts, upgrade, or re-install.
+
+The default location this config file will be written to is ~/.config/openshift/installer.cfg.yml.
+
+## Example
+
+```
+variant: openshift-enterprise
+variant_version: 3.0
+ansible_ssh_user: root
+hosts:
+- ip: 10.0.0.1
+ hostname: master-private.example.com
+ public_ip: 24.222.0.1
+ public_hostname: master.example.com
+ master: true
+ node: true
+ containerized: true
+- ip: 10.0.0.2
+ hostname: node1-private.example.com
+ public_ip: 24.222.0.2
+ public_hostname: node1.example.com
+ node: true
+- ip: 10.0.0.3
+ hostname: node2-private.example.com
+ public_ip: 24.222.0.3
+ public_hostname: node2.example.com
+ node: true
+```
+
+## Primary Settings
+
+### variant
+
+The OpenShift variant to install. Currently valid options are:
+
+ * openshift-enterprise
+ * atomic-enterprise
+
+### variant_version (optional)
+
+Default: Latest version for your chosen variant.
+
+A version which must be valid for your selected variant. If not specified, the latest will be assumed.
+
+Examples: 3.0, 3.1, etc.
+
+### hosts
+
+This section defines a list of the hosts you wish to install the OpenShift master/node service on.
+
+*ip* or *hostname* must be specified so the installer can connect to the system to gather facts before proceeding with the install.
+
+If *public_ip* or *public_hostname* are not specified, this information will be gathered from the facts and the user will be asked to confirm it in an editor. For an unattended install the installer will instead error out, since complete host records must be provided.
+
+*master* and *node* determine the type of services that will be installed. One of these must be set to true for the configuration file to be considered valid.
+
+*containerized* indicates you want to run OpenShift services in a container on this host.
+
+### ansible_ssh_user
+
+Default: root
+
+Defines the user ansible will use to ssh to remote systems for gathering facts and the installation.
+
+### ansible_log_path
+
+Default: /tmp/ansible.log
+
+The location the installer and Ansible write their log to; it is exported to Ansible as ANSIBLE_LOG_PATH.
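+With a complete configuration file like the example above, the installer can be
+re-run without any prompts via the unattended and configuration flags the CLI
+defines (`-u` and `-c`):
+
+```
+$ oo-install -u -c ~/.config/openshift/installer.cfg.yml
+```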
diff --git a/utils/etc/ansible.cfg b/utils/etc/ansible.cfg
new file mode 100644
index 000000000..b7376ddfc
--- /dev/null
+++ b/utils/etc/ansible.cfg
@@ -0,0 +1,25 @@
+# config file for ansible -- http://ansible.com/
+# ==============================================
+
+# This config file provides examples for running
+# the OpenShift playbooks with the provided
+# inventory scripts. Only global defaults are
+# left uncommented
+
+[defaults]
+# Add the roles directory to the roles path
+roles_path = roles/
+
+# Set the log_path
+log_path = /tmp/ansible.log
+
+forks = 10
+host_key_checking = False
+nocows = 1
+# Need to handle:
+# inventory - derive from OO_ANSIBLE_DIRECTORY env var
+# callback_plugins - derive from pkg_resource.resource_filename
+# private_key_file - prompt if missing
+# remote_tmp - set if provided by user (cli)
+# ssh_args - set if provided by user (cli)
+# control_path \ No newline at end of file
diff --git a/utils/setup.cfg b/utils/setup.cfg
new file mode 100644
index 000000000..79bc67848
--- /dev/null
+++ b/utils/setup.cfg
@@ -0,0 +1,5 @@
+[bdist_wheel]
+# This flag says that the code is written to work on both Python 2 and Python
+# 3. If at all possible, it is good practice to do this. If you cannot, you
+# will need to generate wheels for each Python version that you support.
+universal=1
diff --git a/utils/setup.py b/utils/setup.py
new file mode 100644
index 000000000..6e2fdd9c0
--- /dev/null
+++ b/utils/setup.py
@@ -0,0 +1,85 @@
+"""A setuptools based setup module.
+
+"""
+
+# Always prefer setuptools over distutils
+from setuptools import setup
+
+setup(
+ name='ooinstall',
+
+ # Versions should comply with PEP440. For a discussion on single-sourcing
+ # the version across setup.py and the project code, see
+ # https://packaging.python.org/en/latest/single_source_version.html
+ version="3.0.0",
+
+ description="Ansible wrapper for OpenShift Enterprise 3 installation.",
+
+ # The project's main homepage.
+ url="http://github.com/openshift/openshift-extras/tree/enterprise-3.0/oo-install",
+
+ # Author details
+ author="openshift@redhat.com",
+ author_email="OpenShift",
+
+ # Choose your license
+ license="Apache 2.0",
+
+ # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
+ classifiers=[
+ 'Development Status :: 4 - Beta',
+ 'License :: OSI Approved :: Apache Software License',
+ 'Programming Language :: Python :: 2.7',
+ 'Topic :: Utilities',
+ ],
+
+ # What does your project relate to?
+ keywords='oo-install setuptools development',
+
+ # You can just specify the packages manually here if your project is
+ # simple. Or you can use find_packages().
+ #packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
+ packages=['ooinstall'],
+ package_dir={'ooinstall': 'src/ooinstall'},
+
+
+ # List run-time dependencies here. These will be installed by pip when
+ # your project is installed. For an analysis of "install_requires" vs pip's
+ # requirements files see:
+ # https://packaging.python.org/en/latest/requirements.html
+ install_requires=['click', 'PyYAML'],
+
+ # List additional groups of dependencies here (e.g. development
+ # dependencies). You can install these using the following syntax,
+ # for example:
+ # $ pip install -e .[dev,test]
+ #extras_require={
+ # 'dev': ['check-manifest'],
+ # 'test': ['coverage'],
+ #},
+
+ # If there are data files included in your packages that need to be
+ # installed, specify them here. If using Python 2.6 or less, then these
+ # have to be included in MANIFEST.in as well.
+ package_data={
+ 'ooinstall': ['ansible.cfg', 'ansible_plugins/*'],
+ },
+
+ # Although 'package_data' is the preferred approach, in some case you may
+ # need to place data files outside of your packages. See:
+ # http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
+ # In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
+ #data_files=[('my_data', ['data/data_file'])],
+ tests_require=['nose'],
+
+ test_suite='nose.collector',
+
+ # To provide executable scripts, use entry points in preference to the
+ # "scripts" keyword. Entry points provide cross-platform support and allow
+ # pip to create the appropriate form of executable for the target platform.
+ entry_points={
+ 'console_scripts': [
+ 'oo-install=ooinstall.cli_installer:main',
+ ],
+ },
+)
diff --git a/utils/site_assets/oo-install-bootstrap.sh b/utils/site_assets/oo-install-bootstrap.sh
new file mode 100755
index 000000000..e1b2cec90
--- /dev/null
+++ b/utils/site_assets/oo-install-bootstrap.sh
@@ -0,0 +1,86 @@
+#!/bin/sh
+
+# Grab command-line arguments
+cmdlnargs="$@"
+
+: ${OO_INSTALL_KEEP_ASSETS:="false"}
+: ${OO_INSTALL_CONTEXT:="INSTALLCONTEXT"}
+: ${TMPDIR:=/tmp}
+: ${OO_INSTALL_LOG:=${TMPDIR}/INSTALLPKGNAME.log}
+[[ $TMPDIR != */ ]] && TMPDIR="${TMPDIR}/"
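+# The ':' parameter expansions above assign each variable its default only
+# when it is unset or empty, so any of them can be overridden from the
+# environment.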
+
+if [ $OO_INSTALL_CONTEXT != 'origin_vm' ]
+then
+ clear
+ echo "Checking for necessary tools..."
+fi
+if [ -e /etc/redhat-release ]
+then
+ for i in python python-virtualenv openssh-clients gcc
+ do
+ rpm -q $i >/dev/null 2>&1 || { echo >&2 "Missing installation dependency detected. Please run \"yum install ${i}\"."; exit 1; }
+ done
+fi
+for i in python virtualenv ssh gcc
+do
+ command -v $i >/dev/null 2>&1 || { echo >&2 "OpenShift installation requires $i on the PATH but it does not appear to be available. Correct this and rerun the installer."; exit 1; }
+done
+
+# All instances of INSTALLPKGNAME are replaced during packaging with the actual package name.
+if [[ -e ./INSTALLPKGNAME.tgz ]]
+then
+ if [ $OO_INSTALL_CONTEXT != 'origin_vm' ]
+ then
+ echo "Using bundled assets."
+ fi
+ cp INSTALLPKGNAME.tgz ${TMPDIR}/INSTALLPKGNAME.tgz
+elif [[ $OO_INSTALL_KEEP_ASSETS == 'true' && -e ${TMPDIR}/INSTALLPKGNAME.tgz ]]
+then
+ if [ $OO_INSTALL_CONTEXT != 'origin_vm' ]
+ then
+ echo "Using existing installer assets."
+ fi
+else
+ echo "Downloading oo-install package to ${TMPDIR}INSTALLPKGNAME.tgz..."
+ curl -s -o ${TMPDIR}INSTALLPKGNAME.tgz https://install.openshift.com/INSTALLVERPATHINSTALLPKGNAME.tgz
+fi
+
+if [ $OO_INSTALL_CONTEXT != 'origin_vm' ]
+then
+ echo "Extracting oo-install to ${TMPDIR}INSTALLPKGNAME..."
+fi
+tar xzf ${TMPDIR}INSTALLPKGNAME.tgz -C ${TMPDIR} >> $OO_INSTALL_LOG 2>&1
+
+echo "Preparing to install. This can take a minute or two..."
+virtualenv ${TMPDIR}/INSTALLPKGNAME >> $OO_INSTALL_LOG 2>&1
+cd ${TMPDIR}/INSTALLPKGNAME >> $OO_INSTALL_LOG 2>&1
+source ./bin/activate >> $OO_INSTALL_LOG 2>&1
+pip install --no-index -f file:///$(readlink -f deps) ansible >> $OO_INSTALL_LOG 2>&1
+
+# TODO: these deps should technically be handled as part of installing ooinstall
+pip install --no-index -f file:///$(readlink -f deps) click >> $OO_INSTALL_LOG 2>&1
+pip install --no-index ./src/ >> $OO_INSTALL_LOG 2>&1
+echo "Installation preparation done!" >> $OO_INSTALL_LOG 2>&1
+
+echo "Using `ansible --version`" 2>&1 >> $OO_INSTALL_LOG
+
+if [ $OO_INSTALL_CONTEXT != 'origin_vm' ]
+then
+ echo "Starting oo-install..." 2>&1 >> $OO_INSTALL_LOG
+else
+ clear
+fi
+oo-install $cmdlnargs --ansible-playbook-directory ${TMPDIR}/INSTALLPKGNAME/openshift-ansible-*/ --ansible-log-path $OO_INSTALL_LOG
+
+if [ $OO_INSTALL_KEEP_ASSETS == 'true' ]
+then
+ echo "Keeping temporary assets in ${TMPDIR}"
+else
+ echo "Removing temporary assets."
+ rm -rf ${TMPDIR}INSTALLPKGNAME
+ rm -rf ${TMPDIR}INSTALLPKGNAME.tgz
+fi
+
+echo "Please see $OO_INSTALL_LOG for full output."
+
+exit
diff --git a/utils/site_assets/oo_install_launcher.README.txt b/utils/site_assets/oo_install_launcher.README.txt
new file mode 100644
index 000000000..46947b481
--- /dev/null
+++ b/utils/site_assets/oo_install_launcher.README.txt
@@ -0,0 +1,22 @@
+= oo-install Portable Installer Package
+
+This package is identical to the installer package that can be downloaded
+and executed directly from https://install.openshift.com/.
+
+NOTE: It will still be necessary for this installer to download RPMs from the
+internet, unless you have already set up the necessary local repositories.
+
+To run the installer from this package, run the following command:
+
+$ ./LAUNCHERNAME
+
+That command script and the packaged zip file can be burned to a CD or
+written to a USB drive and used to run the oo-install utility in places
+where the web-based installer is not reachable.
+
+All of the command-line arguments supported by oo-install can be passed
+to this launcher application.
+
+For more information for Enterprise installs, refer to the OpenShift
+Enterprise Administrator Guide:
+https://docs.openshift.com/enterprise/latest/welcome/index.html
diff --git a/utils/src/DESCRIPTION.rst b/utils/src/DESCRIPTION.rst
new file mode 100644
index 000000000..68b3a57f2
--- /dev/null
+++ b/utils/src/DESCRIPTION.rst
@@ -0,0 +1,13 @@
+A sample Python project
+=======================
+
+This is the description file for the project.
+
+The file should use UTF-8 encoding and be written using ReStructured Text. It
+will be used to generate the project webpage on PyPI, and should be written for
+that purpose.
+
+Typical contents for this file would include an overview of the project, basic
+usage examples, etc. Generally, including the project changelog in here is not
+a good idea, although a simple "What's New" section for the most recent version
+may be appropriate.
diff --git a/utils/src/MANIFEST.in b/utils/src/MANIFEST.in
new file mode 100644
index 000000000..d4153e738
--- /dev/null
+++ b/utils/src/MANIFEST.in
@@ -0,0 +1,9 @@
+include DESCRIPTION.rst
+
+# Include the test suite (FIXME: does not work yet)
+# recursive-include tests *
+
+# If using Python 2.6 or less, then have to include package data, even though
+# it's already declared in setup.py
+include ooinstall/*
+include ansible.cfg
diff --git a/utils/src/data/data_file b/utils/src/data/data_file
new file mode 100644
index 000000000..7c0646bfd
--- /dev/null
+++ b/utils/src/data/data_file
@@ -0,0 +1 @@
+some data \ No newline at end of file
diff --git a/utils/src/ooinstall/__init__.py b/utils/src/ooinstall/__init__.py
new file mode 100644
index 000000000..944dea3b5
--- /dev/null
+++ b/utils/src/ooinstall/__init__.py
@@ -0,0 +1,5 @@
+# TODO: Temporarily disabled due to importing old code into openshift-ansible
+# repo. We will work on these over time.
+# pylint: disable=missing-docstring
+
+from .oo_config import OOConfig
diff --git a/utils/src/ooinstall/ansible_plugins/facts_callback.py b/utils/src/ooinstall/ansible_plugins/facts_callback.py
new file mode 100644
index 000000000..ea6ed6574
--- /dev/null
+++ b/utils/src/ooinstall/ansible_plugins/facts_callback.py
@@ -0,0 +1,88 @@
+# TODO: Temporarily disabled due to importing old code into openshift-ansible
+# repo. We will work on these over time.
+# pylint: disable=bad-continuation,missing-docstring,no-self-use,invalid-name,no-value-for-parameter
+
+import os
+import yaml
+
+class CallbackModule(object):
+
+ def __init__(self):
+ ######################
+ # This is ugly. It should be updated in the following ways:
+ # 1) it should probably only be used for the
+ # openshift_facts.yml playbook, so maybe there's some way to check
+ # a variable that's set when that playbook is run?
+ try:
+ self.hosts_yaml_name = os.environ['OO_INSTALL_CALLBACK_FACTS_YAML']
+ except KeyError:
+ raise ValueError('The OO_INSTALL_CALLBACK_FACTS_YAML environment '
+ 'variable must be set.')
+ self.hosts_yaml = os.open(self.hosts_yaml_name, os.O_CREAT |
+ os.O_WRONLY)
+
+ def on_any(self, *args, **kwargs):
+ pass
+
+ def runner_on_failed(self, host, res, ignore_errors=False):
+ pass
+
+ def runner_on_ok(self, host, res):
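+ # Only the 'var=result' task (the facts dump from the openshift_facts
+ # playbook) is of interest; append its facts, keyed by host, to the
+ # YAML file opened in __init__.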
+ if res['invocation']['module_args'] == 'var=result':
+ facts = res['var']['result']['ansible_facts']['openshift']
+ hosts_yaml = {}
+ hosts_yaml[host] = facts
+ os.write(self.hosts_yaml, yaml.safe_dump(hosts_yaml))
+
+ def runner_on_skipped(self, host, item=None):
+ pass
+
+ def runner_on_unreachable(self, host, res):
+ pass
+
+ def runner_on_no_hosts(self):
+ pass
+
+ def runner_on_async_poll(self, host, res):
+ pass
+
+ def runner_on_async_ok(self, host, res):
+ pass
+
+ def runner_on_async_failed(self, host, res):
+ pass
+
+ def playbook_on_start(self):
+ pass
+
+ def playbook_on_notify(self, host, handler):
+ pass
+
+ def playbook_on_no_hosts_matched(self):
+ pass
+
+ def playbook_on_no_hosts_remaining(self):
+ pass
+
+ def playbook_on_task_start(self, name, is_conditional):
+ pass
+
+ #pylint: disable=too-many-arguments
+ def playbook_on_vars_prompt(self, varname, private=True, prompt=None,
+ encrypt=None, confirm=False, salt_size=None, salt=None, default=None):
+ pass
+
+ def playbook_on_setup(self):
+ pass
+
+ def playbook_on_import_for_host(self, host, imported_file):
+ pass
+
+ def playbook_on_not_import_for_host(self, host, missing_file):
+ pass
+
+ def playbook_on_play_start(self, name):
+ pass
+
+ def playbook_on_stats(self, stats):
+ pass
diff --git a/utils/src/ooinstall/cli_installer.py b/utils/src/ooinstall/cli_installer.py
new file mode 100644
index 000000000..c2ae00bd1
--- /dev/null
+++ b/utils/src/ooinstall/cli_installer.py
@@ -0,0 +1,479 @@
+# TODO: Temporarily disabled due to importing old code into openshift-ansible
+# repo. We will work on these over time.
+# pylint: disable=bad-continuation,missing-docstring,no-self-use,invalid-name,no-value-for-parameter
+
+import click
+import os
+import re
+import sys
+from ooinstall import install_transactions
+from ooinstall import OOConfig
+from ooinstall.oo_config import Host
+from ooinstall.variants import find_variant, get_variant_version_combos
+
+DEFAULT_ANSIBLE_CONFIG = '/usr/share/atomic-openshift-util/ansible.cfg'
+
+def validate_ansible_dir(path):
+ if not path:
+ raise click.BadParameter('An ansible path must be provided')
+ return path
+ # if not os.path.exists(path)):
+ # raise click.BadParameter("Path \"{}\" doesn't exist".format(path))
+
+def is_valid_hostname(hostname):
+ if not hostname or len(hostname) > 255:
+ return False
+ if hostname[-1] == ".":
+ hostname = hostname[:-1] # strip exactly one dot from the right, if present
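+ # RFC 1123 labels: each dot-separated piece is 1-63 alphanumeric or
+ # hyphen characters and may not begin or end with a hyphen.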
+ allowed = re.compile(r"(?!-)[A-Z\d-]{1,63}(?<!-)$", re.IGNORECASE)
+ return all(allowed.match(x) for x in hostname.split("."))
+
+def validate_prompt_hostname(hostname):
+ if '' == hostname or is_valid_hostname(hostname):
+ return hostname
+ raise click.BadParameter('"{}" appears to be an invalid hostname. ' \
+ 'Please double-check this value ' \
+ 'and re-enter it.'.format(hostname))
+
+def get_ansible_ssh_user():
+ click.clear()
+ message = """
+This installation process will involve connecting to remote hosts via ssh. Any
+account may be used; however, if a non-root account is used, it must have
+passwordless sudo access.
+"""
+ click.echo(message)
+ return click.prompt('User for ssh access', default='root')
+
+def list_hosts(hosts):
+ hosts_idx = range(len(hosts))
+ for idx in hosts_idx:
+ click.echo(' {}: {}'.format(idx, hosts[idx]))
+
+def delete_hosts(hosts):
+ while True:
+ list_hosts(hosts)
+ del_idx = click.prompt('Select host to delete, y/Y to confirm, ' \
+ 'or n/N to add more hosts', default='n')
+ try:
+ del_idx = int(del_idx)
+ hosts.remove(hosts[del_idx])
+ except IndexError:
+ click.echo("\"{}\" doesn't match any hosts listed.".format(del_idx))
+ except ValueError:
+ try:
+ response = del_idx.lower()
+ if response in ['y', 'n']:
+ return hosts, response
+ click.echo("\"{}\" doesn't coorespond to any valid input.".format(del_idx))
+ except AttributeError:
+ click.echo("\"{}\" doesn't coorespond to any valid input.".format(del_idx))
+ return hosts, None
+
+def collect_hosts():
+ """
+ Collect host information from user. This will later be filled in using
+ ansible.
+
+ Returns: a list of host information collected from the user
+ """
+ click.clear()
+ click.echo('***Host Configuration***')
+ message = """
+The OpenShift Master serves the API and web console. It also coordinates the
+jobs that have to run across the environment. It can even run the datastore.
+For wizard-based installations the database will be embedded. It's possible to
+change this later using etcd from Red Hat Enterprise Linux 7.
+
+Any Masters configured as part of this installation process will also be
+configured as Nodes. This is so that the Master will be able to proxy to Pods
+from the API. By default this Node will be unschedulable but this can be changed
+after installation with 'oadm manage-node'.
+
+The OpenShift Node provides the runtime environments for containers. It will
+host the required services to be managed by the Master.
+
+http://docs.openshift.com/enterprise/latest/architecture/infrastructure_components/kubernetes_infrastructure.html#master
+http://docs.openshift.com/enterprise/3.0/architecture/infrastructure_components/kubernetes_infrastructure.html#node
+ """
+ click.echo(message)
+
+ hosts = []
+ more_hosts = True
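+ # Naive IPv4 pattern, used only to decide whether the entered value is
+ # stored as an 'ip' or a 'hostname'.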
+ ip_regex = re.compile(r'^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$')
+
+ while more_hosts:
+ host_props = {}
+ hostname_or_ip = click.prompt('Enter hostname or IP address:',
+ default='',
+ value_proc=validate_prompt_hostname)
+
+ if ip_regex.match(hostname_or_ip):
+ host_props['ip'] = hostname_or_ip
+ else:
+ host_props['hostname'] = hostname_or_ip
+
+ host_props['master'] = click.confirm('Will this host be an OpenShift Master?')
+ host_props['node'] = True
+
+ rpm_or_container = click.prompt('Will this host be RPM or Container based (rpm/container)?',
+ type=click.Choice(['rpm', 'container']),
+ default='rpm')
+ if rpm_or_container == 'container':
+ host_props['containerized'] = True
+ else:
+ host_props['containerized'] = False
+
+ host = Host(**host_props)
+
+ hosts.append(host)
+
+ more_hosts = click.confirm('Do you want to add additional hosts?')
+ return hosts
+
+def confirm_hosts_facts(oo_cfg, callback_facts):
+ hosts = oo_cfg.hosts
+ click.clear()
+ message = """
+A list of the facts gathered from the provided hosts follows. Because it is
+often the case that the hostname for a system inside the cluster is different
+from the hostname that is resolvable from the command line or web clients,
+these settings cannot be validated automatically.
+
+For some cloud providers the installer is able to gather metadata exposed in
+the instance so reasonable defaults will be provided.
+
+Please confirm that they are correct before moving forward.
+
+"""
+ notes = """
+Format:
+
+IP,public IP,hostname,public hostname
+
+Notes:
+ * The installation host is the hostname from the installer's perspective.
+ * The IP of the host should be the internal IP of the instance.
+ * The public IP should be the externally accessible IP associated with the instance.
+ * The hostname should resolve to the internal IP from the instances
+ themselves.
+ * The public hostname should resolve to the external IP from hosts outside of
+ the cloud.
+"""
+
+ # For testing purposes we need to click.echo only once, so build up
+ # the message:
+ output = message
+
+ default_facts_lines = []
+ default_facts = {}
+ validated_facts = {}
+ for h in hosts:
+ default_facts[h] = {}
+ h.ip = callback_facts[str(h)]["common"]["ip"]
+ h.public_ip = callback_facts[str(h)]["common"]["public_ip"]
+ h.hostname = callback_facts[str(h)]["common"]["hostname"]
+ h.public_hostname = callback_facts[str(h)]["common"]["public_hostname"]
+
+ validated_facts[h] = {}
+ default_facts_lines.append(",".join([h.ip,
+ h.public_ip,
+ h.hostname,
+ h.public_hostname]))
+ output = "%s\n%s" % (output, ",".join([h.ip,
+ h.public_ip,
+ h.hostname,
+ h.public_hostname]))
+
+ output = "%s\n%s" % (output, notes)
+ click.echo(output)
+ facts_confirmed = click.confirm("Do the above facts look correct?")
+ if not facts_confirmed:
+ message = """
+Edit %s with the desired values and rerun oo-install with --unattended.
+""" % oo_cfg.config_path
+ click.echo(message)
+ # Make sure we actually write out the config file.
+ oo_cfg.save_to_disk()
+ sys.exit(0)
+ return default_facts
+
+def get_variant_and_version():
+ message = "\nWhich variant would you like to install?\n\n"
+
+ i = 1
+ combos = get_variant_version_combos()
+ for (variant, version) in combos:
+ message = "%s\n(%s) %s %s" % (message, i, variant.description,
+ version.name)
+ i = i + 1
+
+ click.echo(message)
+ response = click.prompt("Choose a variant from above: ", default=1)
+ product, version = combos[response - 1]
+
+ return product, version
+
+def confirm_continue(message):
+ click.echo(message)
+ click.confirm("Are you ready to continue?", default=False, abort=True)
+ return
+
+def error_if_missing_info(oo_cfg):
+ missing_info = False
+ if not oo_cfg.hosts:
+ missing_info = True
+ click.echo('For unattended installs, hosts must be specified on the '
+ 'command line or in the config file: %s' % oo_cfg.config_path)
+ sys.exit(1)
+
+ if 'ansible_ssh_user' not in oo_cfg.settings:
+ click.echo("Must specify ansible_ssh_user in configuration file.")
+ sys.exit(1)
+
+ # Lookup a variant based on the key we were given:
+ if not oo_cfg.settings['variant']:
+ click.echo("No variant specified in configuration file.")
+ sys.exit(1)
+
+ ver = None
+ if 'variant_version' in oo_cfg.settings:
+ ver = oo_cfg.settings['variant_version']
+ variant, version = find_variant(oo_cfg.settings['variant'], version=ver)
+ if variant is None or version is None:
+ err_variant_name = oo_cfg.settings['variant']
+ if ver:
+ err_variant_name = "%s %s" % (err_variant_name, ver)
+ click.echo("%s is not an installable variant." % err_variant_name)
+ sys.exit(1)
+ oo_cfg.settings['variant_version'] = version.name
+
+ missing_facts = oo_cfg.calc_missing_facts()
+ if len(missing_facts) > 0:
+ missing_info = True
+ click.echo('For unattended installs, facts must be provided for all masters/nodes:')
+ for host in missing_facts:
+ click.echo('Host "%s" missing facts: %s' % (host, ", ".join(missing_facts[host])))
+
+ if missing_info:
+ sys.exit(1)
+
+
+def get_missing_info_from_user(oo_cfg):
+ """ Prompts the user for any information missing from the given configuration. """
+ click.clear()
+
+ message = """
+Welcome to the OpenShift Enterprise 3 installation.
+
+Please confirm that following prerequisites have been met:
+
+* All systems where OpenShift will be installed are running Red Hat Enterprise
+ Linux 7.
+* All systems are properly subscribed to the required OpenShift Enterprise 3
+ repositories.
+* All systems have run docker-storage-setup (part of the Red Hat docker RPM).
+* All systems have working DNS that resolves not only from the perspective of
+ the installer but also from within the cluster.
+
+When the process completes you will have a default configuration for Masters
+and Nodes. For ongoing environment maintenance it's recommended that the
+official Ansible playbooks be used.
+
+For more information on installation prerequisites please see:
+https://docs.openshift.com/enterprise/latest/admin_guide/install/prerequisites.html
+"""
+ confirm_continue(message)
+ click.clear()
+
+ if oo_cfg.settings.get('ansible_ssh_user', '') == '':
+ oo_cfg.settings['ansible_ssh_user'] = get_ansible_ssh_user()
+ click.clear()
+
+ if not oo_cfg.hosts:
+ oo_cfg.hosts = collect_hosts()
+ click.clear()
+
+ if oo_cfg.settings.get('variant', '') == '':
+ variant, version = get_variant_and_version()
+ oo_cfg.settings['variant'] = variant.name
+ oo_cfg.settings['variant_version'] = version.name
+ click.clear()
+
+ return oo_cfg
+
+
+def collect_new_nodes():
+ click.clear()
+ click.echo('***New Node Configuration***')
+ message = """
+Add new nodes here
+ """
+ click.echo(message)
+ return collect_hosts()
+
+def get_installed_hosts(hosts, callback_facts):
+ installed_hosts = []
+ for host in hosts:
+ if(host.name in callback_facts.keys()
+ and 'common' in callback_facts[host.name].keys()
+ and callback_facts[host.name]['common'].get('version', '')
+ and callback_facts[host.name]['common'].get('version', '') != 'None'):
+ installed_hosts.append(host)
+ return installed_hosts
+
+def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force):
+
+ # Copy the list of existing hosts so we can remove any already installed nodes.
+ hosts_to_run_on = list(oo_cfg.hosts)
+
+ # Check if master or nodes already have something installed
+ installed_hosts = get_installed_hosts(oo_cfg.hosts, callback_facts)
+ if len(installed_hosts) > 0:
+ # present a message listing already installed hosts
+ for host in installed_hosts:
+ if host.master:
+ click.echo("{} is already an OpenShift Master".format(host))
+ # Masters stay in the list, we need to run against them when adding
+ # new nodes.
+ elif host.node:
+ click.echo("{} is already an OpenShift Node".format(host))
+ hosts_to_run_on.remove(host)
+ # for unattended either continue if they force install or exit if they didn't
+ if unattended:
+ if not force:
+ click.echo('Installed environment detected and no additional nodes specified: ' \
+ 'aborting. If you want a fresh install, use --force')
+ sys.exit(1)
+ # for attended ask the user what to do
+ else:
+ click.echo('Installed environment detected and no additional nodes specified. ')
+ response = click.prompt('Do you want to (1) add more nodes or ' \
+ '(2) perform a clean install?', type=int)
+ if response == 1: # add more nodes
+ new_nodes = collect_new_nodes()
+
+ hosts_to_run_on.extend(new_nodes)
+ oo_cfg.hosts.extend(new_nodes)
+
+ install_transactions.set_config(oo_cfg)
+ callback_facts, error = install_transactions.default_facts(oo_cfg.hosts)
+ if error:
+ click.echo("There was a problem fetching the required information. " \
+ "See {} for details.".format(oo_cfg.settings['ansible_log_path']))
+ sys.exit(1)
+ else:
+ pass # proceeding as normal should do a clean install
+
+ return hosts_to_run_on, callback_facts
+
+@click.command()
+@click.option('--configuration', '-c',
+ type=click.Path(file_okay=True,
+ dir_okay=False,
+ writable=True,
+ readable=True),
+ default=None)
+@click.option('--ansible-playbook-directory',
+ '-a',
+ type=click.Path(exists=True,
+ file_okay=False,
+ dir_okay=True,
+ writable=True,
+ readable=True),
+ # callback=validate_ansible_dir,
+ envvar='OO_ANSIBLE_PLAYBOOK_DIRECTORY')
+@click.option('--ansible-config',
+ type=click.Path(file_okay=True,
+ dir_okay=False,
+ writable=True,
+ readable=True),
+ default=None)
+@click.option('--ansible-log-path',
+ type=click.Path(file_okay=True,
+ dir_okay=False,
+ writable=True,
+ readable=True),
+ default="/tmp/ansible.log")
+@click.option('--unattended', '-u', is_flag=True, default=False)
+@click.option('--force', '-f', is_flag=True, default=False)
+#pylint: disable=too-many-arguments
+# Main CLI entrypoint, not much we can do about too many arguments.
+def main(configuration, ansible_playbook_directory, ansible_config, ansible_log_path, unattended, force):
+ oo_cfg = OOConfig(configuration)
+
+ if not ansible_playbook_directory:
+ ansible_playbook_directory = oo_cfg.settings.get('ansible_playbook_directory', '')
+
+ if ansible_config:
+ oo_cfg.settings['ansible_config'] = ansible_config
+ elif os.path.exists(DEFAULT_ANSIBLE_CONFIG):
+ # If we're installed by RPM this file should exist and we can use it as our default:
+ oo_cfg.settings['ansible_config'] = DEFAULT_ANSIBLE_CONFIG
+
+ validate_ansible_dir(ansible_playbook_directory)
+ oo_cfg.settings['ansible_playbook_directory'] = ansible_playbook_directory
+ oo_cfg.ansible_playbook_directory = ansible_playbook_directory
+
+ oo_cfg.settings['ansible_log_path'] = ansible_log_path
+ install_transactions.set_config(oo_cfg)
+
+ if unattended:
+ error_if_missing_info(oo_cfg)
+ else:
+ oo_cfg = get_missing_info_from_user(oo_cfg)
+
+ click.echo('Gathering information from hosts...')
+ callback_facts, error = install_transactions.default_facts(oo_cfg.hosts)
+ if error:
+ click.echo("There was a problem fetching the required information. " \
+ "Please see {} for details.".format(oo_cfg.settings['ansible_log_path']))
+ sys.exit(1)
+
+ hosts_to_run_on, callback_facts = get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force)
+
+
+ click.echo('Writing config to: %s' % oo_cfg.config_path)
+
+ # We already verified this is not the case for unattended installs, so this can
+ # only trigger for live CLI users:
+ # TODO: if there are *new* nodes and this is a live install, we may need the user
+ # to confirm the settings for new nodes. Look into this once we're distinguishing
+ # between new and pre-existing nodes.
+ if len(oo_cfg.calc_missing_facts()) > 0:
+ confirm_hosts_facts(oo_cfg, callback_facts)
+
+ oo_cfg.save_to_disk()
+
+ click.echo('Ready to run installation process.')
+ message = """
+If changes are needed to the values recorded by the installer please update {}.
+""".format(oo_cfg.config_path)
+ if not unattended:
+ confirm_continue(message)
+
+ error = install_transactions.run_main_playbook(oo_cfg.hosts,
+ hosts_to_run_on)
+ if error:
+ # The bootstrap script will print out the log location.
+ message = """
+An error was detected. After resolving the problem please relaunch the
+installation process.
+"""
+ click.echo(message)
+ sys.exit(1)
+ else:
+ message = """
+The installation was successful!
+
+If this is your first time installing please take a look at the Administrator
+Guide for advanced options related to routing, storage, authentication and much
+more:
+
+http://docs.openshift.com/enterprise/latest/admin_guide/overview.html
+"""
+ click.echo(message)
+ click.pause()
+
+if __name__ == '__main__':
+ main()
diff --git a/utils/src/ooinstall/install_transactions.py b/utils/src/ooinstall/install_transactions.py
new file mode 100644
index 000000000..cef6662d7
--- /dev/null
+++ b/utils/src/ooinstall/install_transactions.py
@@ -0,0 +1,133 @@
+# TODO: Temporarily disabled due to importing old code into openshift-ansible
+# repo. We will work on these over time.
+# pylint: disable=bad-continuation,missing-docstring,no-self-use,invalid-name,global-statement,global-variable-not-assigned
+
+import subprocess
+import os
+import yaml
+from ooinstall.variants import find_variant
+
+CFG = None
+
+def set_config(cfg):
+ global CFG
+ CFG = cfg
+
+def generate_inventory(hosts):
+ global CFG
+ base_inventory_path = CFG.settings['ansible_inventory_path']
+ base_inventory = open(base_inventory_path, 'w')
+ base_inventory.write('\n[OSEv3:children]\nmasters\nnodes\n')
+ base_inventory.write('\n[OSEv3:vars]\n')
+ base_inventory.write('ansible_ssh_user={}\n'.format(CFG.settings['ansible_ssh_user']))
+ if CFG.settings['ansible_ssh_user'] != 'root':
+ base_inventory.write('ansible_sudo=true\n')
+
+ # Find the correct deployment type for ansible:
+ ver = find_variant(CFG.settings['variant'],
+ version=CFG.settings.get('variant_version', None))[1]
+ base_inventory.write('deployment_type={}\n'.format(ver.ansible_key))
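+ # ansible_key is the deployment_type value the openshift-ansible playbooks
+ # expect for the chosen variant/version (see ooinstall.variants).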
+
+ if 'OO_INSTALL_DEVEL_REGISTRY' in os.environ:
+ base_inventory.write('oreg_url=rcm-img-docker01.build.eng.bos.redhat.com:'
+ '5001/openshift3/ose-${component}:${version}\n')
+ if 'OO_INSTALL_PUDDLE_REPO_ENABLE' in os.environ:
+ base_inventory.write("openshift_additional_repos=[{'id': 'ose-devel', "
+ "'name': 'ose-devel', "
+ "'baseurl': 'http://buildvm-devops.usersys.redhat.com"
+ "/puddle/build/AtomicOpenShift/3.1/latest/RH7-RHAOS-3.1/$basearch/os', "
+ "'enabled': 1, 'gpgcheck': 0}]\n")
+ if 'OO_INSTALL_STAGE_REGISTRY' in os.environ:
+ base_inventory.write('oreg_url=registry.access.stage.redhat.com/openshift3/ose-${component}:${version}\n')
+
+ base_inventory.write('\n[masters]\n')
+ masters = [host for host in hosts if host.master]
+ for master in masters:
+ write_host(master, base_inventory)
+ base_inventory.write('\n[nodes]\n')
+ nodes = [host for host in hosts if host.node]
+ for node in nodes:
+ # TODO: Until the Master can run the SDN itself we have to configure the Masters
+ # as Nodes too.
+ scheduleable = True
+ # If there's only one Node and it's also a Master we want it to be scheduleable:
+ if node in masters and len(masters) != 1:
+ scheduleable = False
+ write_host(node, base_inventory, scheduleable)
+ base_inventory.close()
+ return base_inventory_path
+
+
+def write_host(host, inventory, scheduleable=True):
+ global CFG
+ facts = ''
+ if host.ip:
+ facts += ' openshift_ip={}'.format(host.ip)
+ if host.public_ip:
+ facts += ' openshift_public_ip={}'.format(host.public_ip)
+ if host.hostname:
+ facts += ' openshift_hostname={}'.format(host.hostname)
+ if host.public_hostname:
+ facts += ' openshift_public_hostname={}'.format(host.public_hostname)
+ # TODO: For now write_host handles both masters and nodes.
+ # Technically only nodes will ever need this.
+ if not scheduleable:
+ facts += ' openshift_scheduleable=False'
+ inventory.write('{} {}\n'.format(host, facts))
+
+
+def load_system_facts(inventory_file, os_facts_path, env_vars):
+ """
+ Retrieves system facts from the remote systems.
+ """
+ FNULL = open(os.devnull, 'w')
+ status = subprocess.call(['ansible-playbook',
+ '--inventory-file={}'.format(inventory_file),
+ os_facts_path],
+ env=env_vars,
+ stdout=FNULL)
+ if not status == 0:
+ return [], 1
+ callback_facts_file = open(CFG.settings['ansible_callback_facts_yaml'], 'r')
+ callback_facts = yaml.load(callback_facts_file)
+ callback_facts_file.close()
+ return callback_facts, 0
+
+
+def default_facts(hosts):
+ global CFG
+ inventory_file = generate_inventory(hosts)
+ os_facts_path = '{}/playbooks/byo/openshift_facts.yml'.format(CFG.ansible_playbook_directory)
+
+ facts_env = os.environ.copy()
+ facts_env["OO_INSTALL_CALLBACK_FACTS_YAML"] = CFG.settings['ansible_callback_facts_yaml']
+ facts_env["ANSIBLE_CALLBACK_PLUGINS"] = CFG.settings['ansible_plugins_directory']
+ if 'ansible_log_path' in CFG.settings:
+ facts_env["ANSIBLE_LOG_PATH"] = CFG.settings['ansible_log_path']
+ if 'ansible_config' in CFG.settings:
+ facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_config']
+ return load_system_facts(inventory_file, os_facts_path, facts_env)
+
+
+def run_main_playbook(hosts, hosts_to_run_on):
+ global CFG
+ inventory_file = generate_inventory(hosts)
+ if len(hosts_to_run_on) != len(hosts):
+ main_playbook_path = os.path.join(CFG.ansible_playbook_directory,
+ 'playbooks/common/openshift-cluster/scaleup.yml')
+ else:
+ main_playbook_path = os.path.join(CFG.ansible_playbook_directory,
+ 'playbooks/byo/config.yml')
+ facts_env = os.environ.copy()
+ if 'ansible_log_path' in CFG.settings:
+ facts_env['ANSIBLE_LOG_PATH'] = CFG.settings['ansible_log_path']
+ if 'ansible_config' in CFG.settings:
+ facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_config']
+ return run_ansible(main_playbook_path, inventory_file, facts_env)
+
+def run_ansible(playbook, inventory, env_vars):
+ return subprocess.call(['ansible-playbook',
+ '--inventory-file={}'.format(inventory),
+ playbook],
+ env=env_vars)
diff --git a/utils/src/ooinstall/oo_config.py b/utils/src/ooinstall/oo_config.py
new file mode 100644
index 000000000..a2f53cf78
--- /dev/null
+++ b/utils/src/ooinstall/oo_config.py
@@ -0,0 +1,195 @@
+# TODO: Temporarily disabled due to importing old code into openshift-ansible
+# repo. We will work on these over time.
+# pylint: disable=bad-continuation,missing-docstring,no-self-use,invalid-name,too-many-instance-attributes,too-few-public-methods
+
+import os
+import yaml
+from pkg_resources import resource_filename
+
+PERSIST_SETTINGS = [
+ 'ansible_ssh_user',
+ 'ansible_config',
+ 'ansible_log_path',
+ 'variant',
+ 'variant_version',
+ ]
+REQUIRED_FACTS = ['ip', 'public_ip', 'hostname', 'public_hostname']
+
+
+class OOConfigFileError(Exception):
+ """The provided config file path can't be read/written
+ """
+ pass
+
+
+class OOConfigInvalidHostError(Exception):
+ """ Host in config is missing both ip and hostname. """
+ pass
+
+
+class Host(object):
+ """ A system we will or have installed OpenShift on. """
+ def __init__(self, **kwargs):
+ self.ip = kwargs.get('ip', None)
+ self.hostname = kwargs.get('hostname', None)
+ self.public_ip = kwargs.get('public_ip', None)
+ self.public_hostname = kwargs.get('public_hostname', None)
+
+ # Should this host run as an OpenShift master:
+ self.master = kwargs.get('master', False)
+
+ # Should this host run as an OpenShift node:
+ self.node = kwargs.get('node', False)
+ self.containerized = kwargs.get('containerized', False)
+
+ if self.ip is None and self.hostname is None:
+ raise OOConfigInvalidHostError("You must specify either 'ip' or 'hostname'")
+
+ if self.master is False and self.node is False:
+ raise OOConfigInvalidHostError(
+ "You must specify each host as either a master or a node.")
+
+ # Hosts can be specified with an ip, hostname, or both. However we need
+ # something authoritative we can connect to and refer to the host by.
+ # Preference given to the IP if specified as this is more specific.
+ # We know one must be set by this point.
+ self.name = self.ip if self.ip is not None else self.hostname
+
+ def __str__(self):
+ return self.name
+
+ def __repr__(self):
+ return self.name
+
+ def to_dict(self):
+ """ Used when exporting to yaml. """
+ d = {}
+ for prop in ['ip', 'hostname', 'public_ip', 'public_hostname',
+ 'master', 'node', 'containerized']:
+ # If the property is defined (not None or False), export it:
+ if getattr(self, prop):
+ d[prop] = getattr(self, prop)
+ return d
+
+
+class OOConfig(object):
+ new_config = True
+ default_dir = os.path.normpath(
+ os.environ.get('XDG_CONFIG_HOME',
+ os.environ['HOME'] + '/.config/') + '/openshift/')
+ default_file = '/installer.cfg.yml'
+
+ def __init__(self, config_path):
+ if config_path:
+ self.config_path = os.path.normpath(config_path)
+ else:
+ self.config_path = os.path.normpath(self.default_dir +
+ self.default_file)
+ self.settings = {}
+ self.read_config()
+ self.set_defaults()
+
+ def read_config(self, is_new=False):
+ self.hosts = []
+ try:
+ new_settings = None
+ if os.path.exists(self.config_path):
+ cfgfile = open(self.config_path, 'r')
+ new_settings = yaml.safe_load(cfgfile.read())
+ cfgfile.close()
+ if new_settings:
+ self.settings = new_settings
+ # Parse the hosts into DTO objects:
+ if 'hosts' in self.settings:
+ for host in self.settings['hosts']:
+ self.hosts.append(Host(**host))
+
+ # Watch out for the variant_version coming in as a float:
+ if 'variant_version' in self.settings:
+ self.settings['variant_version'] = \
+ str(self.settings['variant_version'])
+
+ except IOError, ferr:
+ raise OOConfigFileError('Cannot open config file "{}": {}'.format(ferr.filename,
+ ferr.strerror))
+ except yaml.scanner.ScannerError:
+ raise OOConfigFileError('Config file "{}" is not a valid YAML document'.format(self.config_path))
+ self.new_config = is_new
+
+ def set_defaults(self):
+
+ if 'ansible_inventory_directory' not in self.settings:
+ self.settings['ansible_inventory_directory'] = \
+ self._default_ansible_inv_dir()
+ if not os.path.exists(self.settings['ansible_inventory_directory']):
+ os.makedirs(self.settings['ansible_inventory_directory'])
+ if 'ansible_plugins_directory' not in self.settings:
+ self.settings['ansible_plugins_directory'] = resource_filename(__name__, 'ansible_plugins')
+
+ if 'ansible_callback_facts_yaml' not in self.settings:
+ self.settings['ansible_callback_facts_yaml'] = '%s/callback_facts.yaml' % \
+ self.settings['ansible_inventory_directory']
+
+ if 'ansible_ssh_user' not in self.settings:
+ self.settings['ansible_ssh_user'] = ''
+
+ self.settings['ansible_inventory_path'] = '{}/hosts'.format(self.settings['ansible_inventory_directory'])
+
+ # clean up any empty sets
+ for setting in self.settings.keys():
+ if not self.settings[setting]:
+ self.settings.pop(setting)
+
+ def _default_ansible_inv_dir(self):
+ return os.path.normpath(
+ os.path.dirname(self.config_path) + "/.ansible")
+
+ def calc_missing_facts(self):
+ """
+ Determine which host facts are not defined in the config.
+
+ Returns a hash of host to a list of the missing facts.
+ """
+ result = {}
+
+ for host in self.hosts:
+ missing_facts = []
+ for required_fact in REQUIRED_FACTS:
+ if not getattr(host, required_fact):
+ missing_facts.append(required_fact)
+ if len(missing_facts) > 0:
+ result[host.name] = missing_facts
+ return result
+
+ def save_to_disk(self):
+ out_file = open(self.config_path, 'w')
+ out_file.write(self.yaml())
+ out_file.close()
+
+ def persist_settings(self):
+ p_settings = {}
+ for setting in PERSIST_SETTINGS:
+ if setting in self.settings and self.settings[setting]:
+ p_settings[setting] = self.settings[setting]
+ p_settings['hosts'] = []
+ for host in self.hosts:
+ p_settings['hosts'].append(host.to_dict())
+
+ if self.settings['ansible_inventory_directory'] != \
+ self._default_ansible_inv_dir():
+ p_settings['ansible_inventory_directory'] = \
+ self.settings['ansible_inventory_directory']
+
+ return p_settings
+
+ def yaml(self):
+ return yaml.safe_dump(self.persist_settings(), default_flow_style=False)
+
+ def __str__(self):
+ return self.yaml()
+
+ def get_host(self, name):
+ for host in self.hosts:
+ if host.name == name:
+ return host
+ return None
diff --git a/utils/src/ooinstall/variants.py b/utils/src/ooinstall/variants.py
new file mode 100644
index 000000000..ed98429fc
--- /dev/null
+++ b/utils/src/ooinstall/variants.py
@@ -0,0 +1,74 @@
+# TODO: Temporarily disabled due to importing old code into openshift-ansible
+# repo. We will work on these over time.
+# pylint: disable=bad-continuation,missing-docstring,no-self-use,invalid-name,too-few-public-methods
+
+"""
+Defines the variants and versions the installer supports, and the metadata
+required to run Ansible correctly.
+
+This module needs to be updated for each major release to allow the new version
+to be specified by the user, and to point the generic variants to the latest
+version.
+"""
+
+
+class Version(object):
+ def __init__(self, name, ansible_key):
+ self.name = name # i.e. 3.0, 3.1
+
+ self.ansible_key = ansible_key
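+ # The deployment_type value handed to the openshift-ansible playbooks
+ # for this version (e.g. 'enterprise' for OSE 3.0).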
+
+
+class Variant(object):
+ def __init__(self, name, description, versions):
+ # Supported variant name:
+ self.name = name
+
+ # Friendly name for the variant:
+ self.description = description
+
+ self.versions = versions
+
+
+# WARNING: Keep the versions ordered, most recent last:
+OSE = Variant('openshift-enterprise', 'OpenShift Enterprise',
+ [
+ Version('3.0', 'enterprise'),
+ Version('3.1', 'openshift-enterprise')
+ ]
+)
+
+AEP = Variant('atomic-enterprise', 'Atomic OpenShift Enterprise',
+ [
+ Version('3.1', 'atomic-enterprise')
+ ]
+)
+
+# Ordered list of variants we can install, first is the default.
+SUPPORTED_VARIANTS = (OSE, AEP)
+
+
+def find_variant(name, version=None):
+ """
+ Locate the variant object for the variant given in config file, and
+ the correct version to use for it.
+ Return (None, None) if we can't find a match.
+ """
+ prod = None
+ for prod in SUPPORTED_VARIANTS:
+ if prod.name == name:
+ if version is None:
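+ # Versions are kept ordered with the most recent last (see the
+ # warning above SUPPORTED_VARIANTS), so [-1] is the newest.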
+ return (prod, prod.versions[-1])
+ for v in prod.versions:
+ if v.name == version:
+ return (prod, v)
+
+ return (None, None)
+
+def get_variant_version_combos():
+ combos = []
+ for variant in SUPPORTED_VARIANTS:
+ for ver in variant.versions:
+ combos.append((variant, ver))
+ return combos
+
diff --git a/utils/test/__init__.py b/utils/test/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/utils/test/__init__.py
diff --git a/utils/test/cli_installer_tests.py b/utils/test/cli_installer_tests.py
new file mode 100644
index 000000000..076fe5dc9
--- /dev/null
+++ b/utils/test/cli_installer_tests.py
@@ -0,0 +1,471 @@
+# TODO: Temporarily disabled due to importing old code into openshift-ansible
+# repo. We will work on these over time.
+# pylint: disable=bad-continuation,missing-docstring,no-self-use,invalid-name
+
+import copy
+import os
+import ConfigParser
+import yaml
+
+import ooinstall.cli_installer as cli
+
+from click.testing import CliRunner
+from test.oo_config_tests import OOInstallFixture
+from mock import patch
+
+
+MOCK_FACTS = {
+ '10.0.0.1': {
+ 'common': {
+ 'ip': '10.0.0.1',
+ 'public_ip': '10.0.0.1',
+ 'hostname': 'master-private.example.com',
+ 'public_hostname': 'master.example.com'
+ }
+ },
+ '10.0.0.2': {
+ 'common': {
+ 'ip': '10.0.0.2',
+ 'public_ip': '10.0.0.2',
+ 'hostname': 'node1-private.example.com',
+ 'public_hostname': 'node1.example.com'
+ }
+ },
+ '10.0.0.3': {
+ 'common': {
+ 'ip': '10.0.0.3',
+ 'public_ip': '10.0.0.3',
+ 'hostname': 'node2-private.example.com',
+ 'public_hostname': 'node2.example.com'
+ }
+ },
+}
+
+# Substitute in a product name before use:
+SAMPLE_CONFIG = """
+variant: %s
+ansible_ssh_user: root
+hosts:
+ - ip: 10.0.0.1
+ hostname: master-private.example.com
+ public_ip: 24.222.0.1
+ public_hostname: master.example.com
+ master: true
+ node: true
+ - ip: 10.0.0.2
+ hostname: node1-private.example.com
+ public_ip: 24.222.0.2
+ public_hostname: node1.example.com
+ node: true
+ - ip: 10.0.0.3
+ hostname: node2-private.example.com
+ public_ip: 24.222.0.3
+ public_hostname: node2.example.com
+ node: true
+"""
+
+
+class OOCliFixture(OOInstallFixture):
+
+ def setUp(self):
+ OOInstallFixture.setUp(self)
+ self.runner = CliRunner()
+
+ # Add any arguments you would like to test here; the defaults ensure
+ # we only do unattended invocations and use temporary files/dirs.
+ self.cli_args = ["-a", self.work_dir]
+
+ def run_cli(self):
+ return self.runner.invoke(cli.main, self.cli_args)
+
+ def assert_result(self, result, exit_code):
+ if result.exception is not None or result.exit_code != exit_code:
+ print "Unexpected result from CLI execution"
+ print "Exit code: %s" % result.exit_code
+ print "Exception: %s" % result.exception
+ print result.exc_info
+ import traceback
+ traceback.print_exception(*result.exc_info)
+ print "Output:\n%s" % result.output
+ self.fail("Exception during CLI execution")
+
+ def _read_yaml(self, config_file_path):
+ f = open(config_file_path, 'r')
+ config = yaml.safe_load(f.read())
+ f.close()
+ return config
+
+
+class UnattendedCliTests(OOCliFixture):
+
+ def setUp(self):
+ OOCliFixture.setUp(self)
+ self.cli_args.append("-u")
+
+ @patch('ooinstall.install_transactions.run_main_playbook')
+ @patch('ooinstall.install_transactions.load_system_facts')
+ def test_cfg_full_run(self, load_facts_mock, run_playbook_mock):
+ load_facts_mock.return_value = (MOCK_FACTS, 0)
+ run_playbook_mock.return_value = 0
+
+ config_file = self.write_config(os.path.join(self.work_dir,
+ 'ooinstall.conf'), SAMPLE_CONFIG % 'openshift-enterprise')
+
+ self.cli_args.extend(["-c", config_file])
+ result = self.runner.invoke(cli.main, self.cli_args)
+ self.assert_result(result, 0)
+
+ load_facts_args = load_facts_mock.call_args[0]
+ self.assertEquals(os.path.join(self.work_dir, ".ansible/hosts"),
+ load_facts_args[0])
+ self.assertEquals(os.path.join(self.work_dir,
+ "playbooks/byo/openshift_facts.yml"), load_facts_args[1])
+ env_vars = load_facts_args[2]
+ self.assertEquals(os.path.join(self.work_dir,
+ '.ansible/callback_facts.yaml'),
+ env_vars['OO_INSTALL_CALLBACK_FACTS_YAML'])
+ self.assertEqual('/tmp/ansible.log', env_vars['ANSIBLE_LOG_PATH'])
+ self.assertTrue('ANSIBLE_CONFIG' not in env_vars)
+
+ # Make sure we ran on the expected masters and nodes:
+ hosts = run_playbook_mock.call_args[0][0]
+ hosts_to_run_on = run_playbook_mock.call_args[0][1]
+ self.assertEquals(3, len(hosts))
+ self.assertEquals(3, len(hosts_to_run_on))
+
+ @patch('ooinstall.install_transactions.run_main_playbook')
+ @patch('ooinstall.install_transactions.load_system_facts')
+ def test_inventory_write(self, load_facts_mock, run_playbook_mock):
+
+ # Add an ssh user so we can verify it makes it to the inventory file:
+ merged_config = "%s\n%s" % (SAMPLE_CONFIG % 'openshift-enterprise',
+ "ansible_ssh_user: bob")
+ load_facts_mock.return_value = (MOCK_FACTS, 0)
+ run_playbook_mock.return_value = 0
+
+ config_file = self.write_config(os.path.join(self.work_dir,
+ 'ooinstall.conf'), merged_config)
+
+ self.cli_args.extend(["-c", config_file])
+ result = self.runner.invoke(cli.main, self.cli_args)
+ self.assert_result(result, 0)
+
+ # Check the inventory file looks as we would expect:
+ inventory = ConfigParser.ConfigParser(allow_no_value=True)
+ inventory.read(os.path.join(self.work_dir, '.ansible/hosts'))
+ self.assertEquals('bob',
+ inventory.get('OSEv3:vars', 'ansible_ssh_user'))
+ self.assertEquals('openshift-enterprise',
+ inventory.get('OSEv3:vars', 'deployment_type'))
+
+ # Check the masters:
+ self.assertEquals(1, len(inventory.items('masters')))
+ self.assertEquals(3, len(inventory.items('nodes')))
+
+ for item in inventory.items('masters'):
+ # ansible host lines do NOT parse nicely:
+ master_line = item[0]
+ if item[1] is not None:
+ master_line = "%s=%s" % (master_line, item[1])
+ self.assertTrue('openshift_ip' in master_line)
+ self.assertTrue('openshift_public_ip' in master_line)
+ self.assertTrue('openshift_hostname' in master_line)
+ self.assertTrue('openshift_public_hostname' in master_line)
+
+ @patch('ooinstall.install_transactions.run_main_playbook')
+ @patch('ooinstall.install_transactions.load_system_facts')
+ def test_variant_version_latest_assumed(self, load_facts_mock,
+ run_playbook_mock):
+ load_facts_mock.return_value = (MOCK_FACTS, 0)
+ run_playbook_mock.return_value = 0
+
+ config_file = self.write_config(os.path.join(self.work_dir,
+ 'ooinstall.conf'), SAMPLE_CONFIG % 'openshift-enterprise')
+
+ self.cli_args.extend(["-c", config_file])
+ result = self.runner.invoke(cli.main, self.cli_args)
+ self.assert_result(result, 0)
+
+ written_config = self._read_yaml(config_file)
+
+ self.assertEquals('openshift-enterprise', written_config['variant'])
+ # We didn't specify a version so the latest should have been assumed,
+ # and written to disk:
+ self.assertEquals('3.1', written_config['variant_version'])
+
+ # Make sure the correct value was passed to ansible:
+ inventory = ConfigParser.ConfigParser(allow_no_value=True)
+ inventory.read(os.path.join(self.work_dir, '.ansible/hosts'))
+ self.assertEquals('openshift-enterprise',
+ inventory.get('OSEv3:vars', 'deployment_type'))
+
+ @patch('ooinstall.install_transactions.run_main_playbook')
+ @patch('ooinstall.install_transactions.load_system_facts')
+ def test_variant_version_preserved(self, load_facts_mock,
+ run_playbook_mock):
+ load_facts_mock.return_value = (MOCK_FACTS, 0)
+ run_playbook_mock.return_value = 0
+
+ config = SAMPLE_CONFIG % 'openshift-enterprise'
+ config = '%s\n%s' % (config, 'variant_version: 3.0')
+ config_file = self.write_config(os.path.join(self.work_dir,
+ 'ooinstall.conf'), config)
+
+ self.cli_args.extend(["-c", config_file])
+ result = self.runner.invoke(cli.main, self.cli_args)
+ self.assert_result(result, 0)
+
+ written_config = self._read_yaml(config_file)
+
+ self.assertEquals('openshift-enterprise', written_config['variant'])
+ # Make sure our older version was preserved and written back to disk:
+ self.assertEquals('3.0', written_config['variant_version'])
+
+ inventory = ConfigParser.ConfigParser(allow_no_value=True)
+ inventory.read(os.path.join(self.work_dir, '.ansible/hosts'))
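+ # variant_version 3.0 is expected to map to the older 'enterprise'
+ # deployment type, unlike 3.1's 'openshift-enterprise' above: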
+ self.assertEquals('enterprise',
+ inventory.get('OSEv3:vars', 'deployment_type'))
+
+ @patch('ooinstall.install_transactions.run_ansible')
+ @patch('ooinstall.install_transactions.load_system_facts')
+ def test_no_ansible_config_specified(self, load_facts_mock, run_ansible_mock):
+ load_facts_mock.return_value = (MOCK_FACTS, 0)
+ run_ansible_mock.return_value = 0
+
+ config = SAMPLE_CONFIG % 'openshift-enterprise'
+
+ self._ansible_config_test(load_facts_mock, run_ansible_mock,
+ config, None, None)
+
+ @patch('ooinstall.install_transactions.run_ansible')
+ @patch('ooinstall.install_transactions.load_system_facts')
+ def test_ansible_config_specified_cli(self, load_facts_mock, run_ansible_mock):
+ load_facts_mock.return_value = (MOCK_FACTS, 0)
+ run_ansible_mock.return_value = 0
+
+ config = SAMPLE_CONFIG % 'openshift-enterprise'
+ ansible_config = os.path.join(self.work_dir, 'ansible.cfg')
+
+ self._ansible_config_test(load_facts_mock, run_ansible_mock,
+ config, ansible_config, ansible_config)
+
+ @patch('ooinstall.install_transactions.run_ansible')
+ @patch('ooinstall.install_transactions.load_system_facts')
+ def test_ansible_config_specified_in_installer_config(self,
+ load_facts_mock, run_ansible_mock):
+
+ load_facts_mock.return_value = (MOCK_FACTS, 0)
+ run_ansible_mock.return_value = 0
+
+ ansible_config = os.path.join(self.work_dir, 'ansible.cfg')
+ config = SAMPLE_CONFIG % 'openshift-enterprise'
+ config = "%s\nansible_config: %s" % (config, ansible_config)
+ self._ansible_config_test(load_facts_mock, run_ansible_mock,
+ config, None, ansible_config)
+
+ #pylint: disable=too-many-arguments
+ # This method lets us write drastically simpler tests, and all of
+ # the args are genuinely useful.
+ def _ansible_config_test(self, load_facts_mock, run_ansible_mock,
+ installer_config, ansible_config_cli=None, expected_result=None):
+ """
+ Utility method for testing the ways you can specify the ansible config.
+ """
+
+ load_facts_mock.return_value = (MOCK_FACTS, 0)
+ run_ansible_mock.return_value = 0
+
+ config_file = self.write_config(os.path.join(self.work_dir,
+ 'ooinstall.conf'), installer_config)
+
+ self.cli_args.extend(["-c", config_file])
+ if ansible_config_cli:
+ self.cli_args.extend(["--ansible-config", ansible_config_cli])
+ result = self.runner.invoke(cli.main, self.cli_args)
+ self.assert_result(result, 0)
+
+ # Test the env vars for facts playbook:
+ facts_env_vars = load_facts_mock.call_args[0][2]
+ if expected_result:
+ self.assertEquals(expected_result, facts_env_vars['ANSIBLE_CONFIG'])
+ else:
+ self.assertFalse('ANSIBLE_CONFIG' in facts_env_vars)
+
+ # Test the env vars for main playbook:
+ env_vars = run_ansible_mock.call_args[0][2]
+ if expected_result:
+ self.assertEquals(expected_result, env_vars['ANSIBLE_CONFIG'])
+ else:
+ self.assertFalse('ANSIBLE_CONFIG' in env_vars)
+
+
+class AttendedCliTests(OOCliFixture):
+
+ def setUp(self):
+ OOCliFixture.setUp(self)
+ # Doesn't exist but keeps us from reading the local user's config:
+ self.config_file = os.path.join(self.work_dir, 'config.yml')
+ self.cli_args.extend(["-c", self.config_file])
+
+ #pylint: disable=too-many-arguments
+ def _build_input(self, ssh_user=None, hosts=None, variant_num=None,
+ add_nodes=None, confirm_facts=None):
+ """
+ Builds a CLI input string with newline characters to simulate
+ the full run.
+ This gives us only one place to update when the input prompts change.
+ """
+
+ inputs = [
+ 'y', # let's proceed
+ ]
+ if ssh_user:
+ inputs.append(ssh_user)
+
+ if hosts:
+ for i, (host, is_master) in enumerate(hosts):
+ inputs.append(host)
+ inputs.append('y' if is_master else 'n')
+ inputs.append('rpm')
+ # 'y' to add more hosts, 'n' once we hit the last one:
+ inputs.append('y' if i < len(hosts) - 1 else 'n')
+
+ if variant_num:
+ inputs.append(str(variant_num)) # Choose variant + version
+
+ # TODO: support option 2, fresh install
+ if add_nodes:
+ inputs.append('1') # Add more nodes
+ for i, (host, is_master) in enumerate(add_nodes):
+ inputs.append(host)
+ inputs.append('y' if is_master else 'n')
+ inputs.append('rpm')
+ inputs.append('y' if i < len(add_nodes) - 1 else 'n')
+
+ inputs.extend([
+ confirm_facts,
+ 'y', # let's do this
+ ])
+
+ return '\n'.join(inputs)
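+
+ # For example, _build_input(ssh_user='root', hosts=[('10.0.0.1', True)],
+ # variant_num=1, confirm_facts='y') produces one answer per line:
+ # y, root, 10.0.0.1, y, rpm, n, 1, y, y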
+
+ def _verify_load_facts(self, load_facts_mock):
+ """ Check that we ran load facts with expected inputs. """
+ load_facts_args = load_facts_mock.call_args[0]
+ self.assertEquals(os.path.join(self.work_dir, ".ansible/hosts"),
+ load_facts_args[0])
+ self.assertEquals(os.path.join(self.work_dir,
+ "playbooks/byo/openshift_facts.yml"), load_facts_args[1])
+ env_vars = load_facts_args[2]
+ self.assertEquals(os.path.join(self.work_dir,
+ '.ansible/callback_facts.yaml'),
+ env_vars['OO_INSTALL_CALLBACK_FACTS_YAML'])
+ self.assertEqual('/tmp/ansible.log', env_vars['ANSIBLE_LOG_PATH'])
+
+ def _verify_run_playbook(self, run_playbook_mock, exp_hosts_len, exp_hosts_to_run_on_len):
+ """ Check that we ran playbook with expected inputs. """
+ hosts = run_playbook_mock.call_args[0][0]
+ hosts_to_run_on = run_playbook_mock.call_args[0][1]
+ self.assertEquals(exp_hosts_len, len(hosts))
+ self.assertEquals(exp_hosts_to_run_on_len, len(hosts_to_run_on))
+
+ def _verify_config_hosts(self, written_config, host_count):
+ self.assertEquals(host_count, len(written_config['hosts']))
+ for h in written_config['hosts']:
+ self.assertTrue(h['node'])
+ self.assertTrue('ip' in h)
+ self.assertTrue('hostname' in h)
+ self.assertTrue('public_ip' in h)
+ self.assertTrue('public_hostname' in h)
+
+ @patch('ooinstall.install_transactions.run_main_playbook')
+ @patch('ooinstall.install_transactions.load_system_facts')
+ def test_full_run(self, load_facts_mock, run_playbook_mock):
+ load_facts_mock.return_value = (MOCK_FACTS, 0)
+ run_playbook_mock.return_value = 0
+
+ cli_input = self._build_input(hosts=[
+ ('10.0.0.1', True),
+ ('10.0.0.2', False),
+ ('10.0.0.3', False)],
+ ssh_user='root',
+ variant_num=1,
+ confirm_facts='y')
+ result = self.runner.invoke(cli.main, self.cli_args,
+ input=cli_input)
+ self.assert_result(result, 0)
+
+ self._verify_load_facts(load_facts_mock)
+ self._verify_run_playbook(run_playbook_mock, 3, 3)
+
+ written_config = self._read_yaml(self.config_file)
+ self._verify_config_hosts(written_config, 3)
+
+ @patch('ooinstall.install_transactions.run_main_playbook')
+ @patch('ooinstall.install_transactions.load_system_facts')
+ def test_add_nodes(self, load_facts_mock, run_playbook_mock):
+
+ # Modify the mock facts to return a version indicating OpenShift
+ # is already installed on our master and on the first node.
+ mock_facts = copy.deepcopy(MOCK_FACTS)
+ mock_facts['10.0.0.1']['common']['version'] = "3.0.0"
+ mock_facts['10.0.0.2']['common']['version'] = "3.0.0"
+
+ load_facts_mock.return_value = (mock_facts, 0)
+ run_playbook_mock.return_value = 0
+
+ cli_input = self._build_input(hosts=[
+ ('10.0.0.1', True),
+ ('10.0.0.2', False),
+ ],
+ add_nodes=[('10.0.0.3', False)],
+ ssh_user='root',
+ variant_num=1,
+ confirm_facts='y')
+ result = self.runner.invoke(cli.main,
+ self.cli_args,
+ input=cli_input)
+ self.assert_result(result, 0)
+
+ self._verify_load_facts(load_facts_mock)
+ self._verify_run_playbook(run_playbook_mock, 3, 2)
+
+ written_config = self._read_yaml(self.config_file)
+ self._verify_config_hosts(written_config, 3)
+
+ @patch('ooinstall.install_transactions.run_main_playbook')
+ @patch('ooinstall.install_transactions.load_system_facts')
+ def test_fresh_install_with_config(self, load_facts_mock, run_playbook_mock):
+ load_facts_mock.return_value = (MOCK_FACTS, 0)
+ run_playbook_mock.return_value = 0
+
+ config_file = self.write_config(os.path.join(self.work_dir,
+ 'ooinstall.conf'),
+ SAMPLE_CONFIG % 'openshift-enterprise')
+ cli_input = self._build_input(confirm_facts='y')
+ self.cli_args.extend(["-c", config_file])
+ result = self.runner.invoke(cli.main,
+ self.cli_args,
+ input=cli_input)
+ self.assert_result(result, 0)
+
+ self._verify_load_facts(load_facts_mock)
+ self._verify_run_playbook(run_playbook_mock, 3, 3)
+
+ written_config = self._read_yaml(config_file)
+ self._verify_config_hosts(written_config, 3)
+
+# TODO: test with config file, attended add node
+# TODO: test with config file, attended new node already in config file
+# TODO: test with config file, attended new node already in config file, plus manually added nodes
+# TODO: test with config file, attended reject facts
diff --git a/utils/test/oo_config_tests.py b/utils/test/oo_config_tests.py
new file mode 100644
index 000000000..01af33fd9
--- /dev/null
+++ b/utils/test/oo_config_tests.py
@@ -0,0 +1,158 @@
+# TODO: Temporarily disabled due to importing old code into openshift-ansible
+# repo. We will work on these over time.
+# pylint: disable=bad-continuation,missing-docstring,no-self-use,invalid-name
+
+import os
+import unittest
+import tempfile
+import shutil
+import yaml
+
+from ooinstall.oo_config import OOConfig, Host, OOConfigInvalidHostError
+
+SAMPLE_CONFIG = """
+variant: openshift-enterprise
+ansible_ssh_user: root
+hosts:
+ - ip: 10.0.0.1
+ hostname: master-private.example.com
+ public_ip: 24.222.0.1
+ public_hostname: master.example.com
+ master: true
+ node: true
+ - ip: 10.0.0.2
+ hostname: node1-private.example.com
+ public_ip: 24.222.0.2
+ public_hostname: node1.example.com
+ node: true
+ - ip: 10.0.0.3
+ hostname: node2-private.example.com
+ public_ip: 24.222.0.3
+ public_hostname: node2.example.com
+ node: true
+"""
+
+CONFIG_INCOMPLETE_FACTS = """
+hosts:
+ - ip: 10.0.0.1
+ hostname: master-private.example.com
+ public_ip: 24.222.0.1
+ public_hostname: master.example.com
+ master: true
+ - ip: 10.0.0.2
+ hostname: node1-private.example.com
+ public_ip: 24.222.0.2
+ node: true
+ - ip: 10.0.0.3
+ node: true
+"""
+
+
+class OOInstallFixture(unittest.TestCase):
+
+ def setUp(self):
+ self.tempfiles = []
+ self.work_dir = tempfile.mkdtemp(prefix='ooconfigtests')
+ self.tempfiles.append(self.work_dir)
+
+ def tearDown(self):
+ for path in self.tempfiles:
+ if os.path.isdir(path):
+ shutil.rmtree(path)
+ else:
+ os.remove(path)
+
+ def write_config(self, path, config_str):
+ """
+ Write given config to a temporary file which will be cleaned
+ up in teardown.
+ Returns full path to the file.
+ """
+ with open(path, 'w') as cfg_file:
+ cfg_file.write(config_str)
+ return path
+
+
+class OOConfigTests(OOInstallFixture):
+
+ def test_load_config(self):
+
+ cfg_path = self.write_config(os.path.join(self.work_dir,
+ 'ooinstall.conf'), SAMPLE_CONFIG)
+ ooconfig = OOConfig(cfg_path)
+
+ self.assertEquals(3, len(ooconfig.hosts))
+ self.assertEquals("10.0.0.1", ooconfig.hosts[0].name)
+ self.assertEquals("10.0.0.1", ooconfig.hosts[0].ip)
+ self.assertEquals("master-private.example.com", ooconfig.hosts[0].hostname)
+
+ self.assertEquals(["10.0.0.1", "10.0.0.2", "10.0.0.3"],
+ [host['ip'] for host in ooconfig.settings['hosts']])
+
+ self.assertEquals('openshift-enterprise', ooconfig.settings['variant'])
+
+ def test_load_complete_facts(self):
+ cfg_path = self.write_config(os.path.join(self.work_dir,
+ 'ooinstall.conf'), SAMPLE_CONFIG)
+ ooconfig = OOConfig(cfg_path)
+ missing_host_facts = ooconfig.calc_missing_facts()
+ self.assertEquals(0, len(missing_host_facts))
+
+ # Test missing optional facts the user must confirm:
+ def test_load_host_incomplete_facts(self):
+ cfg_path = self.write_config(os.path.join(self.work_dir,
+ 'ooinstall.conf'), CONFIG_INCOMPLETE_FACTS)
+ ooconfig = OOConfig(cfg_path)
+ missing_host_facts = ooconfig.calc_missing_facts()
+ self.assertEquals(2, len(missing_host_facts))
+ self.assertEquals(1, len(missing_host_facts['10.0.0.2']))
+ self.assertEquals(3, len(missing_host_facts['10.0.0.3']))
+
+ def test_write_config(self):
+ cfg_path = self.write_config(os.path.join(self.work_dir,
+ 'ooinstall.conf'), SAMPLE_CONFIG)
+ ooconfig = OOConfig(cfg_path)
+ ooconfig.save_to_disk()
+
+ with open(cfg_path, 'r') as f:
+ written_config = yaml.safe_load(f.read())
+
+ self.assertEquals(3, len(written_config['hosts']))
+ for h in written_config['hosts']:
+ self.assertTrue('ip' in h)
+ self.assertTrue('public_ip' in h)
+ self.assertTrue('hostname' in h)
+ self.assertTrue('public_hostname' in h)
+
+ self.assertTrue('ansible_ssh_user' in written_config)
+ self.assertTrue('variant' in written_config)
+
+ # Some advanced settings should not get written out if they
+ # were not specified by the user:
+ self.assertFalse('ansible_inventory_directory' in written_config)
+
+
+class HostTests(OOInstallFixture):
+
+ def test_load_host_no_ip_or_hostname(self):
+ yaml_props = {
+ 'public_ip': '192.168.0.1',
+ 'public_hostname': 'a.example.com',
+ 'master': True
+ }
+ self.assertRaises(OOConfigInvalidHostError, Host, **yaml_props)
+
+ def test_load_host_no_master_or_node_specified(self):
+ yaml_props = {
+ 'ip': '192.168.0.1',
+ 'hostname': 'a.example.com',
+ 'public_ip': '192.168.0.1',
+ 'public_hostname': 'a.example.com',
+ }
+ self.assertRaises(OOConfigInvalidHostError, Host, **yaml_props)
+
diff --git a/utils/workflows/enterprise_deploy/openshift.sh b/utils/workflows/enterprise_deploy/openshift.sh
new file mode 100644
index 000000000..040a9a84d
--- /dev/null
+++ b/utils/workflows/enterprise_deploy/openshift.sh
@@ -0,0 +1,2 @@
+# This file is not used for OpenShift 3.0. It's merely an artifact of the
+# installation framework originally used for OpenShift 2.x.