-rw-r--r--  README_AWS.md | 21
-rw-r--r--  README_OSE.md | 191
-rw-r--r--  README_libvirt.md | 78
-rwxr-xr-x  bin/cluster | 91
-rw-r--r--  filter_plugins/oo_filters.py | 125
-rw-r--r--  git/.pylintrc | 390
-rwxr-xr-x  git/parent.rb | 45
-rwxr-xr-x  git/pylint.sh | 14
-rwxr-xr-x  git/yaml_validation.rb | 72
-rw-r--r--  inventory/aws/group_vars/all | 2
-rw-r--r--  inventory/aws/hosts/ec2.ini (renamed from inventory/aws/ec2.ini) | 0
-rwxr-xr-x  inventory/aws/hosts/ec2.py (renamed from inventory/aws/ec2.py) | 0
-rw-r--r--  inventory/aws/hosts/hosts | 1
-rw-r--r--  inventory/byo/group_vars/all | 28
-rw-r--r--  inventory/byo/hosts | 26
-rw-r--r--  inventory/gce/group_vars/all | 2
-rwxr-xr-x  inventory/gce/hosts/gce.py (renamed from inventory/gce/gce.py) | 0
-rw-r--r--  inventory/gce/hosts/hosts | 1
-rw-r--r--  inventory/libvirt/group_vars/all | 2
-rw-r--r--  inventory/libvirt/hosts | 2
-rw-r--r--  inventory/libvirt/hosts/hosts | 1
-rw-r--r--  inventory/libvirt/hosts/libvirt.ini | 20
-rwxr-xr-x  inventory/libvirt/hosts/libvirt_generic.py | 179
-rwxr-xr-x  inventory/multi_ec2.py | 95
-rw-r--r--  inventory/multi_ec2.yaml.example | 4
-rw-r--r--  playbooks/aws/openshift-cluster/config.yml | 36
-rw-r--r--  playbooks/aws/openshift-cluster/launch.yml | 74
-rw-r--r--  playbooks/aws/openshift-cluster/launch_instances.yml | 63
-rw-r--r--  playbooks/aws/openshift-cluster/library/ec2_ami_find.py | 302
-rw-r--r--  playbooks/aws/openshift-cluster/list.yml | 15
-rw-r--r--  playbooks/aws/openshift-cluster/tasks/launch_instances.yml | 132
-rw-r--r--  playbooks/aws/openshift-cluster/templates/user_data.j2 | 29
-rw-r--r--  playbooks/aws/openshift-cluster/terminate.yml | 20
-rw-r--r--  playbooks/aws/openshift-cluster/update.yml | 25
-rw-r--r--  playbooks/aws/openshift-cluster/vars.defaults.yml | 1
-rw-r--r--  playbooks/aws/openshift-cluster/vars.online.int.yml | 9
-rw-r--r--  playbooks/aws/openshift-cluster/vars.online.prod.yml | 9
-rw-r--r--  playbooks/aws/openshift-cluster/vars.online.stage.yml | 9
-rw-r--r--  playbooks/aws/openshift-cluster/vars.yml | 37
-rw-r--r--  playbooks/aws/openshift-master/config.yml | 27
-rw-r--r--  playbooks/aws/openshift-master/launch.yml | 8
-rw-r--r--  playbooks/aws/openshift-master/terminate.yml | 52
-rw-r--r--  playbooks/aws/openshift-master/vars.yml | 3
-rw-r--r--  playbooks/aws/openshift-node/config.yml | 110
-rw-r--r--  playbooks/aws/openshift-node/launch.yml | 10
-rw-r--r--  playbooks/aws/openshift-node/terminate.yml | 52
-rw-r--r--  playbooks/aws/openshift-node/vars.yml | 3
-rw-r--r--  playbooks/aws/terminate.yml | 64
-rw-r--r--  playbooks/byo/openshift-master/config.yml | 20
-rw-r--r--  playbooks/byo/openshift-node/config.yml | 90
-rw-r--r--  playbooks/byo/openshift_facts.yml | 10
-rw-r--r--  playbooks/common/openshift-cluster/config.yml | 4
l---------  playbooks/common/openshift-cluster/filter_plugins (renamed from playbooks/libvirt/openshift-master/filter_plugins) | 0
l---------  playbooks/common/openshift-cluster/roles (renamed from playbooks/libvirt/openshift-master/roles) | 0
-rw-r--r--  playbooks/common/openshift-cluster/set_master_launch_facts_tasks.yml | 11
-rw-r--r--  playbooks/common/openshift-cluster/set_node_launch_facts_tasks.yml | 11
-rw-r--r--  playbooks/common/openshift-cluster/update_repos_and_packages.yml | 7
-rw-r--r--  playbooks/common/openshift-master/config.yml | 19
l---------  playbooks/common/openshift-master/filter_plugins (renamed from playbooks/libvirt/openshift-node/filter_plugins) | 0
l---------  playbooks/common/openshift-master/roles | 1
-rw-r--r--  playbooks/common/openshift-node/config.yml | 127
l---------  playbooks/common/openshift-node/filter_plugins | 1
l---------  playbooks/common/openshift-node/roles | 1
-rw-r--r--  playbooks/gce/openshift-cluster/config.yml | 37
-rw-r--r--  playbooks/gce/openshift-cluster/launch.yml | 72
-rw-r--r--  playbooks/gce/openshift-cluster/list.yml | 15
-rw-r--r--  playbooks/gce/openshift-cluster/tasks/launch_instances.yml (renamed from playbooks/gce/openshift-cluster/launch_instances.yml) | 25
-rw-r--r--  playbooks/gce/openshift-cluster/terminate.yml | 22
-rw-r--r--  playbooks/gce/openshift-cluster/update.yml | 25
-rw-r--r--  playbooks/gce/openshift-cluster/vars.yml | 14
-rw-r--r--  playbooks/gce/openshift-master/config.yml | 24
-rw-r--r--  playbooks/gce/openshift-master/launch.yml | 6
-rw-r--r--  playbooks/gce/openshift-master/terminate.yml | 11
-rw-r--r--  playbooks/gce/openshift-master/vars.yml | 3
-rw-r--r--  playbooks/gce/openshift-node/config.yml | 106
-rw-r--r--  playbooks/gce/openshift-node/launch.yml | 6
-rw-r--r--  playbooks/gce/openshift-node/terminate.yml | 11
-rw-r--r--  playbooks/gce/openshift-node/vars.yml | 3
-rw-r--r--  playbooks/libvirt/openshift-cluster/config.yml | 38
-rw-r--r--  playbooks/libvirt/openshift-cluster/launch.yml | 81
-rw-r--r--  playbooks/libvirt/openshift-cluster/list.yml | 50
-rw-r--r--  playbooks/libvirt/openshift-cluster/tasks/configure_libvirt.yml | 6
-rw-r--r--  playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml | 27
-rw-r--r--  playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml | 23
-rw-r--r--  playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml (renamed from playbooks/libvirt/openshift-cluster/launch_instances.yml) | 63
-rw-r--r--  playbooks/libvirt/openshift-cluster/templates/domain.xml (renamed from playbooks/libvirt/templates/domain.xml) | 14
-rw-r--r--  playbooks/libvirt/openshift-cluster/templates/meta-data | 3
-rw-r--r--  playbooks/libvirt/openshift-cluster/templates/network.xml | 23
-rw-r--r--  playbooks/libvirt/openshift-cluster/templates/user-data | 23
-rw-r--r--  playbooks/libvirt/openshift-cluster/terminate.yml | 69
-rw-r--r--  playbooks/libvirt/openshift-cluster/update.yml | 18
-rw-r--r--  playbooks/libvirt/openshift-cluster/vars.yml | 38
-rw-r--r--  playbooks/libvirt/openshift-master/config.yml | 21
-rw-r--r--  playbooks/libvirt/openshift-master/vars.yml | 1
-rw-r--r--  playbooks/libvirt/openshift-node/config.yml | 102
l---------  playbooks/libvirt/openshift-node/roles | 1
-rw-r--r--  playbooks/libvirt/openshift-node/vars.yml | 1
-rw-r--r--  playbooks/libvirt/templates/meta-data | 2
-rw-r--r--  playbooks/libvirt/templates/user-data | 10
-rw-r--r--  roles/openshift_common/tasks/main.yml | 4
-rw-r--r--  roles/openshift_common/vars/main.yml | 4
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py | 92
-rw-r--r--  roles/openshift_master/tasks/main.yml | 86
-rw-r--r--  roles/openshift_master/vars/main.yml | 5
-rw-r--r--  roles/openshift_node/tasks/main.yml | 32
-rw-r--r--  roles/openshift_node/vars/main.yml | 2
-rw-r--r--  roles/openshift_register_nodes/defaults/main.yml | 3
-rwxr-xr-x  roles/openshift_register_nodes/library/kubernetes_register_node.py | 432
-rw-r--r--  roles/openshift_register_nodes/tasks/main.yml | 69
-rw-r--r--  roles/openshift_register_nodes/vars/main.yml | 7
-rw-r--r--  roles/openshift_repos/README.md | 2
-rw-r--r--  roles/openshift_repos/defaults/main.yaml | 5
-rw-r--r--  roles/openshift_repos/files/online/RPM-GPG-KEY-redhat-beta | 61
-rw-r--r--  roles/openshift_repos/files/online/RPM-GPG-KEY-redhat-release | 63
-rw-r--r--  roles/openshift_repos/files/online/epel7-kubernetes.repo | 6
-rw-r--r--  roles/openshift_repos/files/online/epel7-openshift.repo | 6
-rw-r--r--  roles/openshift_repos/files/online/oso-rhui-rhel-7-extras.repo | 23
-rw-r--r--  roles/openshift_repos/files/online/oso-rhui-rhel-7-server.repo | 21
-rw-r--r--  roles/openshift_repos/files/online/repos/enterprise-v3.repo | 10
-rw-r--r--  roles/openshift_repos/files/online/repos/rhel-7-libra-candidate.repo (renamed from roles/openshift_repos/files/online/rhel-7-libra-candidate.repo) | 0
-rw-r--r--  roles/openshift_repos/files/origin/repos/maxamillion-origin-next-epel-7.repo | 7
-rw-r--r--  roles/openshift_repos/files/removed/repos/epel7-openshift.repo | 0
-rw-r--r--  roles/openshift_repos/files/removed/repos/oso-rhui-rhel-7-extras.repo | 0
-rw-r--r--  roles/openshift_repos/files/removed/repos/oso-rhui-rhel-7-server.repo | 0
-rw-r--r--  roles/openshift_repos/tasks/main.yaml | 14
-rw-r--r--  roles/openshift_repos/templates/yum_repo.j2 | 1
-rw-r--r--  roles/openshift_sdn_master/tasks/main.yml | 11
-rw-r--r--  roles/openshift_sdn_node/tasks/main.yml | 32
-rwxr-xr-x  roles/os_firewall/library/os_firewall_manage_iptables.py | 92
-rw-r--r--  roles/os_firewall/tasks/firewall/firewalld.yml | 1
-rw-r--r--  roles/os_firewall/tasks/firewall/iptables.yml | 1
131 files changed, 3231 insertions, 1637 deletions
diff --git a/README_AWS.md b/README_AWS.md
index 37f4c5f51..888abe939 100644
--- a/README_AWS.md
+++ b/README_AWS.md
@@ -40,11 +40,25 @@ Alternatively, you can configure your ssh-agent to hold the credentials to conne
By default, a cluster is launched with the following configuration:
- Instance type: m3.large
-- AMI: ami-307b3658
+- AMI: ami-307b3658 (for online deployments; ami-acd999c4 for origin deployments, ami-10663b78 for enterprise deployments)
- Region: us-east-1
- Keypair name: libra
- Security group: public
+Master specific defaults:
+- Master root volume size: 10 (in GiBs)
+- Master root volume type: gp2
+- Master root volume iops: 500 (only applicable when volume type is io1)
+
+Node specific defaults:
+- Node root volume size: 10 (in GiBs)
+- Node root volume type: gp2
+- Node root volume iops: 500 (only applicable when volume type is io1)
+- Docker volume size: 25 (in GiBs)
+- Docker volume ephemeral: true (Whether the docker volume is ephemeral)
+- Docker volume type: gp2 (only applicable if ephemeral is false)
+- Docker volume iops: 500 (only applicable when volume type is io1)
+
If needed, these values can be changed by setting environment variables on your system.
- export ec2_instance_type='m3.large'
@@ -52,6 +66,11 @@ If needed, these values can be changed by setting environment variables on your
- export ec2_region='us-east-1'
- export ec2_keypair='libra'
- export ec2_security_group='public'
+- export os_master_root_vol_size='20'
+- export os_master_root_vol_type='standard'
+- export os_node_root_vol_size='15'
+- export os_docker_vol_size='50'
+- export os_docker_vol_ephemeral='false'
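For illustration, a minimal sketch of the override behaviour described above, assuming the playbooks resolve these variables through environment lookups with the documented defaults (the `setting` helper is hypothetical, not part of the playbooks):

```python
import os

# documented defaults from the list above
DEFAULTS = {
    'os_master_root_vol_size': '10',
    'os_master_root_vol_type': 'gp2',
}

def setting(name):
    # hypothetical helper: an exported environment variable wins over the default
    return os.environ.get(name, DEFAULTS[name])

os.environ['os_master_root_vol_size'] = '20'
assert setting('os_master_root_vol_size') == '20'   # overridden
assert setting('os_master_root_vol_type') == 'gp2'  # default kept
```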
Install Dependencies
--------------------
diff --git a/README_OSE.md b/README_OSE.md
index 6ebdb7f99..cd0b9f7a6 100644
--- a/README_OSE.md
+++ b/README_OSE.md
@@ -7,15 +7,17 @@
* [Creating the default variables for the hosts and host groups](#creating-the-default-variables-for-the-hosts-and-host-groups)
* [Running the ansible playbooks](#running-the-ansible-playbooks)
* [Post-ansible steps](#post-ansible-steps)
+* [Overriding detected ip addresses and hostnames](#overriding-detected-ip-addresses-and-hostnames)
## Requirements
* ansible
- * Tested using ansible-1.8.2-1.fc20.noarch, but should work with version 1.8+
+ * Tested using ansible-1.8.4-1.fc20.noarch, but should work with version 1.8+
+ * There is currently a known issue with ansible-1.9.0; you can downgrade to 1.8.4 on Fedora by installing one of the builds from Koji: http://koji.fedoraproject.org/koji/packageinfo?packageID=13842
* Available in Fedora channels
* Available for EL with EPEL and Optional channel
* One or more RHEL 7.1 VMs
-* ssh key based auth for the root user needs to be pre-configured from the host
- running ansible to the remote hosts
+* Either ssh key based auth for the root user or ssh key based auth for a user
+ with sudo access (no password)
* A checkout of openshift-ansible from https://github.com/openshift/openshift-ansible/
```sh
@@ -48,9 +50,6 @@ subscription-manager repos \
```
* Configuration of router is not automated yet
* Configuration of docker-registry is not automated yet
-* End-to-end testing has not been completed yet using this module
-* root user is used for all ansible actions; eventually we will support using
- a non-root user with sudo.
## Configuring the host inventory
[Ansible docs](http://docs.ansible.com/intro_inventory.html)
@@ -64,6 +63,38 @@ option to ansible-playbook.
```ini
# This is an example of a bring your own (byo) host inventory
+# Create an OSEv3 group that contains the masters and nodes groups
+[OSEv3:children]
+masters
+nodes
+
+# Set variables common for all OSEv3 hosts
+[OSEv3:vars]
+# SSH user; this user should allow ssh-based auth without requiring a password
+ansible_ssh_user=root
+
+# If ansible_ssh_user is not root, ansible_sudo must be set to true
+#ansible_sudo=true
+
+# To deploy origin, change deployment_type to origin
+deployment_type=enterprise
+
+# Pre-release registry URL
+openshift_registry_url=docker-buildvm-rhose.usersys.redhat.com:5000/openshift3_beta/ose-${component}:${version}
+
+# Pre-release additional repo
+openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel',
+'baseurl':
+'http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterprise/3.0/latest/RH7-RHOSE-3.0/$basearch/os',
+'enabled': 1, 'gpgcheck': 0}]
+
+# Origin copr repo
+#openshift_additional_repos=[{'id': 'openshift-origin-copr', 'name':
+'OpenShift Origin COPR', 'baseurl':
+'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/',
+'enabled': 1, 'gpgcheck': 1, gpgkey:
+'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg'}]
+
# host group for masters
[masters]
ose3-master.example.com
@@ -76,51 +107,13 @@ ose3-node[1:2].example.com
The hostnames above should resolve both from the hosts themselves and
the host where ansible is running (if different).
-## Creating the default variables for the hosts and host groups
-[Ansible docs](http://docs.ansible.com/intro_inventory.html#id9)
-
-#### Group vars for all hosts
-/etc/ansible/group_vars/all:
-```yaml
----
-# Assume that we want to use the root as the ssh user for all hosts
-ansible_ssh_user: root
-
-# Default debug level for all OpenShift hosts
-openshift_debug_level: 4
-
-# Set the OpenShift deployment type for all hosts
-openshift_deployment_type: enterprise
-
-# Override the default registry for development
-openshift_registry_url: docker-buildvm-rhose.usersys.redhat.com:5000/openshift3_beta/ose-${component}:${version}
-
-# To use the latest OpenShift Enterprise Errata puddle:
-#openshift_additional_repos:
-#- id: ose-devel
-# name: ose-devel
-# baseurl: http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterpriseErrata/3.0/latest/RH7-RHOSE-3.0/$basearch/os
-# enabled: 1
-# gpgcheck: 0
-# To use the latest OpenShift Enterprise Whitelist puddle:
-openshift_additional_repos:
-- id: ose-devel
- name: ose-devel
- baseurl: http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterprise/3.0/latest/RH7-RHOSE-3.0/$basearch/os
- enabled: 1
- gpgcheck: 0
-
-```
-
## Running the ansible playbooks
From the openshift-ansible checkout run:
```sh
ansible-playbook playbooks/byo/config.yml
```
-**Note:** this assumes that the host inventory is /etc/ansible/hosts and the
-group_vars are defined in /etc/ansible/group_vars, if using a different
-inventory file (and a group_vars directory that is in the same directory as
-the directory as the inventory) use the -i option for ansible-playbook.
+**Note:** this assumes that the host inventory is /etc/ansible/hosts; if using a
+different inventory file, use the -i option for ansible-playbook.
## Post-ansible steps
#### Create the default router
@@ -140,3 +133,109 @@ openshift ex registry --create=true \
--images='docker-buildvm-rhose.usersys.redhat.com:5000/openshift3_beta/ose-${component}:${version}' \
--mount-host=/var/lib/openshift/docker-registry
```
+
+## Overriding detected ip addresses and hostnames
+Some deployments will require that the user override the detected hostnames
+and ip addresses for the hosts. To see what the default values will be, you can
+run the openshift_facts playbook:
+```sh
+ansible-playbook playbooks/byo/openshift_facts.yml
+```
+The output will be similar to:
+```
+ok: [10.3.9.45] => {
+ "result": {
+ "ansible_facts": {
+ "openshift": {
+ "common": {
+ "hostname": "jdetiber-osev3-ansible-005dcfa6-27c6-463d-9b95-ef059579befd.os1.phx2.redhat.com",
+ "ip": "172.16.4.79",
+ "public_hostname": "jdetiber-osev3-ansible-005dcfa6-27c6-463d-9b95-ef059579befd.os1.phx2.redhat.com",
+ "public_ip": "10.3.9.45",
+ "use_openshift_sdn": true
+ },
+ "provider": {
+ ... <snip> ...
+ }
+ }
+ },
+ "changed": false,
+ "invocation": {
+ "module_args": "",
+ "module_name": "openshift_facts"
+ }
+ }
+}
+ok: [10.3.9.42] => {
+ "result": {
+ "ansible_facts": {
+ "openshift": {
+ "common": {
+ "hostname": "jdetiber-osev3-ansible-c6ae8cdc-ba0b-4a81-bb37-14549893f9d3.os1.phx2.redhat.com",
+ "ip": "172.16.4.75",
+ "public_hostname": "jdetiber-osev3-ansible-c6ae8cdc-ba0b-4a81-bb37-14549893f9d3.os1.phx2.redhat.com",
+ "public_ip": "10.3.9.42",
+ "use_openshift_sdn": true
+ },
+ "provider": {
+ ...<snip>...
+ }
+ }
+ },
+ "changed": false,
+ "invocation": {
+ "module_args": "",
+ "module_name": "openshift_facts"
+ }
+ }
+}
+ok: [10.3.9.36] => {
+ "result": {
+ "ansible_facts": {
+ "openshift": {
+ "common": {
+ "hostname": "jdetiber-osev3-ansible-bc39a3d3-cdd7-42fe-9c12-9fac9b0ec320.os1.phx2.redhat.com",
+ "ip": "172.16.4.73",
+ "public_hostname": "jdetiber-osev3-ansible-bc39a3d3-cdd7-42fe-9c12-9fac9b0ec320.os1.phx2.redhat.com",
+ "public_ip": "10.3.9.36",
+ "use_openshift_sdn": true
+ },
+ "provider": {
+ ...<snip>...
+ }
+ }
+ },
+ "changed": false,
+ "invocation": {
+ "module_args": "",
+ "module_name": "openshift_facts"
+ }
+ }
+}
+```
+Now we want to verify that the detected common settings match what we
+expect (if not, we can override them).
+
+* hostname
+ * Should resolve to the internal ip from the instances themselves.
+ * openshift_hostname will override.
+* ip
+ * Should be the internal ip of the instance.
+ * openshift_ip will override.
+* public_hostname
+ * Should resolve to the external ip from hosts outside of the cloud provider.
+ * openshift_public_hostname will override.
+* public_ip
+ * Should be the externally accessible ip associated with the instance.
+ * openshift_public_ip will override.
+* use_openshift_sdn
+ * Should be true unless the cloud is GCE.
+ * openshift_use_openshift_sdn overrides.
+
+To override the defaults, you can set the variables in your inventory:
+```
+...snip...
+[masters]
+ose3-master.example.com openshift_ip=1.1.1.1 openshift_hostname=ose3-master.example.com openshift_public_ip=2.2.2.2 openshift_public_hostname=ose3-master.public.example.com
+...snip...
+```
diff --git a/README_libvirt.md b/README_libvirt.md
index fd2eb57f6..bcbaf4bd5 100644
--- a/README_libvirt.md
+++ b/README_libvirt.md
@@ -1,4 +1,3 @@
-
LIBVIRT Setup instructions
==========================
@@ -9,19 +8,21 @@ This makes `libvirt` useful to develop, test and debug Openshift and openshift-a
Install dependencies
--------------------
-1. Install [dnsmasq](http://www.thekelleys.org.uk/dnsmasq/doc.html)
-2. Install [ebtables](http://ebtables.netfilter.org/)
-3. Install [qemu](http://wiki.qemu.org/Main_Page)
-4. Install [libvirt](http://libvirt.org/)
-5. Enable and start the libvirt daemon, e.g:
- * ``systemctl enable libvirtd``
- * ``systemctl start libvirtd``
-6. [Grant libvirt access to your user¹](https://libvirt.org/aclpolkit.html)
-7. Check that your `$HOME` is accessible to the qemu user²
+1. Install [dnsmasq](http://www.thekelleys.org.uk/dnsmasq/doc.html)
+2. Install [ebtables](http://ebtables.netfilter.org/)
+3. Install [qemu](http://wiki.qemu.org/Main_Page)
+4. Install [libvirt](http://libvirt.org/)
+5. Enable and start the libvirt daemon, e.g.:
+ - `systemctl enable libvirtd`
+ - `systemctl start libvirtd`
+6. [Grant libvirt access to your user¹](https://libvirt.org/aclpolkit.html)
+7. Check that your `$HOME` is accessible to the qemu user²
+8. Configure dns resolution on the host³
#### ¹ Depending on your distribution, libvirt access may be denied by default or may require a password at each access.
You can test it with the following command:
+
```
virsh -c qemu:///system pool-list
```
@@ -67,12 +68,7 @@ If your `$HOME` is world readable, everything is fine. If your `$HOME` is privat
error: Cannot access storage file '$HOME/libvirt-storage-pool-openshift/lenaic-master-216d8.qcow2' (as uid:99, gid:78): Permission denied
```
-In order to fix that issue, you have several possibilities:
-* set `libvirt_storage_pool_path` inside `playbooks/libvirt/openshift-cluster/launch.yml` and `playbooks/libvirt/openshift-cluster/terminate.yml` to a directory:
- * backed by a filesystem with a lot of free disk space
- * writable by your user;
- * accessible by the qemu user.
-* Grant the qemu user access to the storage pool.
+In order to fix that issue, you have several possibilities:
+* set `libvirt_storage_pool_path` inside `playbooks/libvirt/openshift-cluster/launch.yml` and `playbooks/libvirt/openshift-cluster/terminate.yml` to a directory:
+ * backed by a filesystem with a lot of free disk space
+ * writable by your user;
+ * accessible by the qemu user.
+* Grant the qemu user access to the storage pool.
On Arch:
@@ -80,13 +76,55 @@ On Arch:
setfacl -m g:kvm:--x ~
```
-Test the setup
+#### ³ Enabling DNS resolution to your guest VMs with NetworkManager
+
+- Verify NetworkManager is configured to use dnsmasq:
+
+```sh
+$ sudo vi /etc/NetworkManager/NetworkManager.conf
+[main]
+dns=dnsmasq
+```
+
+- Configure dnsmasq to use the Virtual Network router for example.com:
+
+```sh
+sudo vi /etc/NetworkManager/dnsmasq.d/libvirt_dnsmasq.conf
+server=/example.com/192.168.55.1
+```
+
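To check that the dnsmasq forwarding works, something like the following can be used (a sketch: `master.example.com` is a hypothetical guest name, and it assumes the libvirt network router answers DNS on 192.168.55.1):

```python
import subprocess

# query the libvirt network's dnsmasq directly for a guest name
answer = subprocess.check_output(
    ['dig', '+short', '@192.168.55.1', 'master.example.com'])
print(answer.decode().strip())  # expected: the guest's address on the virtual network
```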
+Test The Setup
--------------
+1. cd openshift-ansible/
+2. Try to list all instances (Passing an empty string as the cluster_id argument will result in all libvirt instances being listed)
+
+```
+ bin/cluster list libvirt ''
```
-cd openshift-ansible
-bin/cluster create -m 1 -n 3 libvirt lenaic
+Creating a cluster
+------------------
+
+1. To create a cluster with one master and two nodes
-bin/cluster terminate libvirt lenaic
+```
+ bin/cluster create libvirt lenaic
+```
+
+Updating a cluster
+------------------
+
+1. To update the cluster
+
+```
+ bin/cluster update libvirt lenaic
+```
+
+Terminating a cluster
+---------------------
+
+1. To terminate the cluster
+
+```
+ bin/cluster terminate libvirt lenaic
```
diff --git a/bin/cluster b/bin/cluster
index ca227721e..79f1f988f 100755
--- a/bin/cluster
+++ b/bin/cluster
@@ -22,13 +22,28 @@ class Cluster(object):
'-o ControlPersist=600s '
)
+ def get_deployment_type(self, args):
+ """
+ Get the deployment_type based on the environment variables and the
+ command line arguments
+ :param args: command line arguments provided by the user
+ :return: string representing the deployment type
+ """
+ deployment_type = 'origin'
+ if args.deployment_type:
+ deployment_type = args.deployment_type
+ elif 'OS_DEPLOYMENT_TYPE' in os.environ:
+ deployment_type = os.environ['OS_DEPLOYMENT_TYPE']
+ return deployment_type
+
def create(self, args):
"""
Create an OpenShift cluster for given provider
:param args: command line arguments provided by user
:return: exit status from run command
"""
- env = {'cluster_id': args.cluster_id}
+ env = {'cluster_id': args.cluster_id,
+ 'deployment_type': self.get_deployment_type(args)}
playbook = "playbooks/{}/openshift-cluster/launch.yml".format(args.provider)
inventory = self.setup_provider(args.provider)
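The precedence implemented by `get_deployment_type` above can be restated as a standalone sketch (same logic, lifted out of the class):

```python
import os

def get_deployment_type(cli_value):
    # the CLI flag wins, then the OS_DEPLOYMENT_TYPE environment
    # variable, then the 'origin' default
    if cli_value:
        return cli_value
    if 'OS_DEPLOYMENT_TYPE' in os.environ:
        return os.environ['OS_DEPLOYMENT_TYPE']
    return 'origin'

os.environ['OS_DEPLOYMENT_TYPE'] = 'enterprise'
assert get_deployment_type(None) == 'enterprise'  # env var used
assert get_deployment_type('online') == 'online'  # CLI flag wins
```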
@@ -43,7 +58,8 @@ class Cluster(object):
:param args: command line arguments provided by user
:return: exit status from run command
"""
- env = {'cluster_id': args.cluster_id}
+ env = {'cluster_id': args.cluster_id,
+ 'deployment_type': self.get_deployment_type(args)}
playbook = "playbooks/{}/openshift-cluster/terminate.yml".format(args.provider)
inventory = self.setup_provider(args.provider)
@@ -55,19 +71,34 @@ class Cluster(object):
:param args: command line arguments provided by user
:return: exit status from run command
"""
- env = {'cluster_id': args.cluster_id}
+ env = {'cluster_id': args.cluster_id,
+ 'deployment_type': self.get_deployment_type(args)}
playbook = "playbooks/{}/openshift-cluster/list.yml".format(args.provider)
inventory = self.setup_provider(args.provider)
return self.action(args, inventory, env, playbook)
+ def config(self, args):
+ """
+ Configure or reconfigure OpenShift across clustered VMs
+ :param args: command line arguments provided by user
+ :return: exit status from run command
+ """
+ env = {'cluster_id': args.cluster_id,
+ 'deployment_type': self.get_deployment_type(args)}
+ playbook = "playbooks/{}/openshift-cluster/config.yml".format(args.provider)
+ inventory = self.setup_provider(args.provider)
+
+ return self.action(args, inventory, env, playbook)
+
def update(self, args):
"""
Update to latest OpenShift across clustered VMs
:param args: command line arguments provided by user
:return: exit status from run command
"""
- env = {'cluster_id': args.cluster_id}
+ env = {'cluster_id': args.cluster_id,
+ 'deployment_type': self.get_deployment_type(args)}
playbook = "playbooks/{}/openshift-cluster/update.yml".format(args.provider)
inventory = self.setup_provider(args.provider)
@@ -81,19 +112,19 @@ class Cluster(object):
"""
config = ConfigParser.ConfigParser()
if 'gce' == provider:
- config.readfp(open('inventory/gce/gce.ini'))
+ config.readfp(open('inventory/gce/hosts/gce.ini'))
for key in config.options('gce'):
os.environ[key] = config.get('gce', key)
- inventory = '-i inventory/gce/gce.py'
+ inventory = '-i inventory/gce/hosts'
elif 'aws' == provider:
- config.readfp(open('inventory/aws/ec2.ini'))
+ config.readfp(open('inventory/aws/hosts/ec2.ini'))
for key in config.options('ec2'):
os.environ[key] = config.get('ec2', key)
- inventory = '-i inventory/aws/ec2.py'
+ inventory = '-i inventory/aws/hosts'
elif 'libvirt' == provider:
inventory = '-i inventory/libvirt/hosts'
else:
@@ -145,29 +176,49 @@ if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Python wrapper to ensure proper environment for OpenShift ansible playbooks',
)
- parser.add_argument('-v', '--verbose', action='count', help='Multiple -v options increase the verbosity')
+ parser.add_argument('-v', '--verbose', action='count',
+ help='Multiple -v options increase the verbosity')
parser.add_argument('--version', action='version', version='%(prog)s 0.2')
meta_parser = argparse.ArgumentParser(add_help=False)
meta_parser.add_argument('provider', choices=providers, help='provider')
meta_parser.add_argument('cluster_id', help='prefix for cluster VM names')
-
- action_parser = parser.add_subparsers(dest='action', title='actions', description='Choose from valid actions')
-
- create_parser = action_parser.add_parser('create', help='Create a cluster', parents=[meta_parser])
- create_parser.add_argument('-m', '--masters', default=1, type=int, help='number of masters to create in cluster')
- create_parser.add_argument('-n', '--nodes', default=2, type=int, help='number of nodes to create in cluster')
+ meta_parser.add_argument('-t', '--deployment-type',
+ choices=['origin', 'online', 'enterprise'],
+ help='Deployment type. (default: origin)')
+
+ action_parser = parser.add_subparsers(dest='action', title='actions',
+ description='Choose from valid actions')
+
+ create_parser = action_parser.add_parser('create', help='Create a cluster',
+ parents=[meta_parser])
+ create_parser.add_argument('-m', '--masters', default=1, type=int,
+ help='number of masters to create in cluster')
+ create_parser.add_argument('-n', '--nodes', default=2, type=int,
+ help='number of nodes to create in cluster')
create_parser.set_defaults(func=cluster.create)
- terminate_parser = action_parser.add_parser('terminate', help='Destroy a cluster', parents=[meta_parser])
- terminate_parser.add_argument('-f', '--force', action='store_true', help='Destroy cluster without confirmation')
+ config_parser = action_parser.add_parser('config',
+ help='Configure or reconfigure a cluster',
+ parents=[meta_parser])
+ config_parser.set_defaults(func=cluster.config)
+
+ terminate_parser = action_parser.add_parser('terminate',
+ help='Destroy a cluster',
+ parents=[meta_parser])
+ terminate_parser.add_argument('-f', '--force', action='store_true',
+ help='Destroy cluster without confirmation')
terminate_parser.set_defaults(func=cluster.terminate)
- update_parser = action_parser.add_parser('update', help='Update OpenShift across cluster', parents=[meta_parser])
- update_parser.add_argument('-f', '--force', action='store_true', help='Update cluster without confirmation')
+ update_parser = action_parser.add_parser('update',
+ help='Update OpenShift across cluster',
+ parents=[meta_parser])
+ update_parser.add_argument('-f', '--force', action='store_true',
+ help='Update cluster without confirmation')
update_parser.set_defaults(func=cluster.update)
- list_parser = action_parser.add_parser('list', help='List VMs in cluster', parents=[meta_parser])
+ list_parser = action_parser.add_parser('list', help='List VMs in cluster',
+ parents=[meta_parser])
list_parser.set_defaults(func=cluster.list)
args = parser.parse_args()
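A trimmed-down, self-contained replica of the parser shape above, showing how the shared meta parser makes `-t/--deployment-type` available to every action (the provider choices here are assumed; the real `providers` list is defined elsewhere in bin/cluster):

```python
import argparse

meta = argparse.ArgumentParser(add_help=False)
meta.add_argument('provider', choices=['gce', 'aws', 'libvirt'])  # assumed list
meta.add_argument('cluster_id')
meta.add_argument('-t', '--deployment-type',
                  choices=['origin', 'online', 'enterprise'])

parser = argparse.ArgumentParser()
actions = parser.add_subparsers(dest='action')
create = actions.add_parser('create', parents=[meta])
create.add_argument('-m', '--masters', default=1, type=int)
create.add_argument('-n', '--nodes', default=2, type=int)

args = parser.parse_args(['create', '-t', 'enterprise', 'libvirt', 'mycluster'])
assert args.deployment_type == 'enterprise' and args.nodes == 2
```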
diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py
index 1cf02218c..097038450 100644
--- a/filter_plugins/oo_filters.py
+++ b/filter_plugins/oo_filters.py
@@ -1,13 +1,17 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# vim: expandtab:tabstop=4:shiftwidth=4
+'''
+Custom filters for use in openshift-ansible
+'''
-from ansible import errors, runner
-import json
+from ansible import errors
+from operator import itemgetter
import pdb
def oo_pdb(arg):
- ''' This pops you into a pdb instance where arg is the data passed in from the filter.
+ ''' This pops you into a pdb instance where arg is the data passed in
+ from the filter.
Ex: "{{ hostvars | oo_pdb }}"
'''
pdb.set_trace()
@@ -20,7 +24,8 @@ def oo_len(arg):
return len(arg)
def get_attr(data, attribute=None):
- ''' This looks up dictionary attributes of the form a.b.c and returns the value.
+ ''' This looks up dictionary attributes of the form a.b.c and returns
+ the value.
Ex: data = {'a': {'b': {'c': 5}}}
attribute = "a.b.c"
returns 5
@@ -40,12 +45,13 @@ def oo_flatten(data):
if not issubclass(type(data), list):
raise errors.AnsibleFilterError("|failed expects to flatten a List")
- return [ item for sublist in data for item in sublist ]
+ return [item for sublist in data for item in sublist]
-def oo_collect(data, attribute=None, filters={}):
- ''' This takes a list of dict and collects all attributes specified into a list
- If filter is specified then we will include all items that match _ALL_ of filters.
+def oo_collect(data, attribute=None, filters=None):
+ ''' This takes a list of dict and collects all attributes specified into a
+ list. If filters is specified then we will include all items that match
+ _ALL_ of filters.
Ex: data = [ {'a':1, 'b':5, 'z': 'z'}, # True, return
{'a':2, 'z': 'z'}, # True, return
{'a':3, 'z': 'z'}, # True, return
@@ -55,15 +61,18 @@ def oo_collect(data, attribute=None, filters={}):
filters = {'z': 'z'}
returns [1, 2, 3]
'''
-
if not issubclass(type(data), list):
raise errors.AnsibleFilterError("|failed expects to filter on a List")
if not attribute:
raise errors.AnsibleFilterError("|failed expects attribute to be set")
- if filters:
- retval = [get_attr(d, attribute) for d in data if all([ d[key] == filters[key] for key in filters ]) ]
+ if filters is not None:
+ if not issubclass(type(filters), dict):
+ raise errors.AnsibleFilterError("|fialed expects filter to be a"
+ " dict")
+ retval = [get_attr(d, attribute) for d in data if (
+ all([d[key] == filters[key] for key in filters]))]
else:
retval = [get_attr(d, attribute) for d in data]
@@ -77,7 +86,7 @@ def oo_select_keys(data, keys):
'''
if not issubclass(type(data), dict):
- raise errors.AnsibleFilterError("|failed expects to filter on a Dictionary")
+ raise errors.AnsibleFilterError("|failed expects to filter on a dict")
if not issubclass(type(keys), list):
raise errors.AnsibleFilterError("|failed expects first param is a list")
@@ -97,17 +106,91 @@ def oo_prepend_strings_in_list(data, prepend):
if not issubclass(type(data), list):
raise errors.AnsibleFilterError("|failed expects first param is a list")
if not all(isinstance(x, basestring) for x in data):
- raise errors.AnsibleFilterError("|failed expects first param is a list of strings")
+ raise errors.AnsibleFilterError("|failed expects first param is a list"
+ " of strings")
retval = [prepend + s for s in data]
return retval
-class FilterModule (object):
+def oo_ami_selector(data, image_name):
+ ''' This takes a list of amis and an image name and attempts to return
+ the latest ami.
+ '''
+ if not issubclass(type(data), list):
+ raise errors.AnsibleFilterError("|failed expects first param is a list")
+
+ if not data:
+ return None
+ else:
+ if image_name is None or not image_name.endswith('_*'):
+ ami = sorted(data, key=itemgetter('name'), reverse=True)[0]
+ return ami['ami_id']
+ else:
+ ami_info = [(ami, ami['name'].split('_')[-1]) for ami in data]
+ ami = sorted(ami_info, key=itemgetter(1), reverse=True)[0][0]
+ return ami['ami_id']
+
+def oo_ec2_volume_definition(data, host_type, docker_ephemeral=False):
+ ''' This takes a dictionary of volume definitions and returns a valid ec2
+ volume definition based on the host_type and the values in the
+ dictionary.
+ The dictionary should look similar to this:
+ { 'master':
+ { 'root':
+ { 'volume_size': 10, 'device_type': 'gp2',
+ 'iops': 500
+ }
+ },
+ 'node':
+ { 'root':
+ { 'volume_size': 10, 'device_type': 'io1',
+ 'iops': 1000
+ },
+ 'docker':
+ { 'volume_size': 40, 'device_type': 'gp2',
+ 'iops': 500, 'ephemeral': 'true'
+ }
+ }
+ }
+ '''
+ if not issubclass(type(data), dict):
+ raise errors.AnsibleFilterError("|failed expects first param is a dict")
+ if host_type not in ['master', 'node']:
+ raise errors.AnsibleFilterError("|failed expects either master or node"
+ " host type")
+
+ root_vol = data[host_type]['root']
+ root_vol['device_name'] = '/dev/sda1'
+ root_vol['delete_on_termination'] = True
+ if root_vol['device_type'] != 'io1':
+ root_vol.pop('iops', None)
+ if host_type == 'node':
+ docker_vol = data[host_type]['docker']
+ docker_vol['device_name'] = '/dev/xvdb'
+ docker_vol['delete_on_termination'] = True
+ if docker_vol['device_type'] != 'io1':
+ docker_vol.pop('iops', None)
+ if docker_ephemeral:
+ docker_vol.pop('device_type', None)
+ docker_vol.pop('delete_on_termination', None)
+ docker_vol['ephemeral'] = 'ephemeral0'
+ return [root_vol, docker_vol]
+ return [root_vol]
+
+# disabling pylint checks for too-few-public-methods and no-self-use since we
+# need to expose a FilterModule object that has a filters method that returns
+# a mapping of filter names to methods.
+# pylint: disable=too-few-public-methods, no-self-use
+class FilterModule(object):
+ ''' FilterModule '''
def filters(self):
+ ''' returns a mapping of filters to methods '''
return {
- "oo_select_keys": oo_select_keys,
- "oo_collect": oo_collect,
- "oo_flatten": oo_flatten,
- "oo_len": oo_len,
- "oo_pdb": oo_pdb,
- "oo_prepend_strings_in_list": oo_prepend_strings_in_list
- }
+ "oo_select_keys": oo_select_keys,
+ "oo_collect": oo_collect,
+ "oo_flatten": oo_flatten,
+ "oo_len": oo_len,
+ "oo_pdb": oo_pdb,
+ "oo_prepend_strings_in_list": oo_prepend_strings_in_list,
+ "oo_ami_selector": oo_ami_selector,
+ "oo_ec2_volume_definition": oo_ec2_volume_definition
+ }
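To see the new `oo_ec2_volume_definition` filter in action, here is a condensed, standalone copy of its logic (input validation stripped) with a quick check of the ephemeral docker-volume case:

```python
# condensed copy of oo_ec2_volume_definition from the diff above,
# so this snippet runs on its own (inputs assumed valid)
def oo_ec2_volume_definition(data, host_type, docker_ephemeral=False):
    root_vol = data[host_type]['root']
    root_vol['device_name'] = '/dev/sda1'
    root_vol['delete_on_termination'] = True
    if root_vol['device_type'] != 'io1':
        root_vol.pop('iops', None)  # iops only applies to io1 volumes
    if host_type == 'node':
        docker_vol = data[host_type]['docker']
        docker_vol['device_name'] = '/dev/xvdb'
        docker_vol['delete_on_termination'] = True
        if docker_vol['device_type'] != 'io1':
            docker_vol.pop('iops', None)
        if docker_ephemeral:
            # ephemeral volumes carry no EBS attributes
            docker_vol.pop('device_type', None)
            docker_vol.pop('delete_on_termination', None)
            docker_vol['ephemeral'] = 'ephemeral0'
        return [root_vol, docker_vol]
    return [root_vol]

volumes = {
    'node': {
        'root': {'volume_size': 10, 'device_type': 'io1', 'iops': 1000},
        'docker': {'volume_size': 25, 'device_type': 'gp2', 'iops': 500},
    },
}
root, docker = oo_ec2_volume_definition(volumes, 'node', docker_ephemeral=True)
assert root['iops'] == 1000 and root['device_name'] == '/dev/sda1'
assert docker == {'volume_size': 25, 'device_name': '/dev/xvdb',
                  'ephemeral': 'ephemeral0'}
```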
diff --git a/git/.pylintrc b/git/.pylintrc
new file mode 100644
index 000000000..2d45f867e
--- /dev/null
+++ b/git/.pylintrc
@@ -0,0 +1,390 @@
+[MASTER]
+
+# Specify a configuration file.
+#rcfile=
+
+# Python code to execute, usually for sys.path manipulation such as
+# pygtk.require().
+#init-hook=
+
+# Profiled execution.
+profile=no
+
+# Add files or directories to the blacklist. They should be base names, not
+# paths.
+ignore=CVS
+
+# Pickle collected data for later comparisons.
+persistent=no
+
+# List of plugins (as comma separated values of python modules names) to load,
+# usually to register additional checkers.
+load-plugins=
+
+# Deprecated. It was used to include message's id in output. Use --msg-template
+# instead.
+#include-ids=no
+
+# Deprecated. It was used to include symbolic ids of messages in output. Use
+# --msg-template instead.
+#symbols=no
+
+# Use multiple processes to speed up Pylint.
+jobs=1
+
+# Allow loading of arbitrary C extensions. Extensions are imported into the
+# active Python interpreter and may run arbitrary code.
+unsafe-load-any-extension=no
+
+# A comma-separated list of package or module names from where C extensions may
+# be loaded. Extensions are loading into the active Python interpreter and may
+# run arbitrary code
+extension-pkg-whitelist=
+
+# Allow optimization of some AST trees. This will activate a peephole AST
+# optimizer, which will apply various small optimizations. For instance, it can
+# be used to obtain the result of joining multiple strings with the addition
+# operator. Joining a lot of strings can lead to a maximum recursion error in
+# Pylint and this flag can prevent that. It has one side effect, the resulting
+# AST will be different than the one from reality.
+optimize-ast=no
+
+
+[MESSAGES CONTROL]
+
+# Only show warnings with the listed confidence levels. Leave empty to show
+# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED
+confidence=
+
+# Enable the message, report, category or checker with the given id(s). You can
+# either give multiple identifier separated by comma (,) or put this option
+# multiple time. See also the "--disable" option for examples.
+#enable=
+
+# Disable the message, report, category or checker with the given id(s). You
+# can either give multiple identifiers separated by comma (,) or put this
+# option multiple times (only on the command line, not in the configuration
+# file where it should appear only once).You can also use "--disable=all" to
+# disable everything first and then reenable specific checks. For example, if
+# you want to run only the similarities checker, you can use "--disable=all
+# --enable=similarities". If you want to run only the classes checker, but have
+# no Warning level messages displayed, use"--disable=all --enable=classes
+# --disable=W"
+disable=E1608,W1627,E1601,E1603,E1602,E1605,E1604,E1607,E1606,W1621,W1620,W1623,W1622,W1625,W1624,W1609,W1608,W1607,W1606,W1605,W1604,W1603,W1602,W1601,W1639,W1640,I0021,W1638,I0020,W1618,W1619,W1630,W1626,W1637,W1634,W1635,W1610,W1611,W1612,W1613,W1614,W1615,W1616,W1617,W1632,W1633,W0704,W1628,W1629,W1636
+
+
+[REPORTS]
+
+# Set the output format. Available formats are text, parseable, colorized, msvs
+# (visual studio) and html. You can also give a reporter class, eg
+# mypackage.mymodule.MyReporterClass.
+output-format=text
+
+# Put messages in a separate file for each module / package specified on the
+# command line instead of printing them on stdout. Reports (if any) will be
+# written in a file name "pylint_global.[txt|html]".
+files-output=no
+
+# Tells whether to display a full report or only the messages
+reports=no
+
+# Python expression which should return a note less than 10 (10 is the highest
+# note). You have access to the variables errors warning, statement which
+# respectively contain the number of errors / warnings messages and the total
+# number of statements analyzed. This is used by the global evaluation report
+# (RP0004).
+evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
+
+# Add a comment according to your evaluation note. This is used by the global
+# evaluation report (RP0004).
+comment=no
+
+# Template used to display messages. This is a python new-style format string
+# used to format the message information. See doc for all details
+#msg-template=
+
+
+[LOGGING]
+
+# Logging modules to check that the string format arguments are in logging
+# function parameter format
+logging-modules=logging
+
+
+[BASIC]
+
+# Required attributes for module, separated by a comma
+required-attributes=
+
+# List of builtins function names that should not be used, separated by a comma
+bad-functions=map,filter,input
+
+# Good variable names which should always be accepted, separated by a comma
+good-names=i,j,k,ex,Run,_
+
+# Bad variable names which should always be refused, separated by a comma
+bad-names=foo,bar,baz,toto,tutu,tata
+
+# Colon-delimited sets of names that determine each other's naming style when
+# the name regexes allow several styles.
+name-group=
+
+# Include a hint for the correct naming format with invalid-name
+include-naming-hint=no
+
+# Regular expression matching correct function names
+function-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Naming hint for function names
+function-name-hint=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression matching correct variable names
+variable-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Naming hint for variable names
+variable-name-hint=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression matching correct constant names
+const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$
+
+# Naming hint for constant names
+const-name-hint=(([A-Z_][A-Z0-9_]*)|(__.*__))$
+
+# Regular expression matching correct attribute names
+attr-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Naming hint for attribute names
+attr-name-hint=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression matching correct argument names
+argument-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Naming hint for argument names
+argument-name-hint=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression matching correct class attribute names
+class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$
+
+# Naming hint for class attribute names
+class-attribute-name-hint=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$
+
+# Regular expression matching correct inline iteration names
+inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$
+
+# Naming hint for inline iteration names
+inlinevar-name-hint=[A-Za-z_][A-Za-z0-9_]*$
+
+# Regular expression matching correct class names
+class-rgx=[A-Z_][a-zA-Z0-9]+$
+
+# Naming hint for class names
+class-name-hint=[A-Z_][a-zA-Z0-9]+$
+
+# Regular expression matching correct module names
+module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
+
+# Naming hint for module names
+module-name-hint=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
+
+# Regular expression matching correct method names
+method-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Naming hint for method names
+method-name-hint=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression which should only match function or class names that do
+# not require a docstring.
+no-docstring-rgx=__.*__
+
+# Minimum line length for functions/classes that require docstrings, shorter
+# ones are exempt.
+docstring-min-length=-1
+
+
+[SIMILARITIES]
+
+# Minimum lines number of a similarity.
+min-similarity-lines=4
+
+# Ignore comments when computing similarities.
+ignore-comments=yes
+
+# Ignore docstrings when computing similarities.
+ignore-docstrings=yes
+
+# Ignore imports when computing similarities.
+ignore-imports=no
+
+
+[VARIABLES]
+
+# Tells whether we should check for unused import in __init__ files.
+init-import=no
+
+# A regular expression matching the name of dummy variables (i.e. expectedly
+# not used).
+dummy-variables-rgx=_$|dummy
+
+# List of additional names supposed to be defined in builtins. Remember that
+# you should avoid to define new builtins when possible.
+additional-builtins=
+
+# List of strings which can identify a callback function by name. A callback
+# name must start or end with one of those strings.
+callbacks=cb_,_cb
+
+
+[TYPECHECK]
+
+# Tells whether missing members accessed in mixin class should be ignored. A
+# mixin class is detected if its name ends with "mixin" (case insensitive).
+ignore-mixin-members=yes
+
+# List of module names for which member attributes should not be checked
+# (useful for modules/projects where namespaces are manipulated during runtime
+# and thus existing member attributes cannot be deduced by static analysis
+ignored-modules=
+
+# List of classes names for which member attributes should not be checked
+# (useful for classes with attributes dynamically set).
+ignored-classes=SQLObject
+
+# When zope mode is activated, add a predefined set of Zope acquired attributes
+# to generated-members.
+zope=no
+
+# List of members which are set dynamically and missed by pylint inference
+# system, and so shouldn't trigger E0201 when accessed. Python regular
+# expressions are accepted.
+generated-members=REQUEST,acl_users,aq_parent
+
+
+[SPELLING]
+
+# Spelling dictionary name. Available dictionaries: none. To make it working
+# install python-enchant package.
+spelling-dict=
+
+# List of comma separated words that should not be checked.
+spelling-ignore-words=
+
+# A path to a file that contains private dictionary; one word per line.
+spelling-private-dict-file=
+
+# Tells whether to store unknown words to indicated private dictionary in
+# --spelling-private-dict-file option instead of raising a message.
+spelling-store-unknown-words=no
+
+
+[MISCELLANEOUS]
+
+# List of note tags to take in consideration, separated by a comma.
+notes=FIXME,XXX,TODO
+
+
+[FORMAT]
+
+# Maximum number of characters on a single line.
+max-line-length=100
+
+# Regexp for a line that is allowed to be longer than the limit.
+ignore-long-lines=^\s*(# )?<?https?://\S+>?$
+
+# Allow the body of an if to be on the same line as the test if there is no
+# else.
+single-line-if-stmt=no
+
+# List of optional constructs for which whitespace checking is disabled
+no-space-check=trailing-comma,dict-separator
+
+# Maximum number of lines in a module
+max-module-lines=1000
+
+# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1
+# tab).
+indent-string=' '
+
+# Number of spaces of indent required inside a hanging or continued line.
+indent-after-paren=4
+
+# Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
+expected-line-ending-format=
+
+
+[DESIGN]
+
+# Maximum number of arguments for function / method
+max-args=5
+
+# Argument names that match this expression will be ignored. Default to name
+# with leading underscore
+ignored-argument-names=_.*
+
+# Maximum number of locals for function / method body
+max-locals=15
+
+# Maximum number of return / yield for function / method body
+max-returns=6
+
+# Maximum number of branch for function / method body
+max-branches=12
+
+# Maximum number of statements in function / method body
+max-statements=50
+
+# Maximum number of parents for a class (see R0901).
+max-parents=7
+
+# Maximum number of attributes for a class (see R0902).
+max-attributes=7
+
+# Minimum number of public methods for a class (see R0903).
+min-public-methods=2
+
+# Maximum number of public methods for a class (see R0904).
+max-public-methods=20
+
+
+[CLASSES]
+
+# List of interface methods to ignore, separated by a comma. This is used for
+# instance to not check methods defines in Zope's Interface base class.
+ignore-iface-methods=isImplementedBy,deferred,extends,names,namesAndDescriptions,queryDescriptionFor,getBases,getDescriptionFor,getDoc,getName,getTaggedValue,getTaggedValueTags,isEqualOrExtendedBy,setTaggedValue,isImplementedByInstancesOf,adaptWith,is_implemented_by
+
+# List of method names used to declare (i.e. assign) instance attributes.
+defining-attr-methods=__init__,__new__,setUp
+
+# List of valid names for the first argument in a class method.
+valid-classmethod-first-arg=cls
+
+# List of valid names for the first argument in a metaclass class method.
+valid-metaclass-classmethod-first-arg=mcs
+
+# List of member names, which should be excluded from the protected access
+# warning.
+exclude-protected=_asdict,_fields,_replace,_source,_make
+
+
+[IMPORTS]
+
+# Deprecated modules which should not be used, separated by a comma
+deprecated-modules=regsub,TERMIOS,Bastion,rexec
+
+# Create a graph of every (i.e. internal and external) dependencies in the
+# given file (report RP0402 must not be disabled)
+import-graph=
+
+# Create a graph of external dependencies in the given file (report RP0402 must
+# not be disabled)
+ext-import-graph=
+
+# Create a graph of internal dependencies in the given file (report RP0402 must
+# not be disabled)
+int-import-graph=
+
+
+[EXCEPTIONS]
+
+# Exceptions that will emit a warning when being caught. Defaults to
+# "Exception"
+overgeneral-exceptions=Exception
diff --git a/git/parent.rb b/git/parent.rb
new file mode 100755
index 000000000..2acb127c4
--- /dev/null
+++ b/git/parent.rb
@@ -0,0 +1,45 @@
+#!/usr/bin/env ruby
+#
+#
+#
+
+if __FILE__ == $0
+ # If we aren't on the prod branch we don't need the parent check
+ branch = 'prod'
+ exit(0) if ARGV[0] !~ /#{branch}/
+ commit_id = ARGV[1]
+ %x[/usr/bin/git checkout #{branch}]
+ %x[/usr/bin/git merge #{commit_id}]
+
+ count = 0
+ #lines = %x[/usr/bin/git rev-list --left-right stg...master].split("\n")
+ lines = %x[/usr/bin/git rev-list --left-right remotes/origin/stg...#{branch}].split("\n")
+ lines.each do |commit|
+ # next if they are in stage
+ next if commit =~ /^</
+ # remove the first char '>'
+ commit = commit[1..-1]
+ # check if any remote branches contain $commit
+ results = %x[/usr/bin/git branch -q -r --contains #{commit} 2>/dev/null ]
+ # if this comes back empty, nothing contains it, we can skip it as
+ # we have probably created the merge commit here locally
+ next if results.empty?
+
+ # The results generally contain origin/pr/246/merge and origin/pr/246/head
+ # this is the pull request which would contain the commit in question.
+ #
+ # If the results do not contain origin/stg then stage does not contain
+ # the commit in question. Therefore we need to alert!
+ unless results =~ /origin\/stg/
+ puts "\nFAILED: (These commits are not in stage.)\n"
+ puts "\t#{commit}"
+ count += 1
+ end
+ end
+
+ # Exit with count of commits in #{branch} but not stg
+ exit(count)
+end
+
+__END__
+
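The same left-right walk expressed in Python, as a sketch (it omits the remote-branch containment filtering the Ruby version uses to ignore purely local merge commits):

```python
import subprocess

def commits_not_in_stage(branch='prod'):
    # '>' marks commits reachable from branch but not from origin/stg
    out = subprocess.check_output(
        ['git', 'rev-list', '--left-right',
         'remotes/origin/stg...' + branch])
    return [line[1:] for line in out.decode().splitlines()
            if line.startswith('>')]

if __name__ == '__main__':
    missing = commits_not_in_stage()
    for sha in missing:
        print('FAILED: not in stage: ' + sha)
    raise SystemExit(len(missing))
```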
diff --git a/git/pylint.sh b/git/pylint.sh
new file mode 100755
index 000000000..286747565
--- /dev/null
+++ b/git/pylint.sh
@@ -0,0 +1,14 @@
+#!/usr/bin/env bash
+
+
+OLDREV=$1
+NEWREV=$2
+TRG_BRANCH=$3
+
+PYTHON=/var/lib/jenkins/python27/bin/python
+
+/usr/bin/git diff --name-only $OLDREV $NEWREV --diff-filter=ACM | \
+ grep ".py$" | \
+ xargs -r -I{} ${PYTHON} -m pylint --rcfile ${WORKSPACE}/git/.pylintrc {}
+
+exit $?
diff --git a/git/yaml_validation.rb b/git/yaml_validation.rb
new file mode 100755
index 000000000..f5ded7a78
--- /dev/null
+++ b/git/yaml_validation.rb
@@ -0,0 +1,72 @@
+#!/usr/bin/env ruby
+#
+#
+#
+require 'yaml'
+require 'tmpdir'
+
+class YamlValidate
+ def self.yaml_file?(filename)
+ return filename.end_with?('.yaml') || filename.end_with?('.yml')
+ end
+
+ def self.short_yaml_ext?(filename)
+ return filename.end_with?(".yml")
+ end
+
+ def self.valid_yaml?(filename)
+ YAML::load_file(filename)
+
+ return true
+ end
+end
+
+class GitCommit
+ attr_accessor :oldrev, :newrev, :refname, :tmp
+ def initialize(oldrev, newrev, refname)
+ @oldrev = oldrev
+ @newrev = newrev
+ @refname = refname
+ @tmp = Dir.mktmpdir(@newrev)
+ end
+
+ def get_file_changes()
+ files = %x[/usr/bin/git diff --name-only #{@oldrev} #{@newrev} --diff-filter=ACM].split("\n")
+
+ # if files is empty we will get a full checkout. This happens on
+ # a git rm file. If there are no changes then we need to skip the archive
+ return [] if files.empty?
+
+ # We only want to take the files that changed. Archive will do that when passed
+ # the filenames. It will export these to a tmp dir
+ system("/usr/bin/git archive #{@newrev} #{files.join(" ")} | tar x -C #{@tmp}")
+ return Dir.glob("#{@tmp}/**/*").delete_if { |file| File.directory?(file) }
+ end
+end
+
+if __FILE__ == $0
+ while data = STDIN.gets
+ oldrev, newrev, refname = data.split
+ gc = GitCommit.new(oldrev, newrev, refname)
+
+ results = []
+ gc.get_file_changes().each do |file|
+ begin
+ puts "++++++ Received: #{file}"
+
+ #raise "Yaml file extensions must be .yaml not .yml" if YamlValidate.short_yaml_ext? file
+
+ # skip readme, other files, etc
+ next unless YamlValidate.yaml_file?(file)
+
+ results << YamlValidate.valid_yaml?(file)
+ rescue Exception => ex
+ puts "\n#{ex.message}\n\n"
+ results << false
+ end
+ end
+
+ #puts "RESULTS\n#{results.inspect}\n"
+ exit 1 if results.include?(false)
+ end
+end
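An equivalent check in Python, for comparison (a sketch using PyYAML; it reads changed files from the working tree instead of `git archive`-ing them into a tmpdir, so it assumes a non-bare checkout):

```python
import subprocess
import sys
import yaml  # PyYAML

def changed_files(oldrev, newrev):
    out = subprocess.check_output(
        ['git', 'diff', '--name-only', oldrev, newrev, '--diff-filter=ACM'])
    return out.decode().split()

def main(oldrev, newrev):
    ok = True
    for path in changed_files(oldrev, newrev):
        if not path.endswith(('.yaml', '.yml')):
            continue  # skip READMEs and other non-yaml files
        try:
            with open(path) as handle:
                yaml.safe_load(handle)
        except yaml.YAMLError as err:
            print('{0}: {1}'.format(path, err))
            ok = False
    return 0 if ok else 1

if __name__ == '__main__':
    sys.exit(main(sys.argv[1], sys.argv[2]))
```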
diff --git a/inventory/aws/group_vars/all b/inventory/aws/group_vars/all
deleted file mode 100644
index b22da00de..000000000
--- a/inventory/aws/group_vars/all
+++ /dev/null
@@ -1,2 +0,0 @@
----
-ansible_ssh_user: root
diff --git a/inventory/aws/ec2.ini b/inventory/aws/hosts/ec2.ini
index eaab0a410..eaab0a410 100644
--- a/inventory/aws/ec2.ini
+++ b/inventory/aws/hosts/ec2.ini
diff --git a/inventory/aws/ec2.py b/inventory/aws/hosts/ec2.py
index f231ff4c2..f231ff4c2 100755
--- a/inventory/aws/ec2.py
+++ b/inventory/aws/hosts/ec2.py
diff --git a/inventory/aws/hosts/hosts b/inventory/aws/hosts/hosts
new file mode 100644
index 000000000..34a4396bd
--- /dev/null
+++ b/inventory/aws/hosts/hosts
@@ -0,0 +1 @@
+localhost ansible_connection=local ansible_sudo=no ansible_python_interpreter=/usr/bin/python2
diff --git a/inventory/byo/group_vars/all b/inventory/byo/group_vars/all
deleted file mode 100644
index d63e96668..000000000
--- a/inventory/byo/group_vars/all
+++ /dev/null
@@ -1,28 +0,0 @@
----
-# lets assume that we want to use the root as the ssh user for all hosts
-ansible_ssh_user: root
-
-# default debug level for all OpenShift hosts
-openshift_debug_level: 4
-
-# set the OpenShift deployment type for all hosts
-openshift_deployment_type: enterprise
-
-# Override the default registry for development
-openshift_registry_url: docker-buildvm-rhose.usersys.redhat.com:5000/openshift3_beta/ose-${component}:${version}
-
-# Use latest Errata puddle as an additional repo:
-#openshift_additional_repos:
-#- id: ose-devel
-# name: ose-devel
-# baseurl: http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterpriseErrata/3.0/latest/RH7-RHOSE-3.0/$basearch/os
-# enabled: 1
-# gpgcheck: 0
-
-# Use latest Whitelist puddle as an additional repo:
-openshift_additional_repos:
-- id: ose-devel
- name: ose-devel
- baseurl: http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterprise/3.0/latest/RH7-RHOSE-3.0/$basearch/os
- enabled: 1
- gpgcheck: 0
diff --git a/inventory/byo/hosts b/inventory/byo/hosts
index 2dd854778..98dbb4fd8 100644
--- a/inventory/byo/hosts
+++ b/inventory/byo/hosts
@@ -1,5 +1,30 @@
# This is an example of a bring your own (byo) host inventory
+# Create an OSEv3 group that contains the masters and nodes groups
+[OSEv3:children]
+masters
+nodes
+
+# Set variables common for all OSEv3 hosts
+[OSEv3:vars]
+# SSH user; this user should allow ssh-based auth without requiring a password
+ansible_ssh_user=root
+
+# If ansible_ssh_user is not root, ansible_sudo must be set to true
+#ansible_sudo=true
+
+# To deploy origin, change deployment_type to origin
+deployment_type=enterprise
+
+# Pre-release registry URL
+openshift_registry_url=docker-buildvm-rhose.usersys.redhat.com:5000/openshift3_beta/ose-${component}:${version}
+
+# Pre-release additional repo
+openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'baseurl': 'http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterprise/3.0/latest/RH7-RHOSE-3.0/$basearch/os', 'enabled': 1, 'gpgcheck': 0}]
+
+# Origin copr repo
+#openshift_additional_repos=[{'id': 'openshift-origin-copr', 'name': 'OpenShift Origin COPR', 'baseurl': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/', 'enabled': 1, 'gpgcheck': 1, gpgkey: 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg'}]
+
# host group for masters
[masters]
ose3-master-ansible.test.example.com
@@ -7,4 +32,3 @@ ose3-master-ansible.test.example.com
# host group for nodes
[nodes]
ose3-node[1:2]-ansible.test.example.com
-
diff --git a/inventory/gce/group_vars/all b/inventory/gce/group_vars/all
deleted file mode 100644
index b22da00de..000000000
--- a/inventory/gce/group_vars/all
+++ /dev/null
@@ -1,2 +0,0 @@
----
-ansible_ssh_user: root
diff --git a/inventory/gce/gce.py b/inventory/gce/hosts/gce.py
index 3403f735e..3403f735e 100755
--- a/inventory/gce/gce.py
+++ b/inventory/gce/hosts/gce.py
diff --git a/inventory/gce/hosts/hosts b/inventory/gce/hosts/hosts
new file mode 100644
index 000000000..34a4396bd
--- /dev/null
+++ b/inventory/gce/hosts/hosts
@@ -0,0 +1 @@
+localhost ansible_connection=local ansible_sudo=no ansible_python_interpreter=/usr/bin/python2
diff --git a/inventory/libvirt/group_vars/all b/inventory/libvirt/group_vars/all
deleted file mode 100644
index b22da00de..000000000
--- a/inventory/libvirt/group_vars/all
+++ /dev/null
@@ -1,2 +0,0 @@
----
-ansible_ssh_user: root
diff --git a/inventory/libvirt/hosts b/inventory/libvirt/hosts
deleted file mode 100644
index 6a818f268..000000000
--- a/inventory/libvirt/hosts
+++ /dev/null
@@ -1,2 +0,0 @@
-# Eventually we'll add the GCE, AWS, etc dynamic inventories, but for now...
-localhost ansible_python_interpreter=/usr/bin/python2
diff --git a/inventory/libvirt/hosts/hosts b/inventory/libvirt/hosts/hosts
new file mode 100644
index 000000000..34a4396bd
--- /dev/null
+++ b/inventory/libvirt/hosts/hosts
@@ -0,0 +1 @@
+localhost ansible_connection=local ansible_sudo=no ansible_python_interpreter=/usr/bin/python2
diff --git a/inventory/libvirt/hosts/libvirt.ini b/inventory/libvirt/hosts/libvirt.ini
new file mode 100644
index 000000000..62ff204dd
--- /dev/null
+++ b/inventory/libvirt/hosts/libvirt.ini
@@ -0,0 +1,20 @@
+# Ansible libvirt external inventory script settings
+#
+
+[libvirt]
+
+uri = qemu:///system
+
+# API calls to libvirt can be slow. For this reason, we cache the results of an API
+# call. Set this to the path you want cache files to be written to. Two files
+# will be written to this directory:
+# - ansible-libvirt.cache
+# - ansible-libvirt.index
+cache_path = /tmp
+
+# The number of seconds a cache file is considered valid. After this many
+# seconds, a new API call will be made, and the cache file will be updated.
+cache_max_age = 900
+
+
+
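Note: libvirt_generic.py below currently reads only the uri setting; the cache options are documented for parity with the ec2 inventory script but are not yet consumed. A minimal sketch of how an inventory script would typically honor cache_path and cache_max_age (the helper below is illustrative, not part of this commit):

    import os
    import time

    def is_cache_valid(cache_path, cache_max_age):
        # The cache is valid if ansible-libvirt.cache exists and is newer
        # than cache_max_age seconds.
        cache_file = os.path.join(cache_path, 'ansible-libvirt.cache')
        if not os.path.isfile(cache_file):
            return False
        return (time.time() - os.path.getmtime(cache_file)) < cache_max_age

    print(is_cache_valid('/tmp', 900))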
diff --git a/inventory/libvirt/hosts/libvirt_generic.py b/inventory/libvirt/hosts/libvirt_generic.py
new file mode 100755
index 000000000..4652f112e
--- /dev/null
+++ b/inventory/libvirt/hosts/libvirt_generic.py
@@ -0,0 +1,179 @@
+#!/usr/bin/env python2
+
+"""
+libvirt external inventory script
+=================================
+
+Ansible has a feature where instead of reading from /etc/ansible/hosts
+as a text file, it can query external programs to obtain the list
+of hosts, groups the hosts are in, and even variables to assign to each host.
+
+To use this, copy this file over /etc/ansible/hosts and chmod +x the file.
+This, more or less, allows you to keep one central database containing
+info about all of your managed instances.
+
+"""
+
+# (c) 2015, Jason DeTiberus <jdetiber@redhat.com>
+#
+# This file is part of Ansible,
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+######################################################################
+
+import argparse
+import ConfigParser
+import os
+import re
+import sys
+from time import time
+import libvirt
+import xml.etree.ElementTree as ET
+
+try:
+ import json
+except ImportError:
+ import simplejson as json
+
+
+class LibvirtInventory(object):
+
+ def __init__(self):
+        self.inventory = dict()  # A dict mapping group names to the hosts in each group
+ self.cache = dict() # Details about hosts in the inventory
+
+ # Read settings and parse CLI arguments
+ self.read_settings()
+ self.parse_cli_args()
+
+ if self.args.host:
+ print self.json_format_dict(self.get_host_info(), self.args.pretty)
+ elif self.args.list:
+ print self.json_format_dict(self.get_inventory(), self.args.pretty)
+ else: # default action with no options
+ print self.json_format_dict(self.get_inventory(), self.args.pretty)
+
+ def read_settings(self):
+ config = ConfigParser.SafeConfigParser()
+ config.read(
+ os.path.dirname(os.path.realpath(__file__)) + '/libvirt.ini'
+ )
+ self.libvirt_uri = config.get('libvirt', 'uri')
+
+ def parse_cli_args(self):
+ parser = argparse.ArgumentParser(
+ description='Produce an Ansible Inventory file based on libvirt'
+ )
+ parser.add_argument(
+ '--list',
+ action='store_true',
+ default=True,
+ help='List instances (default: True)'
+ )
+ parser.add_argument(
+ '--host',
+ action='store',
+ help='Get all the variables about a specific instance'
+ )
+ parser.add_argument(
+ '--pretty',
+ action='store_true',
+ default=False,
+ help='Pretty format (default: False)'
+ )
+ self.args = parser.parse_args()
+
+ def get_host_info(self):
+ inventory = self.get_inventory()
+ if self.args.host in inventory['_meta']['hostvars']:
+ return inventory['_meta']['hostvars'][self.args.host]
+
+ def get_inventory(self):
+ inventory = dict(_meta=dict(hostvars=dict()))
+
+ conn = libvirt.openReadOnly(self.libvirt_uri)
+ if conn is None:
+            print "Failed to open connection to %s" % self.libvirt_uri
+ sys.exit(1)
+
+ domains = conn.listAllDomains()
+ if domains is None:
+            print "Failed to list domains for connection %s" % self.libvirt_uri
+ sys.exit(1)
+
+ arp_entries = self.parse_arp_entries()
+
+ for domain in domains:
+ hostvars = dict(libvirt_name=domain.name(),
+ libvirt_id=domain.ID(),
+ libvirt_uuid=domain.UUIDString())
+ domain_name = domain.name()
+
+ # TODO: add support for guests that are not in a running state
+ state, _ = domain.state()
+            # 1 is the state for a running guest
+ if state != 1:
+ continue
+
+ hostvars['libvirt_status'] = 'running'
+
+ root = ET.fromstring(domain.XMLDesc())
+ ns = {'ansible': 'https://github.com/ansible/ansible'}
+ for tag_elem in root.findall('./metadata/ansible:tags/ansible:tag', ns):
+ tag = tag_elem.text
+ self.push(inventory, "tag_%s" % tag, domain_name)
+ self.push(hostvars, 'libvirt_tags', tag)
+
+ # TODO: support more than one network interface, also support
+ # interface types other than 'network'
+ interface = root.find("./devices/interface[@type='network']")
+ if interface is not None:
+ mac_elem = interface.find('mac')
+ if mac_elem is not None:
+ mac = mac_elem.get('address')
+ if mac in arp_entries:
+ ip_address = arp_entries[mac]['ip_address']
+ hostvars['ansible_ssh_host'] = ip_address
+ hostvars['libvirt_ip_address'] = ip_address
+
+ inventory['_meta']['hostvars'][domain_name] = hostvars
+
+ return inventory
+
+ def parse_arp_entries(self):
+ arp_entries = dict()
+ with open('/proc/net/arp', 'r') as f:
+ # throw away the header
+ f.readline()
+
+ for line in f:
+ ip_address, _, _, mac, _, device = line.strip().split()
+ arp_entries[mac] = dict(ip_address=ip_address, device=device)
+
+ return arp_entries
+
+ def push(self, my_dict, key, element):
+ if key in my_dict:
+ my_dict[key].append(element)
+ else:
+ my_dict[key] = [element]
+
+ def json_format_dict(self, data, pretty=False):
+ if pretty:
+ return json.dumps(data, sort_keys=True, indent=2)
+ else:
+ return json.dumps(data)
+
+LibvirtInventory()
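For reference, Ansible invokes the script with --list and expects JSON of the shape the class builds; the values below are invented, shown only to illustrate the _meta.hostvars layout and the tag_* grouping:

    import json

    # Hypothetical --list output for one running guest named 'vm1'.
    example_inventory = {
        '_meta': {
            'hostvars': {
                'vm1': {
                    'libvirt_name': 'vm1',
                    'libvirt_id': 3,
                    'libvirt_uuid': '00000000-0000-0000-0000-000000000001',
                    'libvirt_status': 'running',
                    'libvirt_tags': ['env_example'],
                    'ansible_ssh_host': '192.168.122.10',
                    'libvirt_ip_address': '192.168.122.10',
                },
            },
        },
        'tag_env_example': ['vm1'],
    }

    print(json.dumps(example_inventory, sort_keys=True, indent=2))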
diff --git a/inventory/multi_ec2.py b/inventory/multi_ec2.py
index 26c09d712..b839a33ea 100755
--- a/inventory/multi_ec2.py
+++ b/inventory/multi_ec2.py
@@ -1,22 +1,29 @@
#!/usr/bin/env python2
+'''
+ Fetch and combine multiple ec2 account settings into a single
+ json hash.
+'''
# vim: expandtab:tabstop=4:shiftwidth=4
from time import time
import argparse
import yaml
import os
-import sys
-import pdb
import subprocess
import json
-import pprint
CONFIG_FILE_NAME = 'multi_ec2.yaml'
class MultiEc2(object):
+ '''
+ MultiEc2 class:
+ Opens a yaml config file and reads aws credentials.
+ Stores a json hash of resources in result.
+ '''
def __init__(self):
+ self.args = None
self.config = None
self.all_ec2_results = {}
self.result = {}
@@ -24,7 +31,7 @@ class MultiEc2(object):
self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
same_dir_config_file = os.path.join(self.file_path, CONFIG_FILE_NAME)
- etc_dir_config_file = os.path.join(os.path.sep, 'etc','ansible', CONFIG_FILE_NAME)
+ etc_dir_config_file = os.path.join(os.path.sep, 'etc', 'ansible', CONFIG_FILE_NAME)
# Prefer a file in the same directory, fall back to a file in etc
if os.path.isfile(same_dir_config_file):
@@ -39,12 +46,13 @@ class MultiEc2(object):
# load yaml
if self.config_file and os.path.isfile(self.config_file):
self.config = self.load_yaml_config()
- elif os.environ.has_key("AWS_ACCESS_KEY_ID") and os.environ.has_key("AWS_SECRET_ACCESS_KEY"):
+ elif os.environ.has_key("AWS_ACCESS_KEY_ID") and \
+ os.environ.has_key("AWS_SECRET_ACCESS_KEY"):
self.config = {}
self.config['accounts'] = [
{
'name': 'default',
- 'provider': 'aws/ec2.py',
+ 'provider': 'aws/hosts/ec2.py',
'env_vars': {
'AWS_ACCESS_KEY_ID': os.environ["AWS_ACCESS_KEY_ID"],
'AWS_SECRET_ACCESS_KEY': os.environ["AWS_SECRET_ACCESS_KEY"],
@@ -56,13 +64,9 @@ class MultiEc2(object):
else:
raise RuntimeError("Could not find valid ec2 credentials in the environment.")
- if self.args.cache_only:
- # get data from disk
- result = self.get_inventory_from_cache()
-
- if not result:
- self.get_inventory()
- self.write_to_cache()
+ if self.args.refresh_cache:
+ self.get_inventory()
+ self.write_to_cache()
# if its a host query, fetch and do not cache
elif self.args.host:
self.get_inventory()
@@ -74,7 +78,7 @@ class MultiEc2(object):
# get data from disk
self.get_inventory_from_cache()
- def load_yaml_config(self,conf_file=None):
+ def load_yaml_config(self, conf_file=None):
"""Load a yaml config file with credentials to query the
respective cloud for inventory.
"""
@@ -88,7 +92,7 @@ class MultiEc2(object):
return config
- def get_provider_tags(self,provider, env={}):
+ def get_provider_tags(self, provider, env=None):
"""Call <provider> and query all of the tags that are usuable
by ansible. If environment is empty use the default env.
"""
@@ -153,7 +157,8 @@ class MultiEc2(object):
self.all_ec2_results[result['name']] = json.loads(result['out'])
values = self.all_ec2_results.values()
values.insert(0, self.result)
- [MultiEc2.merge_destructively(self.result, x) for x in values]
+ for result in values:
+ MultiEc2.merge_destructively(self.result, result)
else:
# For any 0 result, return it
count = 0
@@ -165,30 +170,30 @@ class MultiEc2(object):
raise RuntimeError("Found > 1 results for --host %s. \
This is an invalid state." % self.args.host)
@staticmethod
- def merge_destructively(a, b):
- "merges b into a"
- for key in b:
- if key in a:
- if isinstance(a[key], dict) and isinstance(b[key], dict):
- MultiEc2.merge_destructively(a[key], b[key])
- elif a[key] == b[key]:
+ def merge_destructively(input_a, input_b):
+        "merges input_b into input_a"
+ for key in input_b:
+ if key in input_a:
+ if isinstance(input_a[key], dict) and isinstance(input_b[key], dict):
+ MultiEc2.merge_destructively(input_a[key], input_b[key])
+ elif input_a[key] == input_b[key]:
pass # same leaf value
# both lists so add each element in b to a if it does ! exist
- elif isinstance(a[key], list) and isinstance(b[key],list):
- for x in b[key]:
- if x not in a[key]:
- a[key].append(x)
+ elif isinstance(input_a[key], list) and isinstance(input_b[key], list):
+ for result in input_b[key]:
+ if result not in input_a[key]:
+                            input_a[key].append(result)
# a is a list and not b
- elif isinstance(a[key], list):
- if b[key] not in a[key]:
- a[key].append(b[key])
- elif isinstance(b[key], list):
- a[key] = [a[key]] + [k for k in b[key] if k != a[key]]
+ elif isinstance(input_a[key], list):
+ if input_b[key] not in input_a[key]:
+ input_a[key].append(input_b[key])
+ elif isinstance(input_b[key], list):
+ input_a[key] = [input_a[key]] + [k for k in input_b[key] if k != input_a[key]]
else:
- a[key] = [a[key],b[key]]
+ input_a[key] = [input_a[key], input_b[key]]
else:
- a[key] = b[key]
- return a
+ input_a[key] = input_b[key]
+ return input_a
def is_cache_valid(self):
''' Determines if the cache files have expired, or if it is still valid '''
@@ -204,19 +209,20 @@ class MultiEc2(object):
def parse_cli_args(self):
''' Command line argument processing '''
- parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on a provider')
- parser.add_argument('--cache-only', action='store_true', default=False,
- help='Fetch cached only instances (default: False)')
+ parser = argparse.ArgumentParser(
+ description='Produce an Ansible Inventory file based on a provider')
+ parser.add_argument('--refresh-cache', action='store_true', default=False,
+                            help='Force a refresh of the cache by making API requests (default: False)')
parser.add_argument('--list', action='store_true', default=True,
- help='List instances (default: True)')
+ help='List instances (default: True)')
parser.add_argument('--host', action='store', default=False,
- help='Get all the variables about a specific instance')
+ help='Get all the variables about a specific instance')
self.args = parser.parse_args()
def write_to_cache(self):
''' Writes data in JSON format to a file '''
- json_data = self.json_format_dict(self.result, True)
+ json_data = MultiEc2.json_format_dict(self.result, True)
with open(self.cache_path, 'w') as cache:
cache.write(json_data)
@@ -232,7 +238,8 @@ class MultiEc2(object):
return True
- def json_format_dict(self, data, pretty=False):
+ @classmethod
+ def json_format_dict(cls, data, pretty=False):
''' Converts a dict to a JSON object and dumps it as a formatted
string '''
@@ -242,9 +249,9 @@ class MultiEc2(object):
return json.dumps(data)
def result_str(self):
+        '''Return a pretty-printed JSON string of self.result'''
return self.json_format_dict(self.result, True)
if __name__ == "__main__":
- mi = MultiEc2()
- print mi.result_str()
+ print MultiEc2().result_str()
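To make the renamed merge helper concrete, here is a standalone demonstration of merge_destructively's semantics (nested dicts recurse, lists union, conflicting scalars collect into a list); the inputs are invented:

    def merge_destructively(a, b):
        # Same logic as MultiEc2.merge_destructively, free-standing for clarity.
        for key in b:
            if key in a:
                if isinstance(a[key], dict) and isinstance(b[key], dict):
                    merge_destructively(a[key], b[key])
                elif a[key] == b[key]:
                    pass  # same leaf value
                elif isinstance(a[key], list) and isinstance(b[key], list):
                    for item in b[key]:
                        if item not in a[key]:
                            a[key].append(item)
                elif isinstance(a[key], list):
                    if b[key] not in a[key]:
                        a[key].append(b[key])
                elif isinstance(b[key], list):
                    a[key] = [a[key]] + [k for k in b[key] if k != a[key]]
                else:
                    a[key] = [a[key], b[key]]
            else:
                a[key] = b[key]
        return a

    merged = merge_destructively({'tag_env_a': ['host1'], 'region': 'us-east-1'},
                                 {'tag_env_a': ['host2'], 'region': 'us-west-2'})
    print(merged)
    # {'tag_env_a': ['host1', 'host2'], 'region': ['us-east-1', 'us-west-2']}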
diff --git a/inventory/multi_ec2.yaml.example b/inventory/multi_ec2.yaml.example
index 0bd505816..91e7c7970 100644
--- a/inventory/multi_ec2.yaml.example
+++ b/inventory/multi_ec2.yaml.example
@@ -1,13 +1,13 @@
# multi ec2 inventory configs
accounts:
- name: aws1
- provider: aws/ec2.py
+ provider: aws/hosts/ec2.py
env_vars:
AWS_ACCESS_KEY_ID: XXXXXXXXXXXXXXXXXXXX
AWS_SECRET_ACCESS_KEY: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
- name: aws2
- provider: aws/ec2.py
+ provider: aws/hosts/ec2.py
env_vars:
AWS_ACCESS_KEY_ID: XXXXXXXXXXXXXXXXXXXX
AWS_SECRET_ACCESS_KEY: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
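Each account entry names the provider script (now under aws/hosts/) and the environment passed to it; a quick way to sanity-check a config before running the inventory (PyYAML assumed, path illustrative):

    import yaml

    with open('multi_ec2.yaml') as conf:
        config = yaml.safe_load(conf)

    for account in config['accounts']:
        # Print each account's name, provider script, and configured env vars.
        print(account['name'], account['provider'], sorted(account['env_vars']))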
diff --git a/playbooks/aws/openshift-cluster/config.yml b/playbooks/aws/openshift-cluster/config.yml
new file mode 100644
index 000000000..b8961704e
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/config.yml
@@ -0,0 +1,36 @@
+---
+- name: Populate oo_masters_to_config host group
+ hosts: localhost
+ gather_facts: no
+ vars_files:
+ - vars.yml
+ tasks:
+ - name: Evaluate oo_masters_to_config
+ add_host:
+ name: "{{ item }}"
+ groups: oo_masters_to_config
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ with_items: groups["tag_env-host-type_{{ cluster_id }}-openshift-master"] | default([])
+ - name: Evaluate oo_nodes_to_config
+ add_host:
+ name: "{{ item }}"
+ groups: oo_nodes_to_config
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ with_items: groups["tag_env-host-type_{{ cluster_id }}-openshift-node"] | default([])
+ - name: Evaluate oo_first_master
+ add_host:
+ name: "{{ groups['tag_env-host-type_' ~ cluster_id ~ '-openshift-master'][0] }}"
+ groups: oo_first_master
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ when: "'tag_env-host-type_{{ cluster_id }}-openshift-master' in groups"
+
+- include: ../../common/openshift-cluster/config.yml
+ vars:
+ openshift_cluster_id: "{{ cluster_id }}"
+ openshift_debug_level: 4
+ openshift_deployment_type: "{{ deployment_type }}"
+ openshift_hostname: "{{ ec2_private_ip_address }}"
+ openshift_public_hostname: "{{ ec2_ip_address }}"
diff --git a/playbooks/aws/openshift-cluster/launch.yml b/playbooks/aws/openshift-cluster/launch.yml
index 3561c1803..3eb5496e4 100644
--- a/playbooks/aws/openshift-cluster/launch.yml
+++ b/playbooks/aws/openshift-cluster/launch.yml
@@ -4,59 +4,27 @@
connection: local
gather_facts: no
vars_files:
- - vars.yml
+ - vars.yml
+ - ["vars.{{ deployment_type }}.{{ cluster_id }}.yml", vars.defaults.yml]
tasks:
- - set_fact: k8s_type="master"
-
- - name: Generate master instance names(s)
- set_fact: scratch={{ cluster_id }}-{{ k8s_type }}-{{ '%05x' |format( 1048576 |random) }}
- register: master_names_output
- with_sequence: start=1 end={{ num_masters }}
-
- # These set_fact's cannot be combined
- - set_fact:
- master_names_string: "{% for item in master_names_output.results %}{{ item.ansible_facts.scratch }} {% endfor %}"
-
- - set_fact:
- master_names: "{{ master_names_string.strip().split(' ') }}"
-
- - include: launch_instances.yml
- vars:
- instances: "{{ master_names }}"
- cluster: "{{ cluster_id }}"
- type: "{{ k8s_type }}"
-
- - set_fact: k8s_type="node"
-
- - name: Generate node instance names(s)
- set_fact: scratch={{ cluster_id }}-{{ k8s_type }}-{{ '%05x' |format( 1048576 |random) }}
- register: node_names_output
- with_sequence: start=1 end={{ num_nodes }}
-
- # These set_fact's cannot be combined
- - set_fact:
- node_names_string: "{% for item in node_names_output.results %}{{ item.ansible_facts.scratch }} {% endfor %}"
-
- - set_fact:
- node_names: "{{ node_names_string.strip().split(' ') }}"
-
- - include: launch_instances.yml
- vars:
- instances: "{{ node_names }}"
- cluster: "{{ cluster_id }}"
- type: "{{ k8s_type }}"
-
-- hosts: "tag_env_{{ cluster_id }}"
- roles:
- - openshift_repos
- - os_update_latest
-
-- include: ../openshift-master/config.yml
- vars:
- oo_host_group_exp: "groups[\"tag_env-host-type_{{ cluster_id }}-openshift-master\"]"
-
-- include: ../openshift-node/config.yml
- vars:
- oo_host_group_exp: "groups[\"tag_env-host-type_{{ cluster_id }}-openshift-node\"]"
+ - fail:
+ msg: Deployment type not supported for aws provider yet
+ when: deployment_type == 'enterprise'
+
+ - include: ../../common/openshift-cluster/set_master_launch_facts_tasks.yml
+ - include: tasks/launch_instances.yml
+ vars:
+ instances: "{{ master_names }}"
+ cluster: "{{ cluster_id }}"
+ type: "{{ k8s_type }}"
+
+ - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml
+ - include: tasks/launch_instances.yml
+ vars:
+ instances: "{{ node_names }}"
+ cluster: "{{ cluster_id }}"
+ type: "{{ k8s_type }}"
+
+- include: update.yml
- include: list.yml
diff --git a/playbooks/aws/openshift-cluster/launch_instances.yml b/playbooks/aws/openshift-cluster/launch_instances.yml
deleted file mode 100644
index 9d645fbe5..000000000
--- a/playbooks/aws/openshift-cluster/launch_instances.yml
+++ /dev/null
@@ -1,63 +0,0 @@
----
-- set_fact:
- machine_type: "{{ lookup('env', 'ec2_instance_type')|default('m3.large', true) }}"
- machine_image: "{{ lookup('env', 'ec2_ami')|default('ami-307b3658', true) }}"
- machine_region: "{{ lookup('env', 'ec2_region')|default('us-east-1', true) }}"
- machine_keypair: "{{ lookup('env', 'ec2_keypair')|default('libra', true) }}"
- created_by: "{{ lookup('env', 'LOGNAME')|default(cluster, true) }}"
- security_group: "{{ lookup('env', 'ec2_security_group')|default('public', true) }}"
- env: "{{ cluster }}"
- host_type: "{{ type }}"
- env_host_type: "{{ cluster }}-openshift-{{ type }}"
-
-- name: Launch instance(s)
- ec2:
- state: present
- region: "{{ machine_region }}"
- keypair: "{{ machine_keypair }}"
- group: "{{ security_group }}"
- instance_type: "{{ machine_type }}"
- image: "{{ machine_image }}"
- count: "{{ instances | oo_len }}"
- wait: yes
- instance_tags:
- created-by: "{{ created_by }}"
- env: "{{ env }}"
- host-type: "{{ host_type }}"
- env-host-type: "{{ env_host_type }}"
- register: ec2
-
-- name: Add Name tag to instances
- ec2_tag: resource={{ item.1.id }} region={{ machine_region }} state=present
- with_together:
- - instances
- - ec2.instances
- args:
- tags:
- Name: "{{ item.0 }}"
-
-- set_fact:
- instance_groups: tag_created-by_{{ created_by }}, tag_env_{{ env }}, tag_host-type_{{ host_type }}, tag_env-host-type_{{ env_host_type }}
-
-- name: Add new instances groups and variables
- add_host:
- hostname: "{{ item.0 }}"
- ansible_ssh_host: "{{ item.1.dns_name }}"
- groups: "{{ instance_groups }}"
- ec2_private_ip_address: "{{ item.1.private_ip }}"
- ec2_ip_address: "{{ item.1.public_ip }}"
- with_together:
- - instances
- - ec2.instances
-
-- name: Wait for ssh
- wait_for: "port=22 host={{ item.dns_name }}"
- with_items: ec2.instances
-
-- name: Wait for root user setup
- command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null root@{{ item.dns_name }} echo root user is setup"
- register: result
- until: result.rc == 0
- retries: 20
- delay: 10
- with_items: ec2.instances
diff --git a/playbooks/aws/openshift-cluster/library/ec2_ami_find.py b/playbooks/aws/openshift-cluster/library/ec2_ami_find.py
new file mode 100644
index 000000000..29e594a65
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/library/ec2_ami_find.py
@@ -0,0 +1,302 @@
+#!/usr/bin/python
+#pylint: skip-file
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: ec2_ami_find
+version_added: 2.0
+short_description: Searches for AMIs to obtain the AMI ID and other information
+description:
+ - Returns list of matching AMIs with AMI ID, along with other useful information
+ - Can search AMIs with different owners
+ - Can search by matching tag(s), by AMI name and/or other criteria
+ - Results can be sorted and sliced
+author: Tom Bamford
+notes:
+ - This module is not backwards compatible with the previous version of the ec2_search_ami module which worked only for Ubuntu AMIs listed on cloud-images.ubuntu.com.
+ - See the example below for a suggestion of how to search by distro/release.
+options:
+ region:
+ description:
+ - The AWS region to use.
+ required: true
+ aliases: [ 'aws_region', 'ec2_region' ]
+ owner:
+ description:
+ - Search AMIs owned by the specified owner
+ - Can specify an AWS account ID, or one of the special IDs 'self', 'amazon' or 'aws-marketplace'
+ - If not specified, all EC2 AMIs in the specified region will be searched.
+ - You can include wildcards in many of the search options. An asterisk (*) matches zero or more characters, and a question mark (?) matches exactly one character. You can escape special characters using a backslash (\) before the character. For example, a value of \*amazon\?\\ searches for the literal string *amazon?\.
+ required: false
+ default: null
+ ami_id:
+ description:
+ - An AMI ID to match.
+ default: null
+ required: false
+ ami_tags:
+ description:
+ - A hash/dictionary of tags to match for the AMI.
+ default: null
+ required: false
+ architecture:
+ description:
+ - An architecture type to match (e.g. x86_64).
+ default: null
+ required: false
+ hypervisor:
+ description:
+      - A hypervisor type to match (e.g. xen).
+ default: null
+ required: false
+ is_public:
+ description:
+ - Whether or not the image(s) are public.
+ choices: ['yes', 'no']
+ default: null
+ required: false
+ name:
+ description:
+ - An AMI name to match.
+ default: null
+ required: false
+ platform:
+ description:
+ - Platform type to match.
+ default: null
+ required: false
+ sort:
+ description:
+      - Optional attribute with which to sort the results.
+ - If specifying 'tag', the 'tag_name' parameter is required.
+ choices: ['name', 'description', 'tag']
+ default: null
+ required: false
+ sort_tag:
+ description:
+ - Tag name with which to sort results.
+ - Required when specifying 'sort=tag'.
+ default: null
+ required: false
+ sort_order:
+ description:
+ - Order in which to sort results.
+ - Only used when the 'sort' parameter is specified.
+ choices: ['ascending', 'descending']
+ default: 'ascending'
+ required: false
+ sort_start:
+ description:
+ - Which result to start with (when sorting).
+ - Corresponds to Python slice notation.
+ default: null
+ required: false
+ sort_end:
+ description:
+ - Which result to end with (when sorting).
+ - Corresponds to Python slice notation.
+ default: null
+ required: false
+ state:
+ description:
+ - AMI state to match.
+ default: 'available'
+ required: false
+ virtualization_type:
+ description:
+ - Virtualization type to match (e.g. hvm).
+ default: null
+ required: false
+ no_result_action:
+ description:
+ - What to do when no results are found.
+ - "'success' reports success and returns an empty array"
+ - "'fail' causes the module to report failure"
+ choices: ['success', 'fail']
+ default: 'success'
+ required: false
+requirements:
+ - boto
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Search for the AMI tagged "project:website"
+- ec2_ami_find:
+ owner: self
+ tags:
+ project: website
+ no_result_action: fail
+ register: ami_find
+
+# Search for the latest Ubuntu 14.04 AMI
+- ec2_ami_find:
+ name: "ubuntu/images/ebs/ubuntu-trusty-14.04-amd64-server-*"
+ owner: 099720109477
+ sort: name
+ sort_order: descending
+ sort_end: 1
+ register: ami_find
+
+# Launch an EC2 instance
+- ec2:
+    image: "{{ ami_find.results[0].ami_id }}"
+ instance_type: m3.medium
+ key_name: mykey
+ wait: yes
+'''
+
+try:
+ import boto.ec2
+ HAS_BOTO=True
+except ImportError:
+ HAS_BOTO=False
+
+import json
+
+def main():
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(dict(
+ region = dict(required=True,
+ aliases = ['aws_region', 'ec2_region']),
+ owner = dict(required=False, default=None),
+ ami_id = dict(required=False),
+ ami_tags = dict(required=False, type='dict',
+ aliases = ['search_tags', 'image_tags']),
+ architecture = dict(required=False),
+ hypervisor = dict(required=False),
+ is_public = dict(required=False),
+ name = dict(required=False),
+ platform = dict(required=False),
+ sort = dict(required=False, default=None,
+ choices=['name', 'description', 'tag']),
+ sort_tag = dict(required=False),
+ sort_order = dict(required=False, default='ascending',
+ choices=['ascending', 'descending']),
+ sort_start = dict(required=False),
+ sort_end = dict(required=False),
+ state = dict(required=False, default='available'),
+ virtualization_type = dict(required=False),
+ no_result_action = dict(required=False, default='success',
+ choices = ['success', 'fail']),
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ )
+
+ if not HAS_BOTO:
+ module.fail_json(msg='boto required for this module, install via pip or your package manager')
+
+ ami_id = module.params.get('ami_id')
+ ami_tags = module.params.get('ami_tags')
+ architecture = module.params.get('architecture')
+ hypervisor = module.params.get('hypervisor')
+ is_public = module.params.get('is_public')
+ name = module.params.get('name')
+ owner = module.params.get('owner')
+ platform = module.params.get('platform')
+ sort = module.params.get('sort')
+ sort_tag = module.params.get('sort_tag')
+ sort_order = module.params.get('sort_order')
+ sort_start = module.params.get('sort_start')
+ sort_end = module.params.get('sort_end')
+ state = module.params.get('state')
+ virtualization_type = module.params.get('virtualization_type')
+ no_result_action = module.params.get('no_result_action')
+
+ filter = {'state': state}
+
+ if ami_id:
+ filter['image_id'] = ami_id
+ if ami_tags:
+ for tag in ami_tags:
+ filter['tag:'+tag] = ami_tags[tag]
+ if architecture:
+ filter['architecture'] = architecture
+ if hypervisor:
+ filter['hypervisor'] = hypervisor
+ if is_public:
+ filter['is_public'] = is_public
+ if name:
+ filter['name'] = name
+ if platform:
+ filter['platform'] = platform
+ if virtualization_type:
+ filter['virtualization_type'] = virtualization_type
+
+ ec2 = ec2_connect(module)
+
+ images_result = ec2.get_all_images(owners=owner, filters=filter)
+
+ if no_result_action == 'fail' and len(images_result) == 0:
+ module.fail_json(msg="No AMIs matched the attributes: %s" % json.dumps(filter))
+
+ results = []
+ for image in images_result:
+ data = {
+ 'ami_id': image.id,
+ 'architecture': image.architecture,
+ 'description': image.description,
+ 'is_public': image.is_public,
+ 'name': image.name,
+ 'owner_id': image.owner_id,
+ 'platform': image.platform,
+ 'root_device_name': image.root_device_name,
+ 'root_device_type': image.root_device_type,
+ 'state': image.state,
+ 'tags': image.tags,
+ 'virtualization_type': image.virtualization_type,
+ }
+
+ if image.kernel_id:
+ data['kernel_id'] = image.kernel_id
+ if image.ramdisk_id:
+ data['ramdisk_id'] = image.ramdisk_id
+
+ results.append(data)
+
+ if sort == 'tag':
+ if not sort_tag:
+ module.fail_json(msg="'sort_tag' option must be given with 'sort=tag'")
+ results.sort(key=lambda e: e['tags'][sort_tag], reverse=(sort_order=='descending'))
+ elif sort:
+ results.sort(key=lambda e: e[sort], reverse=(sort_order=='descending'))
+
+ try:
+ if sort and sort_start and sort_end:
+ results = results[int(sort_start):int(sort_end)]
+ elif sort and sort_start:
+ results = results[int(sort_start):]
+ elif sort and sort_end:
+ results = results[:int(sort_end)]
+ except TypeError:
+ module.fail_json(msg="Please supply numeric values for sort_start and/or sort_end")
+
+ module.exit_json(results=results)
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+ main()
+
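The module's sort and slice options compose exactly like Python list sorting and slicing; for instance sort=name with sort_order=descending and sort_end=1 keeps only the lexically newest name, which is how the "latest AMI" example in EXAMPLES works. A standalone illustration with fabricated records:

    # Fabricated stand-ins for the module's `results` list.
    results = [
        {'ami_id': 'ami-1', 'name': 'openshift-rhel7_2015-03-01'},
        {'ami_id': 'ami-2', 'name': 'openshift-rhel7_2015-04-01'},
        {'ami_id': 'ami-3', 'name': 'openshift-rhel7_2015-02-01'},
    ]

    sort, sort_order, sort_end = 'name', 'descending', 1
    results.sort(key=lambda e: e[sort], reverse=(sort_order == 'descending'))
    results = results[:int(sort_end)]

    print(results)  # [{'ami_id': 'ami-2', 'name': 'openshift-rhel7_2015-04-01'}]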
diff --git a/playbooks/aws/openshift-cluster/list.yml b/playbooks/aws/openshift-cluster/list.yml
index 08e9e2df4..04fcdc0a1 100644
--- a/playbooks/aws/openshift-cluster/list.yml
+++ b/playbooks/aws/openshift-cluster/list.yml
@@ -2,16 +2,23 @@
- name: Generate oo_list_hosts group
hosts: localhost
gather_facts: no
+ vars_files:
+ - vars.yml
tasks:
- set_fact: scratch_group=tag_env_{{ cluster_id }}
when: cluster_id != ''
- set_fact: scratch_group=all
- when: scratch_group is not defined
- - add_host: name={{ item }} groups=oo_list_hosts
- with_items: groups[scratch_group] | difference(['localhost'])
+ when: cluster_id == ''
+ - add_host:
+ name: "{{ item }}"
+ groups: oo_list_hosts
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ with_items: groups[scratch_group] | default([]) | difference(['localhost'])
- name: List Hosts
hosts: oo_list_hosts
gather_facts: no
tasks:
- - debug: msg="public:{{hostvars[inventory_hostname].ec2_ip_address}} private:{{hostvars[inventory_hostname].ec2_private_ip_address}}"
+ - debug:
+ msg: "public ip:{{ hostvars[inventory_hostname].ec2_ip_address }} private ip:{{ hostvars[inventory_hostname].ec2_private_ip_address }}"
diff --git a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml
new file mode 100644
index 000000000..666a8d1fb
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml
@@ -0,0 +1,132 @@
+---
+- set_fact:
+ created_by: "{{ lookup('env', 'LOGNAME')|default(cluster, true) }}"
+ docker_vol_ephemeral: "{{ lookup('env', 'os_docker_vol_ephemeral') | default(false, true) }}"
+ env: "{{ cluster }}"
+ env_host_type: "{{ cluster }}-openshift-{{ type }}"
+ host_type: "{{ type }}"
+
+- set_fact:
+ ec2_region: "{{ lookup('env', 'ec2_region')
+ | default(deployment_vars[deployment_type].region, true) }}"
+ when: ec2_region is not defined
+- set_fact:
+ ec2_image_name: "{{ lookup('env', 'ec2_image_name')
+ | default(deployment_vars[deployment_type].image_name, true) }}"
+ when: ec2_image_name is not defined and ec2_image is not defined
+- set_fact:
+ ec2_image: "{{ lookup('env', 'ec2_image')
+ | default(deployment_vars[deployment_type].image, true) }}"
+ when: ec2_image is not defined and not ec2_image_name
+- set_fact:
+ ec2_instance_type: "{{ lookup('env', 'ec2_instance_type')
+ | default(deployment_vars[deployment_type].type, true) }}"
+ when: ec2_instance_type is not defined
+- set_fact:
+ ec2_keypair: "{{ lookup('env', 'ec2_keypair')
+ | default(deployment_vars[deployment_type].keypair, true) }}"
+ when: ec2_keypair is not defined
+- set_fact:
+ ec2_vpc_subnet: "{{ lookup('env', 'ec2_vpc_subnet')
+ | default(deployment_vars[deployment_type].vpc_subnet, true) }}"
+ when: ec2_vpc_subnet is not defined
+- set_fact:
+ ec2_assign_public_ip: "{{ lookup('env', 'ec2_assign_public_ip')
+ | default(deployment_vars[deployment_type].assign_public_ip, true) }}"
+ when: ec2_assign_public_ip is not defined
+- set_fact:
+ ec2_security_groups: "{{ lookup('env', 'ec2_security_groups')
+ | default(deployment_vars[deployment_type].security_groups, true) }}"
+ when: ec2_security_groups is not defined
+
+- name: Find amis for deployment_type
+ ec2_ami_find:
+ region: "{{ ec2_region }}"
+ ami_id: "{{ ec2_image | default(omit, true) }}"
+ name: "{{ ec2_image_name | default(omit, true) }}"
+ register: ami_result
+
+- fail: msg="Could not find requested ami"
+ when: not ami_result.results
+
+- set_fact:
+ latest_ami: "{{ ami_result.results | oo_ami_selector(ec2_image_name) }}"
+ user_data: "{{ lookup('template', '../templates/user_data.j2') if type == 'node' else None | default('omit') }}"
+ volume_defs:
+ master:
+ root:
+ volume_size: "{{ lookup('env', 'os_master_root_vol_size') | default(25, true) }}"
+ device_type: "{{ lookup('env', 'os_master_root_vol_type') | default('gp2', true) }}"
+ iops: "{{ lookup('env', 'os_master_root_vol_iops') | default(500, true) }}"
+ node:
+ root:
+ volume_size: "{{ lookup('env', 'os_node_root_vol_size') | default(25, true) }}"
+ device_type: "{{ lookup('env', 'os_node_root_vol_type') | default('gp2', true) }}"
+ iops: "{{ lookup('env', 'os_node_root_vol_iops') | default(500, true) }}"
+ docker:
+ volume_size: "{{ lookup('env', 'os_docker_vol_size') | default(32, true) }}"
+ device_type: "{{ lookup('env', 'os_docker_vol_type') | default('gp2', true) }}"
+ iops: "{{ lookup('env', 'os_docker_vol_iops') | default(500, true) }}"
+
+- set_fact:
+ volumes: "{{ volume_defs | oo_ec2_volume_definition(host_type, docker_vol_ephemeral | bool) }}"
+
+- name: Launch instance(s)
+ ec2:
+ state: present
+ region: "{{ ec2_region }}"
+ keypair: "{{ ec2_keypair }}"
+ group: "{{ ec2_security_groups }}"
+ instance_type: "{{ ec2_instance_type }}"
+ image: "{{ latest_ami }}"
+ count: "{{ instances | oo_len }}"
+ vpc_subnet_id: "{{ ec2_vpc_subnet | default(omit, true) }}"
+ assign_public_ip: "{{ ec2_assign_public_ip | default(omit, true) }}"
+ user_data: "{{ user_data }}"
+ wait: yes
+ instance_tags:
+ created-by: "{{ created_by }}"
+ env: "{{ env }}"
+ host-type: "{{ host_type }}"
+ env-host-type: "{{ env_host_type }}"
+ volumes: "{{ volumes }}"
+ register: ec2
+
+- name: Add Name tag to instances
+ ec2_tag: resource={{ item.1.id }} region={{ ec2_region }} state=present
+ with_together:
+ - instances
+ - ec2.instances
+ args:
+ tags:
+ Name: "{{ item.0 }}"
+
+- set_fact:
+ instance_groups: tag_created-by_{{ created_by }}, tag_env_{{ env }}, tag_host-type_{{ host_type }}, tag_env-host-type_{{ env_host_type }}
+
+- name: Add new instances groups and variables
+ add_host:
+ hostname: "{{ item.0 }}"
+ ansible_ssh_host: "{{ item.1.dns_name }}"
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ groups: "{{ instance_groups }}"
+ ec2_private_ip_address: "{{ item.1.private_ip }}"
+ ec2_ip_address: "{{ item.1.public_ip }}"
+ with_together:
+ - instances
+ - ec2.instances
+
+- name: Wait for ssh
+ wait_for: "port=22 host={{ item.dns_name }}"
+ with_items: ec2.instances
+
+- name: Wait for user setup
+ command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null {{ hostvars[item.0].ansible_ssh_user }}@{{ item.1.dns_name }} echo {{ hostvars[item.0].ansible_ssh_user }} user is setup"
+ register: result
+ until: result.rc == 0
+ retries: 20
+ delay: 10
+ with_together:
+ - instances
+ - ec2.instances
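oo_ami_selector is a custom filter from filter_plugins/oo_filters.py and its body is not shown in this diff; a plausible minimal sketch of the behavior the task above relies on, assuming it picks the newest image when a name pattern was used and otherwise takes the only match (an assumption, not the actual filter source):

    def oo_ami_selector(ami_info, image_name=None):
        # Sketch only: ec2_ami_find returns a list of dicts with 'name' and
        # 'ami_id'; image names embed timestamps, so the lexically greatest
        # name is the most recent build.
        if not image_name or '*' not in image_name:
            return ami_info[0]['ami_id']
        return max(ami_info, key=lambda ami: ami['name'])['ami_id']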
diff --git a/playbooks/aws/openshift-cluster/templates/user_data.j2 b/playbooks/aws/openshift-cluster/templates/user_data.j2
new file mode 100644
index 000000000..7dbc8f552
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/templates/user_data.j2
@@ -0,0 +1,29 @@
+#cloud-config
+yum_repos:
+ jdetiber-copr:
+ name: Copr repo for origin owned by jdetiber
+ baseurl: https://copr-be.cloud.fedoraproject.org/results/jdetiber/origin/epel-7-$basearch/
+ skip_if_unavailable: true
+ gpgcheck: true
+ gpgkey: https://copr-be.cloud.fedoraproject.org/results/jdetiber/origin/pubkey.gpg
+ enabled: true
+
+packages:
+- xfsprogs # can be dropped after docker-storage-setup properly requires it: https://github.com/projectatomic/docker-storage-setup/pull/8
+- docker-storage-setup
+
+mounts:
+- [ xvdb ]
+- [ ephemeral0 ]
+
+write_files:
+- content: |
+ DEVS=/dev/xvdb
+ VG=docker_vg
+ path: /etc/sysconfig/docker-storage-setup
+ owner: root:root
+ permissions: '0644'
+
+runcmd:
+- systemctl daemon-reload
+- systemctl enable lvm2-lvmetad.service docker-storage-setup.service
diff --git a/playbooks/aws/openshift-cluster/terminate.yml b/playbooks/aws/openshift-cluster/terminate.yml
index 39607633a..617d0d456 100644
--- a/playbooks/aws/openshift-cluster/terminate.yml
+++ b/playbooks/aws/openshift-cluster/terminate.yml
@@ -1,14 +1,16 @@
---
- name: Terminate instance(s)
hosts: localhost
-
+ gather_facts: no
vars_files:
- - vars.yml
-
-- include: ../openshift-node/terminate.yml
- vars:
- oo_host_group_exp: 'groups["tag_env-host-type_{{ cluster_id }}-openshift-node"]'
+ - vars.yml
+ tasks:
+ - set_fact: scratch_group=tag_env_{{ cluster_id }}
+ - add_host:
+ name: "{{ item }}"
+ groups: oo_hosts_to_terminate
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ with_items: groups[scratch_group] | default([]) | difference(['localhost'])
-- include: ../openshift-master/terminate.yml
- vars:
- oo_host_group_exp: 'groups["tag_env-host-type_{{ cluster_id }}-openshift-master"]'
+- include: ../terminate.yml
diff --git a/playbooks/aws/openshift-cluster/update.yml b/playbooks/aws/openshift-cluster/update.yml
index 90ecdc6ab..5e7ab4e58 100644
--- a/playbooks/aws/openshift-cluster/update.yml
+++ b/playbooks/aws/openshift-cluster/update.yml
@@ -1,13 +1,18 @@
---
-- hosts: "tag_env_{{ cluster_id }}"
- roles:
- - openshift_repos
- - os_update_latest
+- name: Populate oo_hosts_to_update group
+ hosts: localhost
+ gather_facts: no
+ vars_files:
+ - vars.yml
+ tasks:
+ - name: Evaluate oo_hosts_to_update
+ add_host:
+ name: "{{ item }}"
+ groups: oo_hosts_to_update
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ with_items: groups["tag_env-host-type_{{ cluster_id }}-openshift-master"] | union(groups["tag_env-host-type_{{ cluster_id }}-openshift-node"]) | default([])
-- include: ../openshift-master/config.yml
- vars:
- oo_host_group_exp: "groups[\"tag_env-host-type_{{ cluster_id }}-openshift-master\"]"
+- include: ../../common/openshift-cluster/update_repos_and_packages.yml
-- include: ../openshift-node/config.yml
- vars:
- oo_host_group_exp: "groups[\"tag_env-host-type_{{ cluster_id }}-openshift-node\"]"
+- include: config.yml
diff --git a/playbooks/aws/openshift-cluster/vars.defaults.yml b/playbooks/aws/openshift-cluster/vars.defaults.yml
new file mode 100644
index 000000000..ed97d539c
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/vars.defaults.yml
@@ -0,0 +1 @@
+---
diff --git a/playbooks/aws/openshift-cluster/vars.online.int.yml b/playbooks/aws/openshift-cluster/vars.online.int.yml
new file mode 100644
index 000000000..12f79a9c1
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/vars.online.int.yml
@@ -0,0 +1,9 @@
+---
+ec2_image: ami-906240f8
+ec2_image_name: libra-ops-rhel7*
+ec2_region: us-east-1
+ec2_keypair: mmcgrath_libra
+ec2_instance_type: m3.large
+ec2_security_groups: [ 'int-v3' ]
+ec2_vpc_subnet: subnet-987c0def
+ec2_assign_public_ip: yes
diff --git a/playbooks/aws/openshift-cluster/vars.online.prod.yml b/playbooks/aws/openshift-cluster/vars.online.prod.yml
new file mode 100644
index 000000000..12f79a9c1
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/vars.online.prod.yml
@@ -0,0 +1,9 @@
+---
+ec2_image: ami-906240f8
+ec2_image_name: libra-ops-rhel7*
+ec2_region: us-east-1
+ec2_keypair: mmcgrath_libra
+ec2_instance_type: m3.large
+ec2_security_groups: [ 'int-v3' ]
+ec2_vpc_subnet: subnet-987c0def
+ec2_assign_public_ip: yes
diff --git a/playbooks/aws/openshift-cluster/vars.online.stage.yml b/playbooks/aws/openshift-cluster/vars.online.stage.yml
new file mode 100644
index 000000000..12f79a9c1
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/vars.online.stage.yml
@@ -0,0 +1,9 @@
+---
+ec2_image: ami-906240f8
+ec2_image_name: libra-ops-rhel7*
+ec2_region: us-east-1
+ec2_keypair: mmcgrath_libra
+ec2_instance_type: m3.large
+ec2_security_groups: [ 'int-v3' ]
+ec2_vpc_subnet: subnet-987c0def
+ec2_assign_public_ip: yes
diff --git a/playbooks/aws/openshift-cluster/vars.yml b/playbooks/aws/openshift-cluster/vars.yml
index ed97d539c..07e453f89 100644
--- a/playbooks/aws/openshift-cluster/vars.yml
+++ b/playbooks/aws/openshift-cluster/vars.yml
@@ -1 +1,38 @@
---
+deployment_vars:
+ origin:
+ # fedora, since centos requires marketplace
+ image: ami-acd999c4
+ image_name:
+ region: us-east-1
+ ssh_user: fedora
+ sudo: yes
+ keypair: libra
+ type: m3.large
+ security_groups: [ 'public' ]
+ vpc_subnet:
+ assign_public_ip:
+ online:
+ # private ami
+ image: ami-7a9e9812
+ image_name: openshift-rhel7_*
+ region: us-east-1
+ ssh_user: root
+ sudo: no
+ keypair: libra
+ type: m3.large
+ security_groups: [ 'public' ]
+ vpc_subnet:
+ assign_public_ip:
+ enterprise:
+ # rhel-7.1, requires cloud access subscription
+ image: ami-10663b78
+ image_name:
+ region: us-east-1
+ ssh_user: ec2-user
+ sudo: yes
+ keypair: libra
+ type: m3.large
+ security_groups: [ 'public' ]
+ vpc_subnet:
+ assign_public_ip:
diff --git a/playbooks/aws/openshift-master/config.yml b/playbooks/aws/openshift-master/config.yml
index 1c4060eee..37ab4fbe6 100644
--- a/playbooks/aws/openshift-master/config.yml
+++ b/playbooks/aws/openshift-master/config.yml
@@ -1,24 +1,19 @@
---
-- name: Populate oo_masters_to_config host group if needed
+- name: Populate oo_masters_to_config host group
hosts: localhost
gather_facts: no
tasks:
- - name: "Evaluate oo_host_group_exp if it's set"
- add_host: "name={{ item }} groups=oo_masters_to_config"
- with_items: "{{ oo_host_group_exp | default('') }}"
- when: oo_host_group_exp is defined
+ - name: Evaluate oo_masters_to_config
+ add_host:
+ name: "{{ item }}"
+ groups: oo_masters_to_config
+ ansible_ssh_user: root
+ with_items: oo_host_group_exp | default([])
-- name: Configure instances
- hosts: oo_masters_to_config
+- include: ../../common/openshift-master/config.yml
vars:
+ openshift_cluster_id: "{{ cluster_id }}"
+ openshift_debug_level: 4
+ openshift_deployment_type: "{{ deployment_type }}"
openshift_hostname: "{{ ec2_private_ip_address }}"
openshift_public_hostname: "{{ ec2_ip_address }}"
- # TODO: this should be removed once openshift-sdn packages are available
- openshift_use_openshift_sdn: False
- vars_files:
- - vars.yml
- roles:
- - openshift_master
- #- openshift_sdn_master
- - pods
- - os_env_extras
diff --git a/playbooks/aws/openshift-master/launch.yml b/playbooks/aws/openshift-master/launch.yml
index 3d87879a0..6b3751682 100644
--- a/playbooks/aws/openshift-master/launch.yml
+++ b/playbooks/aws/openshift-master/launch.yml
@@ -4,14 +4,12 @@
connection: local
gather_facts: no
+# TODO: modify atomic_ami based on deployment_type
vars:
inst_region: us-east-1
atomic_ami: ami-86781fee
user_data_file: user_data.txt
- vars_files:
- - vars.yml
-
tasks:
- name: Launch instances
ec2:
@@ -40,7 +38,7 @@
Name: "{{ item.0 }}"
- name: Add other tags to instances
- ec2_tag: "resource={{ item.id }} region={{ inst_region }} state=present"
+ ec2_tag: resource={{ item.id }} region={{ inst_region }} state=present
with_items: ec2.instances
args:
tags: "{{ oo_new_inst_tags }}"
@@ -57,7 +55,7 @@
- ec2.instances
- name: Wait for ssh
- wait_for: "port=22 host={{ item.dns_name }}"
+ wait_for: port=22 host={{ item.dns_name }}
with_items: ec2.instances
- name: Wait for root user setup
diff --git a/playbooks/aws/openshift-master/terminate.yml b/playbooks/aws/openshift-master/terminate.yml
index fd15cf00f..07d9961bc 100644
--- a/playbooks/aws/openshift-master/terminate.yml
+++ b/playbooks/aws/openshift-master/terminate.yml
@@ -1,52 +1,2 @@
---
-- name: Populate oo_masters_to_terminate host group if needed
- hosts: localhost
- gather_facts: no
- tasks:
- - name: Evaluate oo_host_group_exp if it's set
- add_host: "name={{ item }} groups=oo_masters_to_terminate"
- with_items: "{{ oo_host_group_exp | default('') }}"
- when: oo_host_group_exp is defined
-
-- name: Gather facts for instances to terminate
- hosts: oo_masters_to_terminate
-
-- name: Terminate instances
- hosts: localhost
- connection: local
- gather_facts: no
- vars:
- host_vars: "{{ hostvars
- | oo_select_keys(groups['oo_masters_to_terminate']) }}"
- tasks:
- - name: Terminate instances
- ec2:
- state: absent
- instance_ids: ["{{ item.ec2_id }}"]
- region: "{{ item.ec2_region }}"
- ignore_errors: yes
- register: ec2_term
- with_items: host_vars
-
- # Fail if any of the instances failed to terminate with an error other
- # than 403 Forbidden
- - fail: msg=Terminating instance {{ item.item.ec2_id }} failed with message {{ item.msg }}
- when: "item.failed and not item.msg | search(\"error: EC2ResponseError: 403 Forbidden\")"
- with_items: ec2_term.results
-
- - name: Stop instance if termination failed
- ec2:
- state: stopped
- instance_ids: ["{{ item.item.ec2_id }}"]
- region: "{{ item.item.ec2_region }}"
- register: ec2_stop
- when: item.failed
- with_items: ec2_term.results
-
- - name: Rename stopped instances
- ec2_tag: resource={{ item.item.item.ec2_id }} region={{ item.item.item.ec2_region }} state=present
- args:
- tags:
- Name: "{{ item.item.item.ec2_tag_Name }}-terminate"
- with_items: ec2_stop.results
-
+- include: ../terminate.yml
diff --git a/playbooks/aws/openshift-master/vars.yml b/playbooks/aws/openshift-master/vars.yml
deleted file mode 100644
index c196b2fca..000000000
--- a/playbooks/aws/openshift-master/vars.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-openshift_debug_level: 4
-openshift_cluster_id: "{{ cluster_id }}"
diff --git a/playbooks/aws/openshift-node/config.yml b/playbooks/aws/openshift-node/config.yml
index b08ed7571..fc9b397b4 100644
--- a/playbooks/aws/openshift-node/config.yml
+++ b/playbooks/aws/openshift-node/config.yml
@@ -1,107 +1,25 @@
---
-- name: Populate oo_nodes_to_config host group if needed
+- name: Populate oo_nodes_to_config and oo_first_master host groups
hosts: localhost
gather_facts: no
tasks:
- - name: Evaluate oo_host_group_exp
- add_host: "name={{ item }} groups=oo_nodes_to_config"
- with_items: "{{ oo_host_group_exp | default('') }}"
- when: oo_host_group_exp is defined
- - add_host:
+ - name: Evaluate oo_nodes_to_config
+ add_host:
+ name: "{{ item }}"
+ groups: oo_nodes_to_config
+ ansible_ssh_user: root
+ with_items: oo_host_group_exp | default([])
+ - name: Evaluate oo_first_master
+ add_host:
name: "{{ groups['tag_env-host-type_' ~ cluster_id ~ '-openshift-master'][0] }}"
groups: oo_first_master
- when: oo_host_group_exp is defined
+ ansible_ssh_user: root
-- name: Gather and set facts for hosts to configure
- hosts: oo_nodes_to_config
- roles:
- - openshift_facts
- tasks:
- # Since the master is registering the nodes before they are configured, we
- # need to make sure to set the node properties beforehand if we do not want
- # the defaults
- - openshift_facts:
- role: "{{ item.role }}"
- local_facts: "{{ item.local_facts }}"
- with_items:
- - role: common
- local_facts:
- hostname: "{{ ec2_private_ip_address }}"
- public_hostname: "{{ ec2_ip_address }}"
- # TODO: this should be removed once openshift-sdn packages are available
- use_openshift_sdn: False
- - role: node
- local_facts:
- external_id: "{{ openshift_node_external_id | default(None) }}"
- resources_cpu: "{{ openshfit_node_resources_cpu | default(None) }}"
- resources_memory: "{{ openshfit_node_resources_memory | default(None) }}"
- pod_cidr: "{{ openshfit_node_pod_cidr | default(None) }}"
- labels: "{{ openshfit_node_labels | default(None) }}"
- annotations: "{{ openshfit_node_annotations | default(None) }}"
-
-
-- name: Register nodes
- hosts: oo_first_master
- vars:
- openshift_nodes: "{{ hostvars
- | oo_select_keys(groups['oo_nodes_to_config']) }}"
- roles:
- - openshift_register_nodes
- tasks:
- - name: Create local temp directory for syncing certs
- local_action: command /usr/bin/mktemp -d /tmp/openshift-ansible-XXXXXXX
- register: mktemp
-
- - name: Sync master certs to localhost
- synchronize:
- mode: pull
- checksum: yes
- src: /var/lib/openshift/openshift.local.certificates
- dest: "{{ mktemp.stdout }}"
-
-
-- name: Configure instances
- hosts: oo_nodes_to_config
- vars_files:
- - vars.yml
+- include: ../../common/openshift-node/config.yml
vars:
+ openshift_cluster_id: "{{ cluster_id }}"
+ openshift_debug_level: 4
+ openshift_deployment_type: "{{ deployment_type }}"
openshift_hostname: "{{ ec2_private_ip_address }}"
openshift_public_hostname: "{{ ec2_ip_address }}"
- sync_tmpdir: "{{ hostvars[groups['oo_first_master'][0]].mktemp.stdout }}"
- cert_parent_rel_path: openshift.local.certificates
- cert_rel_path: "{{ cert_parent_rel_path }}/node-{{ openshift.common.hostname }}"
- cert_base_path: /var/lib/openshift
- cert_parent_path: "{{ cert_base_path }}/{{ cert_parent_rel_path }}"
- cert_path: "{{ cert_base_path }}/{{ cert_rel_path }}"
- pre_tasks:
- - name: Ensure certificate directories exists
- file:
- path: "{{ item }}"
- state: directory
- with_items:
- - "{{ cert_path }}"
- - "{{ cert_parent_path }}/ca"
-
- # TODO: notify restart openshift-node and/or restart openshift-sdn-node,
- # possibly test service started time against certificate/config file
- # timestamps in openshift-node or openshift-sdn-node to trigger notify
- - name: Sync certs to nodes
- synchronize:
- checksum: yes
- src: "{{ item.src }}"
- dest: "{{ item.dest }}"
- owner: no
- group: no
- with_items:
- - src: "{{ sync_tmpdir }}/{{ cert_rel_path }}"
- dest: "{{ cert_parent_path }}"
- - src: "{{ sync_tmpdir }}/{{ cert_parent_rel_path }}/ca/cert.crt"
- dest: "{{ cert_parent_path }}/ca/cert.crt"
- - local_action: file name={{ sync_tmpdir }} state=absent
- run_once: true
- roles:
- - openshift_node
- #- openshift_sdn_node
- - os_env_extras
- - os_env_extras_node
diff --git a/playbooks/aws/openshift-node/launch.yml b/playbooks/aws/openshift-node/launch.yml
index b7ef593e7..36aee14ff 100644
--- a/playbooks/aws/openshift-node/launch.yml
+++ b/playbooks/aws/openshift-node/launch.yml
@@ -4,14 +4,12 @@
connection: local
gather_facts: no
+# TODO: modify atomic_ami based on deployment_type
vars:
inst_region: us-east-1
atomic_ami: ami-86781fee
user_data_file: user_data.txt
- vars_files:
- - vars.yml
-
tasks:
- name: Launch instances
ec2:
@@ -33,7 +31,7 @@
with_items: ec2.instances
- name: Add Name and environment tags to instances
- ec2_tag: "resource={{ item.1.id }} region={{ inst_region }} state=present"
+ ec2_tag: resource={{ item.1.id }} region={{ inst_region }} state=present
with_together:
- oo_new_inst_names
- ec2.instances
@@ -42,7 +40,7 @@
Name: "{{ item.0 }}"
- name: Add other tags to instances
- ec2_tag: "resource={{ item.id }} region={{ inst_region }} state=present"
+ ec2_tag: resource={{ item.id }} region={{ inst_region }} state=present
with_items: ec2.instances
args:
tags: "{{ oo_new_inst_tags }}"
@@ -59,7 +57,7 @@
- ec2.instances
- name: Wait for ssh
- wait_for: "port=22 host={{ item.dns_name }}"
+ wait_for: port=22 host={{ item.dns_name }}
with_items: ec2.instances
- name: Wait for root user setup
diff --git a/playbooks/aws/openshift-node/terminate.yml b/playbooks/aws/openshift-node/terminate.yml
index 1c0c77eb7..07d9961bc 100644
--- a/playbooks/aws/openshift-node/terminate.yml
+++ b/playbooks/aws/openshift-node/terminate.yml
@@ -1,52 +1,2 @@
---
-- name: Populate oo_nodes_to_terminate host group if needed
- hosts: localhost
- gather_facts: no
- tasks:
- - name: Evaluate oo_host_group_exp if it's set
- add_host: "name={{ item }} groups=oo_nodes_to_terminate"
- with_items: "{{ oo_host_group_exp | default('') }}"
- when: oo_host_group_exp is defined
-
-- name: Gather facts for instances to terminate
- hosts: oo_nodes_to_terminate
-
-- name: Terminate instances
- hosts: localhost
- connection: local
- gather_facts: no
- vars:
- host_vars: "{{ hostvars
- | oo_select_keys(groups['oo_nodes_to_terminate']) }}"
- tasks:
- - name: Terminate instances
- ec2:
- state: absent
- instance_ids: ["{{ item.ec2_id }}"]
- region: "{{ item.ec2_region }}"
- ignore_errors: yes
- register: ec2_term
- with_items: host_vars
-
- # Fail if any of the instances failed to terminate with an error other
- # than 403 Forbidden
- - fail: msg=Terminating instance {{ item.item.ec2_id }} failed with message {{ item.msg }}
- when: "item.failed and not item.msg | search(\"error: EC2ResponseError: 403 Forbidden\")"
- with_items: ec2_term.results
-
- - name: Stop instance if termination failed
- ec2:
- state: stopped
- instance_ids: ["{{ item.item.ec2_id }}"]
- region: "{{ item.item.ec2_region }}"
- register: ec2_stop
- when: item.failed
- with_items: ec2_term.results
-
- - name: Rename stopped instances
- ec2_tag: resource={{ item.item.item.ec2_id }} region={{ item.item.item.ec2_region }} state=present
- args:
- tags:
- Name: "{{ item.item.item.ec2_tag_Name }}-terminate"
- with_items: ec2_stop.results
-
+- include: ../terminate.yml
diff --git a/playbooks/aws/openshift-node/vars.yml b/playbooks/aws/openshift-node/vars.yml
deleted file mode 100644
index c196b2fca..000000000
--- a/playbooks/aws/openshift-node/vars.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-openshift_debug_level: 4
-openshift_cluster_id: "{{ cluster_id }}"
diff --git a/playbooks/aws/terminate.yml b/playbooks/aws/terminate.yml
new file mode 100644
index 000000000..e9767b260
--- /dev/null
+++ b/playbooks/aws/terminate.yml
@@ -0,0 +1,64 @@
+---
+- name: Populate oo_hosts_to_terminate host group
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - name: Evaluate oo_hosts_to_terminate
+ add_host: name={{ item }} groups=oo_hosts_to_terminate
+ with_items: oo_host_group_exp | default([])
+
+- name: Gather dynamic inventory variables for hosts to terminate
+ hosts: oo_hosts_to_terminate
+ gather_facts: no
+
+- name: Terminate instances
+ hosts: localhost
+ connection: local
+ gather_facts: no
+ vars:
+ host_vars: "{{ hostvars
+ | oo_select_keys(groups['oo_hosts_to_terminate']) }}"
+ tasks:
+ - name: Remove tags from instances
+ ec2_tag: resource={{ item.ec2_id }} region={{ item.ec2_region }} state=absent
+ args:
+ tags:
+ env: "{{ item['ec2_tag_env'] }}"
+ host-type: "{{ item['ec2_tag_host-type'] }}"
+ env-host-type: "{{ item['ec2_tag_env-host-type'] }}"
+ with_items: host_vars
+ when: "'oo_hosts_to_terminate' in groups"
+
+ - name: Terminate instances
+ ec2:
+ state: absent
+ instance_ids: ["{{ item.ec2_id }}"]
+ region: "{{ item.ec2_region }}"
+ ignore_errors: yes
+ register: ec2_term
+ with_items: host_vars
+ when: "'oo_hosts_to_terminate' in groups"
+
+ # Fail if any of the instances failed to terminate with an error other
+ # than 403 Forbidden
+  - fail: msg="Terminating instance {{ item.item.ec2_id }} failed with message {{ item.msg }}"
+ when: "'oo_hosts_to_terminate' in groups and item.failed and not item.msg | search(\"error: EC2ResponseError: 403 Forbidden\")"
+ with_items: ec2_term.results
+
+ - name: Stop instance if termination failed
+ ec2:
+ state: stopped
+ instance_ids: ["{{ item.item.ec2_id }}"]
+ region: "{{ item.item.ec2_region }}"
+ register: ec2_stop
+    when: "'oo_hosts_to_terminate' in groups and item.failed"
+    with_items: ec2_term.results
+
+ - name: Rename stopped instances
+ ec2_tag: resource={{ item.item.item.ec2_id }} region={{ item.item.item.ec2_region }} state=present
+ args:
+ tags:
+ Name: "{{ item.item.item.ec2_tag_Name }}-terminate"
+ with_items: ec2_stop.results
+ when: "'oo_hosts_to_terminate' in groups"
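The terminate/stop/rename sequence above exists because termination can fail (e.g. EC2ResponseError: 403 Forbidden on protected instances); the fallback then stops the instance and renames it for manual cleanup. The same flow expressed directly against boto, with the region, instance id, and tag value invented:

    import boto.ec2
    from boto.exception import EC2ResponseError

    def terminate_or_stop(region, instance_id, name_tag):
        # Try to terminate; if EC2 refuses, stop the instance and mark it.
        conn = boto.ec2.connect_to_region(region)
        try:
            conn.terminate_instances(instance_ids=[instance_id])
        except EC2ResponseError:
            conn.stop_instances(instance_ids=[instance_id])
            conn.create_tags([instance_id], {'Name': '%s-terminate' % name_tag})

    terminate_or_stop('us-east-1', 'i-deadbeef', 'example-master')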
diff --git a/playbooks/byo/openshift-master/config.yml b/playbooks/byo/openshift-master/config.yml
index 706f9285c..f61d277c6 100644
--- a/playbooks/byo/openshift-master/config.yml
+++ b/playbooks/byo/openshift-master/config.yml
@@ -1,9 +1,15 @@
---
-- name: Gather facts for node hosts
- hosts: nodes
+- name: Populate oo_masters_to_config host group
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - add_host:
+ name: "{{ item }}"
+ groups: oo_masters_to_config
+ with_items: groups['masters']
-- name: Configure master instances
- hosts: masters
- roles:
- - openshift_master
- - openshift_sdn_master
+- include: ../../common/openshift-master/config.yml
+ vars:
+ openshift_cluster_id: "{{ cluster_id | default('default') }}"
+ openshift_debug_level: 4
+ openshift_deployment_type: "{{ deployment_type }}"
diff --git a/playbooks/byo/openshift-node/config.yml b/playbooks/byo/openshift-node/config.yml
index 69ad7a840..d569827b4 100644
--- a/playbooks/byo/openshift-node/config.yml
+++ b/playbooks/byo/openshift-node/config.yml
@@ -1,79 +1,21 @@
---
-- name: Gather facts for node hosts
- hosts: nodes
- roles:
- - openshift_facts
+- name: Populate oo_nodes_to_config and oo_first_master host groups
+ hosts: localhost
+ gather_facts: no
tasks:
- # Since the master is registering the nodes before they are configured, we
- # need to make sure to set the node properties beforehand if we do not want
- # the defaults
- - openshift_facts:
- role: 'node'
- local_facts:
- hostname: "{{ openshift_hostname | default(None) }}"
- external_id: "{{ openshift_node_external_id | default(None) }}"
- resources_cpu: "{{ openshfit_node_resources_cpu | default(None) }}"
- resources_memory: "{{ openshfit_node_resources_memory | default(None) }}"
- pod_cidr: "{{ openshfit_node_pod_cidr | default(None) }}"
- labels: "{{ openshfit_node_labels | default(None) }}"
- annotations: "{{ openshfit_node_annotations | default(None) }}"
+ - name: Evaluate oo_nodes_to_config
+ add_host:
+ name: "{{ item }}"
+ groups: oo_nodes_to_config
+ with_items: groups.nodes
+ - name: Evaluate oo_first_master
+ add_host:
+ name: "{{ groups.masters[0] }}"
+ groups: oo_first_master
-- name: Register nodes
- hosts: masters[0]
+- include: ../../common/openshift-node/config.yml
vars:
- openshift_nodes: "{{ hostvars | oo_select_keys(groups['nodes']) }}"
- roles:
- - openshift_register_nodes
- tasks:
- - name: Create local temp directory for syncing certs
- local_action: command /usr/bin/mktemp -d /tmp/openshift-ansible-XXXXXXX
- register: mktemp
-
- - name: Sync master certs to localhost
- synchronize:
- mode: pull
- checksum: yes
- src: /var/lib/openshift/openshift.local.certificates
- dest: "{{ mktemp.stdout }}"
-
-
-- name: Configure node instances
- hosts: nodes
- vars:
- sync_tmpdir: "{{ hostvars[groups['masters'][0]].mktemp.stdout }}"
- cert_parent_rel_path: openshift.local.certificates
- cert_rel_path: "{{ cert_parent_rel_path }}/node-{{ openshift.common.hostname }}"
- cert_base_path: /var/lib/openshift
- cert_parent_path: "{{ cert_base_path }}/{{ cert_parent_rel_path }}"
- cert_path: "{{ cert_base_path }}/{{ cert_rel_path }}"
- openshift_sdn_master_url: http://{{ hostvars[groups['masters'][0]].openshift.common.hostname }}:4001
- pre_tasks:
- - name: Ensure certificate directories exists
- file:
- path: "{{ item }}"
- state: directory
- with_items:
- - "{{ cert_path }}"
- - "{{ cert_parent_path }}/ca"
-
- # TODO: notify restart openshift-node and/or restart openshift-sdn-node,
- # possibly test service started time against certificate/config file
- # timestamps in openshift-node or openshift-sdn-node to trigger notify
- - name: Sync certs to nodes
- synchronize:
- checksum: yes
- src: "{{ item.src }}"
- dest: "{{ item.dest }}"
- owner: no
- group: no
- with_items:
- - src: "{{ sync_tmpdir }}/{{ cert_rel_path }}"
- dest: "{{ cert_parent_path }}"
- - src: "{{ sync_tmpdir }}/{{ cert_parent_rel_path }}/ca/cert.crt"
- dest: "{{ cert_parent_path }}/ca/cert.crt"
- - local_action: file name={{ sync_tmpdir }} state=absent
- run_once: true
- roles:
- - openshift_node
- - openshift_sdn_node
+ openshift_cluster_id: "{{ cluster_id | default('default') }}"
+ openshift_debug_level: 4
+ openshift_deployment_type: "{{ deployment_type }}"
diff --git a/playbooks/byo/openshift_facts.yml b/playbooks/byo/openshift_facts.yml
new file mode 100644
index 000000000..cd282270f
--- /dev/null
+++ b/playbooks/byo/openshift_facts.yml
@@ -0,0 +1,10 @@
+---
+- name: Gather OpenShift facts
+ hosts: all
+ gather_facts: no
+ roles:
+ - openshift_facts
+ tasks:
+ - openshift_facts:
+ register: result
+ - debug: var=result
diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml
new file mode 100644
index 000000000..14ffa928f
--- /dev/null
+++ b/playbooks/common/openshift-cluster/config.yml
@@ -0,0 +1,4 @@
+---
+- include: ../openshift-master/config.yml
+
+- include: ../openshift-node/config.yml
diff --git a/playbooks/libvirt/openshift-master/filter_plugins b/playbooks/common/openshift-cluster/filter_plugins
index 99a95e4ca..99a95e4ca 120000
--- a/playbooks/libvirt/openshift-master/filter_plugins
+++ b/playbooks/common/openshift-cluster/filter_plugins
diff --git a/playbooks/libvirt/openshift-master/roles b/playbooks/common/openshift-cluster/roles
index 20c4c58cf..20c4c58cf 120000
--- a/playbooks/libvirt/openshift-master/roles
+++ b/playbooks/common/openshift-cluster/roles
diff --git a/playbooks/common/openshift-cluster/set_master_launch_facts_tasks.yml b/playbooks/common/openshift-cluster/set_master_launch_facts_tasks.yml
new file mode 100644
index 000000000..118727273
--- /dev/null
+++ b/playbooks/common/openshift-cluster/set_master_launch_facts_tasks.yml
@@ -0,0 +1,11 @@
+---
+- set_fact: k8s_type="master"
+
+- name: Generate master instance name(s)
+ set_fact:
+ scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ '%05x' | format(1048576 | random) }}"
+ register: master_names_output
+ with_sequence: start=1 end={{ num_masters }}
+
+- set_fact:
+ master_names: "{{ master_names_output.results | oo_collect('ansible_facts') | oo_collect('scratch_name') }}"
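
The pattern above, a set_fact loop whose per-iteration facts are registered and then flattened, relies on the oo_collect filter from filter_plugins/oo_filters.py. Assuming oo_collect acts like a map over one attribute, the same collection can be sketched standalone with core Jinja2 filters:

    ---
    - hosts: localhost
      gather_facts: no
      tasks:
        - set_fact:
            scratch_name: "demo-master-{{ '%05x' | format(1048576 | random) }}"
          register: names_output
          with_sequence: start=1 end=3
        # Each loop result carries its ansible_facts; collect just the names
        - debug:
            msg: "{{ names_output.results | map(attribute='ansible_facts.scratch_name') | list }}"
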
diff --git a/playbooks/common/openshift-cluster/set_node_launch_facts_tasks.yml b/playbooks/common/openshift-cluster/set_node_launch_facts_tasks.yml
new file mode 100644
index 000000000..162315d46
--- /dev/null
+++ b/playbooks/common/openshift-cluster/set_node_launch_facts_tasks.yml
@@ -0,0 +1,11 @@
+---
+- set_fact: k8s_type="node"
+
+- name: Generate node instance name(s)
+ set_fact:
+ scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ '%05x' | format(1048576 | random) }}"
+ register: node_names_output
+ with_sequence: start=1 end={{ num_nodes }}
+
+- set_fact:
+ node_names: "{{ node_names_output.results | oo_collect('ansible_facts') | oo_collect('scratch_name') }}"
diff --git a/playbooks/common/openshift-cluster/update_repos_and_packages.yml b/playbooks/common/openshift-cluster/update_repos_and_packages.yml
new file mode 100644
index 000000000..e92c6f1ee
--- /dev/null
+++ b/playbooks/common/openshift-cluster/update_repos_and_packages.yml
@@ -0,0 +1,7 @@
+---
+- hosts: oo_hosts_to_update
+ vars:
+ openshift_deployment_type: "{{ deployment_type }}"
+ roles:
+ - openshift_repos
+ - os_update_latest
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
new file mode 100644
index 000000000..05822d118
--- /dev/null
+++ b/playbooks/common/openshift-master/config.yml
@@ -0,0 +1,19 @@
+---
+- name: Configure master instances
+ hosts: oo_masters_to_config
+ vars:
+ openshift_sdn_master_url: https://{{ openshift.common.hostname }}:4001
+ roles:
+ - openshift_master
+ - { role: openshift_sdn_master, when: openshift.common.use_openshift_sdn | bool }
+ tasks:
+ - name: Create group for deployment type
+ group_by: key=oo_masters_deployment_type_{{ openshift.common.deployment_type }}
+ changed_when: False
+
+# Additional instance config for online deployments
+- name: Additional instance config
+ hosts: oo_masters_deployment_type_online
+ roles:
+ - pods
+ - os_env_extras
diff --git a/playbooks/libvirt/openshift-node/filter_plugins b/playbooks/common/openshift-master/filter_plugins
index 99a95e4ca..99a95e4ca 120000
--- a/playbooks/libvirt/openshift-node/filter_plugins
+++ b/playbooks/common/openshift-master/filter_plugins
diff --git a/playbooks/common/openshift-master/roles b/playbooks/common/openshift-master/roles
new file mode 120000
index 000000000..e2b799b9d
--- /dev/null
+++ b/playbooks/common/openshift-master/roles
@@ -0,0 +1 @@
+../../../roles/
\ No newline at end of file
diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml
new file mode 100644
index 000000000..5a6c89489
--- /dev/null
+++ b/playbooks/common/openshift-node/config.yml
@@ -0,0 +1,127 @@
+---
+- name: Gather and set facts for node hosts
+ hosts: oo_nodes_to_config
+ roles:
+ - openshift_facts
+ tasks:
+ # Since the master is registering the nodes before they are configured, we
+ # need to make sure to set the node properties beforehand if we do not want
+ # the defaults
+ - openshift_facts:
+ role: "{{ item.role }}"
+ local_facts: "{{ item.local_facts }}"
+ with_items:
+ - role: common
+ local_facts:
+ hostname: "{{ openshift_hostname | default(None) }}"
+ public_hostname: "{{ openshift_public_hostname | default(None) }}"
+ - role: node
+ local_facts:
+ external_id: "{{ openshift_node_external_id | default(None) }}"
+ resources_cpu: "{{ openshift_node_resources_cpu | default(None) }}"
+ resources_memory: "{{ openshift_node_resources_memory | default(None) }}"
+ pod_cidr: "{{ openshift_node_pod_cidr | default(None) }}"
+ labels: "{{ openshift_node_labels | default(None) }}"
+ annotations: "{{ openshift_node_annotations | default(None) }}"
+ deployment_type: "{{ openshift_deployment_type }}"
+
+
+- name: Create temp directory for syncing certs
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - name: Create local temp directory for syncing certs
+ local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX
+ register: mktemp
+ changed_when: False
+
+
+- name: Register nodes
+ hosts: oo_first_master
+ vars:
+ openshift_nodes: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']) }}"
+ sync_tmpdir: "{{ hostvars.localhost.mktemp.stdout }}"
+ roles:
+ - openshift_register_nodes
+ tasks:
+  # TODO: update so that we only sync necessary configs/directories; currently
+  # we sync for all nodes in oo_nodes_to_config. We will need to inspect the
+  # configs on the nodes to determine whether to sync or not.
+ - name: Create the temp directory on the master
+ file:
+ path: "{{ sync_tmpdir }}"
+ owner: "{{ ansible_ssh_user }}"
+ mode: 0700
+ state: directory
+ changed_when: False
+
+ - name: Create a tarball of the node config directories
+ command: tar -czvf {{ sync_tmpdir }}/{{ item.openshift.common.hostname }}.tgz ./
+ args:
+ chdir: "{{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}"
+ with_items: openshift_nodes
+ changed_when: False
+
+ - name: Retrieve the node config tarballs from the master
+ fetch:
+ src: "{{ sync_tmpdir }}/{{ item.openshift.common.hostname }}.tgz"
+ dest: "{{ sync_tmpdir }}/"
+ flat: yes
+ fail_on_missing: yes
+ validate_checksum: yes
+ with_items: openshift_nodes
+ changed_when: False
+
+
+- name: Configure node instances
+ hosts: oo_nodes_to_config
+ gather_facts: no
+ vars:
+ sync_tmpdir: "{{ hostvars.localhost.mktemp.stdout }}"
+ openshift_sdn_master_url: "https://{{ hostvars[groups['oo_first_master'][0]].openshift.common.hostname }}:4001"
+ pre_tasks:
+ - name: Ensure certificate directory exists
+ file:
+ path: "{{ openshift_node_cert_dir }}"
+ state: directory
+
+ # TODO: notify restart openshift-node and/or restart openshift-sdn-node,
+ # possibly test service started time against certificate/config file
+ # timestamps in openshift-node or openshift-sdn-node to trigger notify
+ - name: Unarchive the tarball on the node
+ unarchive:
+ src: "{{ sync_tmpdir }}/{{ openshift.common.hostname }}.tgz"
+ dest: "{{ openshift_node_cert_dir }}"
+ roles:
+ - openshift_node
+ - { role: openshift_sdn_node, when: openshift.common.use_openshift_sdn | bool }
+ tasks:
+ - name: Create group for deployment type
+ group_by: key=oo_nodes_deployment_type_{{ openshift.common.deployment_type }}
+ changed_when: False
+
+- name: Delete the temporary directory on the master
+ hosts: oo_first_master
+ gather_facts: no
+ vars:
+ sync_tmpdir: "{{ hostvars.localhost.mktemp.stdout }}"
+ tasks:
+ - file: name={{ sync_tmpdir }} state=absent
+ changed_when: False
+
+
+- name: Delete temporary directory on localhost
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - file: name={{ mktemp.stdout }} state=absent
+ changed_when: False
+
+
+# Additional config for online type deployments
+- name: Additional instance config
+ hosts: oo_nodes_deployment_type_online
+ gather_facts: no
+ roles:
+ - os_env_extras
+ - os_env_extras_node
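
The certificate distribution above is a three-hop relay: tar on the first master, fetch to the control host, unarchive onto each node. A stripped-down sketch with placeholder paths and group names (not the repo's real ones):

    ---
    - hosts: first_master
      tasks:
        - command: tar -czf /tmp/demo.tgz ./
          args:
            chdir: /etc/demo-certs
        - fetch:
            src: /tmp/demo.tgz
            dest: /tmp/sync/
            flat: yes

    - hosts: nodes
      tasks:
        # unarchive copies the local tarball to the node and extracts it there
        - unarchive:
            src: /tmp/sync/demo.tgz
            dest: /etc/demo-certs
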
diff --git a/playbooks/common/openshift-node/filter_plugins b/playbooks/common/openshift-node/filter_plugins
new file mode 120000
index 000000000..99a95e4ca
--- /dev/null
+++ b/playbooks/common/openshift-node/filter_plugins
@@ -0,0 +1 @@
+../../../filter_plugins
\ No newline at end of file
diff --git a/playbooks/common/openshift-node/roles b/playbooks/common/openshift-node/roles
new file mode 120000
index 000000000..e2b799b9d
--- /dev/null
+++ b/playbooks/common/openshift-node/roles
@@ -0,0 +1 @@
+../../../roles/
\ No newline at end of file
diff --git a/playbooks/gce/openshift-cluster/config.yml b/playbooks/gce/openshift-cluster/config.yml
new file mode 100644
index 000000000..8b8490246
--- /dev/null
+++ b/playbooks/gce/openshift-cluster/config.yml
@@ -0,0 +1,37 @@
+---
+# TODO: fix firewall-related bug with GCE and origin, since GCE is overriding
+# /etc/sysconfig/iptables
+- name: Populate oo_masters_to_config host group
+ hosts: localhost
+ gather_facts: no
+ vars_files:
+ - vars.yml
+ tasks:
+ - name: Evaluate oo_masters_to_config
+ add_host:
+ name: "{{ item }}"
+ groups: oo_masters_to_config
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ with_items: groups["tag_env-host-type-{{ cluster_id }}-openshift-master"] | default([])
+ - name: Evaluate oo_nodes_to_config
+ add_host:
+ name: "{{ item }}"
+ groups: oo_nodes_to_config
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ with_items: groups["tag_env-host-type-{{ cluster_id }}-openshift-node"] | default([])
+ - name: Evaluate oo_first_master
+ add_host:
+ name: "{{ groups['tag_env-host-type-' ~ cluster_id ~ '-openshift-master'][0] }}"
+ groups: oo_first_master
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ when: "'tag_env-host-type-{{ cluster_id }}-openshift-master' in groups"
+
+- include: ../../common/openshift-cluster/config.yml
+ vars:
+ openshift_cluster_id: "{{ cluster_id }}"
+ openshift_debug_level: 4
+ openshift_deployment_type: "{{ deployment_type }}"
+ openshift_hostname: "{{ gce_private_ip }}"
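
Note the `| default([])` on each group lookup above: it keeps add_host from failing when the dynamic inventory has not (yet) produced the tag group. The guard in isolation:

    ---
    - hosts: localhost
      gather_facts: no
      tasks:
        # A harmless no-op when the tag group is absent from inventory
        - add_host:
            name: "{{ item }}"
            groups: oo_demo_hosts
          with_items: groups['tag_env-host-type-demo-openshift-master'] | default([])
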
diff --git a/playbooks/gce/openshift-cluster/launch.yml b/playbooks/gce/openshift-cluster/launch.yml
index 14cdd2537..771f51e91 100644
--- a/playbooks/gce/openshift-cluster/launch.yml
+++ b/playbooks/gce/openshift-cluster/launch.yml
@@ -4,59 +4,25 @@
connection: local
gather_facts: no
vars_files:
- - vars.yml
+ - vars.yml
tasks:
- - set_fact: k8s_type="master"
-
- - name: Generate master instance names(s)
- set_fact: scratch={{ cluster_id }}-{{ k8s_type }}-{{ '%05x' |format( 1048576 |random) }}
- register: master_names_output
- with_sequence: start=1 end={{ num_masters }}
-
- # These set_fact's cannot be combined
- - set_fact:
- master_names_string: "{% for item in master_names_output.results %}{{ item.ansible_facts.scratch }} {% endfor %}"
-
- - set_fact:
- master_names: "{{ master_names_string.strip().split(' ') }}"
-
- - include: launch_instances.yml
- vars:
- instances: "{{ master_names }}"
- cluster: "{{ cluster_id }}"
- type: "{{ k8s_type }}"
-
- - set_fact: k8s_type="node"
-
- - name: Generate node instance names(s)
- set_fact: scratch={{ cluster_id }}-{{ k8s_type }}-{{ '%05x' |format( 1048576 |random) }}
- register: node_names_output
- with_sequence: start=1 end={{ num_nodes }}
-
- # These set_fact's cannot be combined
- - set_fact:
- node_names_string: "{% for item in node_names_output.results %}{{ item.ansible_facts.scratch }} {% endfor %}"
-
- - set_fact:
- node_names: "{{ node_names_string.strip().split(' ') }}"
-
- - include: launch_instances.yml
- vars:
- instances: "{{ node_names }}"
- cluster: "{{ cluster_id }}"
- type: "{{ k8s_type }}"
-
-- hosts: "tag_env-{{ cluster_id }}"
- roles:
- - openshift_repos
- - os_update_latest
-
-- include: ../openshift-master/config.yml
- vars:
- oo_host_group_exp: "groups[\"tag_env-host-type-{{ cluster_id }}-openshift-master\"]"
-
-- include: ../openshift-node/config.yml
- vars:
- oo_host_group_exp: "groups[\"tag_env-host-type-{{ cluster_id }}-openshift-node\"]"
+ - fail: msg="Deployment type not supported for gce provider yet"
+ when: deployment_type == 'enterprise'
+
+ - include: ../../common/openshift-cluster/set_master_launch_facts_tasks.yml
+ - include: tasks/launch_instances.yml
+ vars:
+ instances: "{{ master_names }}"
+ cluster: "{{ cluster_id }}"
+ type: "{{ k8s_type }}"
+
+ - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml
+ - include: tasks/launch_instances.yml
+ vars:
+ instances: "{{ node_names }}"
+ cluster: "{{ cluster_id }}"
+ type: "{{ k8s_type }}"
+
+- include: update.yml
- include: list.yml
diff --git a/playbooks/gce/openshift-cluster/list.yml b/playbooks/gce/openshift-cluster/list.yml
index 1124b0ea3..962381306 100644
--- a/playbooks/gce/openshift-cluster/list.yml
+++ b/playbooks/gce/openshift-cluster/list.yml
@@ -2,16 +2,23 @@
- name: Generate oo_list_hosts group
hosts: localhost
gather_facts: no
+ vars_files:
+ - vars.yml
tasks:
- set_fact: scratch_group=tag_env-{{ cluster_id }}
when: cluster_id != ''
- set_fact: scratch_group=all
- when: scratch_group is not defined
- - add_host: name={{ item }} groups=oo_list_hosts
- with_items: groups[scratch_group] | difference(['localhost']) | difference(groups.status_terminated)
+ when: cluster_id == ''
+ - add_host:
+ name: "{{ item }}"
+ groups: oo_list_hosts
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ with_items: groups[scratch_group] | default([]) | difference(['localhost']) | difference(groups.status_terminated)
- name: List Hosts
hosts: oo_list_hosts
gather_facts: no
tasks:
- - debug: msg="public:{{hostvars[inventory_hostname].gce_public_ip}} private:{{hostvars[inventory_hostname].gce_private_ip}}"
+ - debug:
+ msg: "public ip:{{ hostvars[inventory_hostname].gce_public_ip }} private ip:{{ hostvars[inventory_hostname].gce_private_ip }}"
diff --git a/playbooks/gce/openshift-cluster/launch_instances.yml b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml
index b4f33bd87..9a9848f05 100644
--- a/playbooks/gce/openshift-cluster/launch_instances.yml
+++ b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml
@@ -2,41 +2,38 @@
# TODO: when we are ready to go to ansible 1.9+ support only, we can update
# the gce task to use the disk_auto_delete parameter to avoid having to delete
# the disk as a separate step on termination
-
-- set_fact:
- machine_type: "{{ lookup('env', 'gce_machine_type') |default('n1-standard-1', true) }}"
- machine_image: "{{ lookup('env', 'gce_machine_image') |default('libra-rhel7', true) }}"
-
- name: Launch instance(s)
gce:
instance_names: "{{ instances }}"
- machine_type: "{{ machine_type }}"
- image: "{{ machine_image }}"
+ machine_type: "{{ lookup('env', 'gce_machine_type') | default('n1-standard-1', true) }}"
+ image: "{{ lookup('env', 'gce_machine_image') | default(deployment_vars[deployment_type].image, true) }}"
service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
project_id: "{{ lookup('env', 'gce_project_id') }}"
tags:
- - "created-by-{{ lookup('env', 'LOGNAME') |default(cluster, true) }}"
- - "env-{{ cluster }}"
- - "host-type-{{ type }}"
- - "env-host-type-{{ cluster }}-openshift-{{ type }}"
+ - created-by-{{ lookup('env', 'LOGNAME') |default(cluster, true) }}
+ - env-{{ cluster }}
+ - host-type-{{ type }}
+ - env-host-type-{{ cluster }}-openshift-{{ type }}
register: gce
- name: Add new instances to groups and set variables needed
add_host:
hostname: "{{ item.name }}"
ansible_ssh_host: "{{ item.public_ip }}"
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
groups: "{{ item.tags | oo_prepend_strings_in_list('tag_') | join(',') }}"
gce_public_ip: "{{ item.public_ip }}"
gce_private_ip: "{{ item.private_ip }}"
with_items: gce.instance_data
- name: Wait for ssh
- wait_for: "port=22 host={{ item.public_ip }}"
+ wait_for: port=22 host={{ item.public_ip }}
with_items: gce.instance_data
-- name: Wait for root user setup
- command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null root@{{ item.public_ip }} echo root user is setup"
+- name: Wait for user setup
+ command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null {{ hostvars[item.name].ansible_ssh_user }}@{{ item.public_ip }} echo {{ hostvars[item.name].ansible_ssh_user }} user is setup"
register: result
until: result.rc == 0
retries: 20
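
The register/until/retries combination above polls until the freshly booted instance accepts ssh for the configured user. The same loop in minimal form (host and user are placeholders):

    ---
    - hosts: localhost
      gather_facts: no
      tasks:
        - command: ssh -o StrictHostKeyChecking=no -o ConnectTimeout=10 demo@192.0.2.10 true
          register: result
          until: result.rc == 0
          retries: 20
          delay: 5
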
diff --git a/playbooks/gce/openshift-cluster/terminate.yml b/playbooks/gce/openshift-cluster/terminate.yml
index 0281ae953..abe6a4c95 100644
--- a/playbooks/gce/openshift-cluster/terminate.yml
+++ b/playbooks/gce/openshift-cluster/terminate.yml
@@ -1,20 +1,34 @@
---
- name: Terminate instance(s)
hosts: localhost
-
+ gather_facts: no
vars_files:
- - vars.yml
+ - vars.yml
+ tasks:
+ - set_fact: scratch_group=tag_env-host-type-{{ cluster_id }}-openshift-node
+ - add_host:
+ name: "{{ item }}"
+ groups: oo_nodes_to_terminate
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ with_items: groups[scratch_group] | default([]) | difference(['localhost']) | difference(groups.status_terminated)
+
+ - set_fact: scratch_group=tag_env-host-type-{{ cluster_id }}-openshift-master
+ - add_host:
+ name: "{{ item }}"
+ groups: oo_masters_to_terminate
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ with_items: groups[scratch_group] | default([]) | difference(['localhost']) | difference(groups.status_terminated)
- include: ../openshift-node/terminate.yml
vars:
- oo_host_group_exp: 'groups["tag_env-host-type-{{ cluster_id }}-openshift-node"]'
gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
gce_pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
gce_project_id: "{{ lookup('env', 'gce_project_id') }}"
- include: ../openshift-master/terminate.yml
vars:
- oo_host_group_exp: 'groups["tag_env-host-type-{{ cluster_id }}-openshift-master"]'
gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
gce_pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
gce_project_id: "{{ lookup('env', 'gce_project_id') }}"
diff --git a/playbooks/gce/openshift-cluster/update.yml b/playbooks/gce/openshift-cluster/update.yml
index 973e4c3ef..9ebf39a13 100644
--- a/playbooks/gce/openshift-cluster/update.yml
+++ b/playbooks/gce/openshift-cluster/update.yml
@@ -1,13 +1,18 @@
---
-- hosts: "tag_env-{{ cluster_id }}"
- roles:
- - openshift_repos
- - os_update_latest
+- name: Populate oo_hosts_to_update group
+ hosts: localhost
+ gather_facts: no
+ vars_files:
+ - vars.yml
+ tasks:
+ - name: Evaluate oo_hosts_to_update
+ add_host:
+ name: "{{ item }}"
+ groups: oo_hosts_to_update
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ with_items: groups["tag_env-host-type-{{ cluster_id }}-openshift-master"] | union(groups["tag_env-host-type-{{ cluster_id }}-openshift-node"]) | default([])
-- include: ../openshift-master/config.yml
- vars:
- oo_host_group_exp: "groups[\"tag_env-host-type-{{ cluster_id }}-openshift-master\"]"
+- include: ../../common/openshift-cluster/update_repos_and_packages.yml
-- include: ../openshift-node/config.yml
- vars:
- oo_host_group_exp: "groups[\"tag_env-host-type-{{ cluster_id }}-openshift-node\"]"
+- include: config.yml
diff --git a/playbooks/gce/openshift-cluster/vars.yml b/playbooks/gce/openshift-cluster/vars.yml
index ed97d539c..ae33083b9 100644
--- a/playbooks/gce/openshift-cluster/vars.yml
+++ b/playbooks/gce/openshift-cluster/vars.yml
@@ -1 +1,15 @@
---
+deployment_vars:
+ origin:
+ image: centos-7
+ ssh_user:
+ sudo: yes
+ online:
+ image: libra-rhel7
+ ssh_user: root
+ sudo: no
+ enterprise:
+ image: rhel-7
+ ssh_user:
+ sudo: yes
+
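
These deployment_vars are consumed by indexing with deployment_type and falling back when a field is empty, as in the `default(ansible_ssh_user, true)` expressions above. A sketch of the lookup on its own:

    ---
    - hosts: localhost
      gather_facts: no
      vars_files:
        - vars.yml
      vars:
        deployment_type: origin
      tasks:
        # The second argument 'true' to default() makes empty strings fall back too
        - debug:
            msg: "image={{ deployment_vars[deployment_type].image }}, sudo={{ deployment_vars[deployment_type].sudo }}"
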
diff --git a/playbooks/gce/openshift-master/config.yml b/playbooks/gce/openshift-master/config.yml
index 857da0763..af6000bc8 100644
--- a/playbooks/gce/openshift-master/config.yml
+++ b/playbooks/gce/openshift-master/config.yml
@@ -1,20 +1,18 @@
---
-- name: master/config.yml, populate oo_masters_to_config host group if needed
+- name: Populate oo_masters_to_config host group
hosts: localhost
gather_facts: no
tasks:
- - name: "Evaluate oo_host_group_exp if it's set"
- add_host: "name={{ item }} groups=oo_masters_to_config"
- with_items: "{{ oo_host_group_exp | default('') }}"
- when: oo_host_group_exp is defined
+ - name: Evaluate oo_masters_to_config
+ add_host:
+ name: "{{ item }}"
+ groups: oo_masters_to_config
+ ansible_ssh_user: root
+ with_items: oo_host_group_exp | default([])
-- name: "Configure instances"
- hosts: oo_masters_to_config
+- include: ../../common/openshift-master/config.yml
vars:
+ openshift_cluster_id: "{{ cluster_id }}"
+ openshift_debug_level: 4
+ openshift_deployment_type: "{{ deployment_type }}"
openshift_hostname: "{{ gce_private_ip }}"
- vars_files:
- - vars.yml
- roles:
- - openshift_master
- - pods
- - os_env_extras
diff --git a/playbooks/gce/openshift-master/launch.yml b/playbooks/gce/openshift-master/launch.yml
index 287596002..ef10b6cf0 100644
--- a/playbooks/gce/openshift-master/launch.yml
+++ b/playbooks/gce/openshift-master/launch.yml
@@ -8,14 +8,12 @@
connection: local
gather_facts: no
+# TODO: modify image based on deployment_type
vars:
inst_names: "{{ oo_new_inst_names }}"
machine_type: n1-standard-1
image: libra-rhel7
- vars_files:
- - vars.yml
-
tasks:
- name: Launch instances
gce:
@@ -37,7 +35,7 @@
with_items: gce.instance_data
- name: Wait for ssh
- wait_for: "port=22 host={{ item.public_ip }}"
+ wait_for: port=22 host={{ item.public_ip }}
with_items: gce.instance_data
- name: Wait for root user setup
diff --git a/playbooks/gce/openshift-master/terminate.yml b/playbooks/gce/openshift-master/terminate.yml
index 8319774f8..452ac5199 100644
--- a/playbooks/gce/openshift-master/terminate.yml
+++ b/playbooks/gce/openshift-master/terminate.yml
@@ -3,10 +3,9 @@
hosts: localhost
gather_facts: no
tasks:
- - name: Evaluate oo_host_group_exp if it's set
- add_host: "name={{ item }} groups=oo_masters_to_terminate"
- with_items: "{{ oo_host_group_exp | default('') }}"
- when: oo_host_group_exp is defined
+ - name: Evaluate oo_masters_to_terminate
+ add_host: name={{ item }} groups=oo_masters_to_terminate
+ with_items: oo_host_group_exp | default([])
- name: Terminate master instances
hosts: localhost
@@ -22,6 +21,7 @@
instance_names: "{{ groups['oo_masters_to_terminate'] }}"
disks: "{{ groups['oo_masters_to_terminate'] }}"
register: gce
+ when: "'oo_masters_to_terminate' in groups"
- name: Remove disks of instances
gce_pd:
@@ -32,5 +32,4 @@
zone: "{{ gce.zone }}"
state: absent
with_items: gce.instance_names
-
-
+ when: "'oo_masters_to_terminate' in groups"
diff --git a/playbooks/gce/openshift-master/vars.yml b/playbooks/gce/openshift-master/vars.yml
deleted file mode 100644
index c196b2fca..000000000
--- a/playbooks/gce/openshift-master/vars.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-openshift_debug_level: 4
-openshift_cluster_id: "{{ cluster_id }}"
diff --git a/playbooks/gce/openshift-node/config.yml b/playbooks/gce/openshift-node/config.yml
index 771cc3a94..5b1601176 100644
--- a/playbooks/gce/openshift-node/config.yml
+++ b/playbooks/gce/openshift-node/config.yml
@@ -1,100 +1,24 @@
---
-- name: node/config.yml, populate oo_nodes_to_config host group if needed
+- name: Populate oo_nodes_to_config and oo_first_master host groups
hosts: localhost
gather_facts: no
tasks:
- - name: Evaluate oo_host_group_exp
- add_host: "name={{ item }} groups=oo_nodes_to_config"
- with_items: "{{ oo_host_group_exp | default('') }}"
- when: oo_host_group_exp is defined
- - add_host:
+ - name: Evaluate oo_nodes_to_config
+ add_host:
+ name: "{{ item }}"
+ groups: oo_nodes_to_config
+ ansible_ssh_user: root
+ with_items: oo_host_group_exp | default([])
+ - name: Evaluate oo_first_master
+ add_host:
name: "{{ groups['tag_env-host-type-' ~ cluster_id ~ '-openshift-master'][0] }}"
groups: oo_first_master
- when: oo_host_group_exp is defined
+ ansible_ssh_user: root
-- name: Gather and set facts for hosts to configure
- hosts: oo_nodes_to_config
- roles:
- - openshift_facts
- tasks:
- # Since the master is registering the nodes before they are configured, we
- # need to make sure to set the node properties beforehand if we do not want
- # the defaults
- - openshift_facts:
- role: "{{ item.role }}"
- local_facts: "{{ item.local_facts }}"
- with_items:
- - role: common
- local_facts:
- hostname: "{{ gce_private_ip }}"
- - role: node
- local_facts:
- external_id: "{{ openshift_node_external_id | default(None) }}"
- resources_cpu: "{{ openshfit_node_resources_cpu | default(None) }}"
- resources_memory: "{{ openshfit_node_resources_memory | default(None) }}"
- pod_cidr: "{{ openshfit_node_pod_cidr | default(None) }}"
- labels: "{{ openshfit_node_labels | default(None) }}"
- annotations: "{{ openshfit_node_annotations | default(None) }}"
-
-
-- name: Register nodes
- hosts: oo_first_master
- vars:
- openshift_nodes: "{{ hostvars
- | oo_select_keys(groups['oo_nodes_to_config']) }}"
- roles:
- - openshift_register_nodes
- tasks:
- - name: Create local temp directory for syncing certs
- local_action: command /usr/bin/mktemp -d /tmp/openshift-ansible-XXXXXXX
- register: mktemp
-
- - name: Sync master certs to localhost
- synchronize:
- mode: pull
- checksum: yes
- src: /var/lib/openshift/openshift.local.certificates
- dest: "{{ mktemp.stdout }}"
-
-- name: Configure instances
- hosts: oo_nodes_to_config
- vars_files:
- - vars.yml
+- include: ../../common/openshift-node/config.yml
vars:
- sync_tmpdir: "{{ hostvars[groups['oo_first_master'][0]].mktemp.stdout }}"
- cert_parent_rel_path: openshift.local.certificates
- cert_rel_path: "{{ cert_parent_rel_path }}/node-{{ openshift.common.hostname }}"
- cert_base_path: /var/lib/openshift
- cert_parent_path: "{{ cert_base_path }}/{{ cert_parent_rel_path }}"
- cert_path: "{{ cert_base_path }}/{{ cert_rel_path }}"
- pre_tasks:
- - name: Ensure certificate directories exists
- file:
- path: "{{ item }}"
- state: directory
- with_items:
- - "{{ cert_path }}"
- - "{{ cert_parent_path }}/ca"
-
- # TODO: notify restart openshift-node and/or restart openshift-sdn-node,
- # possibly test service started time against certificate/config file
- # timestamps in openshift-node or openshift-sdn-node to trigger notify
- - name: Sync certs to nodes
- synchronize:
- checksum: yes
- src: "{{ item.src }}"
- dest: "{{ item.dest }}"
- owner: no
- group: no
- with_items:
- - src: "{{ sync_tmpdir }}/{{ cert_rel_path }}"
- dest: "{{ cert_parent_path }}"
- - src: "{{ sync_tmpdir }}/{{ cert_parent_rel_path }}/ca/cert.crt"
- dest: "{{ cert_parent_path }}/ca/cert.crt"
- - local_action: file name={{ sync_tmpdir }} state=absent
- run_once: true
- roles:
- - openshift_node
- - os_env_extras
- - os_env_extras_node
+ openshift_cluster_id: "{{ cluster_id }}"
+ openshift_debug_level: 4
+ openshift_deployment_type: "{{ deployment_type }}"
+ openshift_hostname: "{{ gce_private_ip }}"
diff --git a/playbooks/gce/openshift-node/launch.yml b/playbooks/gce/openshift-node/launch.yml
index 73d0478ab..086ba58bc 100644
--- a/playbooks/gce/openshift-node/launch.yml
+++ b/playbooks/gce/openshift-node/launch.yml
@@ -8,14 +8,12 @@
connection: local
gather_facts: no
+# TODO: modify image based on deployment_type
vars:
inst_names: "{{ oo_new_inst_names }}"
machine_type: n1-standard-1
image: libra-rhel7
- vars_files:
- - vars.yml
-
tasks:
- name: Launch instances
gce:
@@ -37,7 +35,7 @@
with_items: gce.instance_data
- name: Wait for ssh
- wait_for: "port=22 host={{ item.public_ip }}"
+ wait_for: port=22 host={{ item.public_ip }}
with_items: gce.instance_data
- name: Wait for root user setup
diff --git a/playbooks/gce/openshift-node/terminate.yml b/playbooks/gce/openshift-node/terminate.yml
index 7d71dfcab..357e0c295 100644
--- a/playbooks/gce/openshift-node/terminate.yml
+++ b/playbooks/gce/openshift-node/terminate.yml
@@ -3,10 +3,9 @@
hosts: localhost
gather_facts: no
tasks:
- - name: Evaluate oo_host_group_exp if it's set
- add_host: "name={{ item }} groups=oo_nodes_to_terminate"
- with_items: "{{ oo_host_group_exp | default('') }}"
- when: oo_host_group_exp is defined
+ - name: Evaluate oo_nodes_to_terminate
+ add_host: name={{ item }} groups=oo_nodes_to_terminate
+ with_items: oo_host_group_exp | default([])
- name: Terminate node instances
hosts: localhost
@@ -22,6 +21,7 @@
instance_names: "{{ groups['oo_nodes_to_terminate'] }}"
disks: "{{ groups['oo_nodes_to_terminate'] }}"
register: gce
+ when: "'oo_nodes_to_terminate' in groups"
- name: Remove disks of instances
gce_pd:
@@ -32,5 +32,4 @@
zone: "{{ gce.zone }}"
state: absent
with_items: gce.instance_names
-
-
+ when: "'oo_nodes_to_terminate' in groups"
diff --git a/playbooks/gce/openshift-node/vars.yml b/playbooks/gce/openshift-node/vars.yml
deleted file mode 100644
index c196b2fca..000000000
--- a/playbooks/gce/openshift-node/vars.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-openshift_debug_level: 4
-openshift_cluster_id: "{{ cluster_id }}"
diff --git a/playbooks/libvirt/openshift-cluster/config.yml b/playbooks/libvirt/openshift-cluster/config.yml
new file mode 100644
index 000000000..faf278b10
--- /dev/null
+++ b/playbooks/libvirt/openshift-cluster/config.yml
@@ -0,0 +1,38 @@
+---
+# TODO: need to figure out a plan for setting hostname; currently the default
+# is localhost, so no hostname (or public_hostname) value is getting
+# assigned
+
+- name: Populate oo_masters_to_config host group
+ hosts: localhost
+ gather_facts: no
+ vars_files:
+ - vars.yml
+ tasks:
+ - name: Evaluate oo_masters_to_config
+ add_host:
+ name: "{{ item }}"
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ groups: oo_masters_to_config
+ with_items: groups["tag_env-host-type-{{ cluster_id }}-openshift-master"] | default([])
+ - name: Evaluate oo_nodes_to_config
+ add_host:
+ name: "{{ item }}"
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ groups: oo_nodes_to_config
+ with_items: groups["tag_env-host-type-{{ cluster_id }}-openshift-node"] | default([])
+ - name: Evaluate oo_first_master
+ add_host:
+ name: "{{ groups['tag_env-host-type-' ~ cluster_id ~ '-openshift-master'][0] }}"
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ groups: oo_first_master
+ when: "'tag_env-host-type-{{ cluster_id }}-openshift-master' in groups"
+
+- include: ../../common/openshift-cluster/config.yml
+ vars:
+ openshift_cluster_id: "{{ cluster_id }}"
+ openshift_debug_level: 4
+ openshift_deployment_type: "{{ deployment_type }}"
diff --git a/playbooks/libvirt/openshift-cluster/launch.yml b/playbooks/libvirt/openshift-cluster/launch.yml
index 6f2df33af..a7ddc1e7e 100644
--- a/playbooks/libvirt/openshift-cluster/launch.yml
+++ b/playbooks/libvirt/openshift-cluster/launch.yml
@@ -1,65 +1,36 @@
+---
- name: Launch instance(s)
hosts: localhost
- connection: local
gather_facts: no
-
- vars:
- libvirt_storage_pool_path: "{{ lookup('env','HOME') }}/libvirt-storage-pool-openshift"
- libvirt_storage_pool: 'openshift'
- libvirt_uri: 'qemu:///system'
-
vars_files:
- - vars.yml
-
+ - vars.yml
+ vars:
+ os_libvirt_storage_pool: "{{ libvirt_storage_pool | default('images') }}"
+ os_libvirt_storage_pool_path: "{{ libvirt_storage_pool_path | default('/var/lib/libvirt/images') }}"
+ os_libvirt_network: "{{ libvirt_network | default('default') }}"
+ image_url: "{{ deployment_vars[deployment_type].image.url }}"
+ image_sha256: "{{ deployment_vars[deployment_type].image.sha256 }}"
+ image_name: "{{ deployment_vars[deployment_type].image.name }}"
tasks:
- - set_fact:
- k8s_type: master
-
- - name: Generate master instance name(s)
- set_fact:
- scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ '%05x' | format( 1048576 | random ) }}"
- register: master_names_output
- with_sequence: start=1 end='{{ num_masters }}'
+ - fail: msg="Deployment type not supported for libvirt provider yet"
+ when: deployment_type in ['online', 'enterprise']
- - set_fact:
- master_names: "{{ master_names_output.results | oo_collect('ansible_facts') | oo_collect('scratch_name') }}"
+ - include: tasks/configure_libvirt.yml
- - include: launch_instances.yml
- vars:
- instances: '{{ master_names }}'
- cluster: '{{ cluster_id }}'
- type: '{{ k8s_type }}'
- group_name: 'tag_env-host-type-{{ cluster_id }}-openshift-master'
+ - include: ../../common/openshift-cluster/set_master_launch_facts_tasks.yml
+ - include: tasks/launch_instances.yml
+ vars:
+ instances: "{{ master_names }}"
+ cluster: "{{ cluster_id }}"
+ type: "{{ k8s_type }}"
- - set_fact:
- k8s_type: node
+ - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml
+ - include: tasks/launch_instances.yml
+ vars:
+ instances: "{{ node_names }}"
+ cluster: "{{ cluster_id }}"
+ type: "{{ k8s_type }}"
- - name: Generate node instance name(s)
- set_fact:
- scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ '%05x' | format( 1048576 | random ) }}"
- register: node_names_output
- with_sequence: start=1 end='{{ num_nodes }}'
+- include: update.yml
- - set_fact:
- node_names: "{{ node_names_output.results | oo_collect('ansible_facts') | oo_collect('scratch_name') }}"
-
- - include: launch_instances.yml
- vars:
- instances: '{{ node_names }}'
- cluster: '{{ cluster_id }}'
- type: '{{ k8s_type }}'
-
-- hosts: 'tag_env-{{ cluster_id }}'
- roles:
- - openshift_repos
- - os_update_latest
-
-- include: ../openshift-master/config.yml
- vars:
- oo_host_group_exp: 'groups["tag_env-host-type-{{ cluster_id }}-openshift-master"]'
- oo_env: '{{ cluster_id }}'
-
-- include: ../openshift-node/config.yml
- vars:
- oo_host_group_exp: 'groups["tag_env-host-type-{{ cluster_id }}-openshift-node"]'
- oo_env: '{{ cluster_id }}'
+- include: list.yml
diff --git a/playbooks/libvirt/openshift-cluster/list.yml b/playbooks/libvirt/openshift-cluster/list.yml
index 6bf07e3c6..eaedc4d0d 100644
--- a/playbooks/libvirt/openshift-cluster/list.yml
+++ b/playbooks/libvirt/openshift-cluster/list.yml
@@ -1,43 +1,23 @@
+---
- name: Generate oo_list_hosts group
hosts: localhost
- connection: local
gather_facts: no
-
- vars:
- libvirt_uri: 'qemu:///system'
-
+ vars_files:
+ - vars.yml
tasks:
- - name: List VMs
- virt:
- command: list_vms
- register: list_vms
-
- - name: Collect MAC addresses of the VMs
- shell: 'virsh -c {{ libvirt_uri }} dumpxml {{ item }} | xmllint --xpath "string(//domain/devices/interface/mac/@address)" -'
- register: scratch_mac
- with_items: '{{ list_vms.list_vms }}'
- when: item|truncate(cluster_id|length+1, True) == '{{ cluster_id }}-...'
-
- - name: Collect IP addresses of the VMs
- shell: "awk '/{{ item.stdout }}/ {print $1}' /proc/net/arp"
- register: scratch_ip
- with_items: '{{ scratch_mac.results }}'
- when: item.skipped is not defined
-
- - name: Add hosts
- add_host:
- hostname: '{{ item[0] }}'
- ansible_ssh_host: '{{ item[1].stdout }}'
- ansible_ssh_user: root
- groups: oo_list_hosts
- with_together:
- - '{{ list_vms.list_vms }}'
- - '{{ scratch_ip.results }}'
- when: item[1].skipped is not defined
+ - set_fact: scratch_group=tag_env-{{ cluster_id }}
+ when: cluster_id != ''
+ - set_fact: scratch_group=all
+ when: cluster_id == ''
+ - add_host:
+ name: "{{ item }}"
+ groups: oo_list_hosts
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ with_items: groups[scratch_group] | default([]) | difference(['localhost'])
- name: List Hosts
hosts: oo_list_hosts
-
tasks:
- - debug:
- msg: 'public:{{ansible_default_ipv4.address}} private:{{ansible_default_ipv4.address}}'
+ - debug:
+ msg: 'public:{{ansible_default_ipv4.address}} private:{{ansible_default_ipv4.address}}'
diff --git a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt.yml b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt.yml
new file mode 100644
index 000000000..f237c1a60
--- /dev/null
+++ b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt.yml
@@ -0,0 +1,6 @@
+---
+- include: configure_libvirt_storage_pool.yml
+ when: libvirt_storage_pool is defined and libvirt_storage_pool_path is defined
+
+- include: configure_libvirt_network.yml
+ when: libvirt_network is defined
diff --git a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml
new file mode 100644
index 000000000..3117d9edc
--- /dev/null
+++ b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml
@@ -0,0 +1,27 @@
+---
+- name: Test if libvirt network for openshift already exists
+ command: "virsh -c {{ libvirt_uri }} net-info {{ libvirt_network }}"
+ register: net_info_result
+ changed_when: False
+ failed_when: "net_info_result.rc != 0 and 'no network with matching name' not in net_info_result.stderr"
+
+- name: Create a temp directory for the template xml file
+ command: "mktemp -d /tmp/openshift-ansible-XXXXXXX"
+ register: mktemp
+ when: net_info_result.rc == 1
+
+- name: Create network xml file
+ template:
+ src: templates/network.xml
+ dest: "{{ mktemp.stdout }}/network.xml"
+ when: net_info_result.rc == 1
+
+- name: Create libvirt network for openshift
+ command: "virsh -c {{ libvirt_uri }} net-create {{ mktemp.stdout }}/network.xml"
+ when: net_info_result.rc == 1
+
+- name: Remove the temp directory
+ file:
+ path: "{{ mktemp.stdout }}"
+ state: absent
+ when: net_info_result.rc == 1
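
The probe/create split above is the idempotence pattern used throughout these libvirt tasks: the read-only probe is never reported as changed, its failure is masked when stderr matches the expected "missing" message, and creation only runs on rc 1. A generic task-file sketch (resource names are illustrative):

    ---
    - name: Probe for the resource
      command: virsh -c qemu:///system net-info demo-net
      register: probe
      changed_when: False
      failed_when: "probe.rc != 0 and 'no network with matching name' not in probe.stderr"

    - name: Create it only if the probe reported it missing
      command: virsh -c qemu:///system net-create /tmp/demo-net.xml
      when: probe.rc == 1
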
diff --git a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml
new file mode 100644
index 000000000..8a67d713f
--- /dev/null
+++ b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml
@@ -0,0 +1,23 @@
+---
+- name: Create libvirt storage directory for openshift
+ file:
+ dest: "{{ libvirt_storage_pool_path }}"
+ state: directory
+
+- acl:
+ default: yes
+ entity: kvm
+ etype: group
+ name: "{{ libvirt_storage_pool_path }}"
+ permissions: rwx
+ state: present
+
+- name: Test if libvirt storage pool for openshift already exists
+ command: "virsh -c {{ libvirt_uri }} pool-info {{ libvirt_storage_pool }}"
+ register: pool_info_result
+ changed_when: False
+ failed_when: "pool_info_result.rc != 0 and 'no storage pool with matching name' not in pool_info_result.stderr"
+
+- name: Create the libvirt storage pool for openshift
+ command: 'virsh -c {{ libvirt_uri }} pool-create-as {{ libvirt_storage_pool }} dir --target {{ libvirt_storage_pool_path }}'
+ when: pool_info_result.rc == 1
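
The acl task above grants the kvm group access to the pool directory and, via default: yes, makes that grant the default ACL inherited by new volumes created inside it. A standalone sketch against a throwaway directory:

    ---
    - hosts: localhost
      gather_facts: no
      tasks:
        - file: dest=/tmp/demo-pool state=directory
        # default: yes sets the inheritable (default) ACL, not just the current one
        - acl:
            default: yes
            entity: kvm
            etype: group
            name: /tmp/demo-pool
            permissions: rwx
            state: present
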
diff --git a/playbooks/libvirt/openshift-cluster/launch_instances.yml b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
index 3bbcae981..359d0b2f3 100644
--- a/playbooks/libvirt/openshift-cluster/launch_instances.yml
+++ b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
@@ -1,45 +1,47 @@
-- name: Create the libvirt storage directory for openshift
- file:
- dest: '{{ libvirt_storage_pool_path }}'
- state: directory
+---
+# TODO: Add support for choosing the base image based on deployment_type and
+# the desired os (os selection needs support added in bin/cluster, with sane
+# defaults: fedora/centos for origin, rhel for online/enterprise)
+
+# TODO: create a role to encapsulate some of this complexity, possibly also
+# create a module to manage the storage tasks, network tasks, and possibly
+# even the libvirt tasks that set metadata in the domain xml, so that we can
+# create/query data about vms without having to use xml. The python libvirt
+# bindings look like a good candidate for this.
- name: Download Base Cloud image
get_url:
- url: '{{ base_image_url }}'
- sha256sum: '{{ base_image_sha256 }}'
- dest: '{{ libvirt_storage_pool_path }}/{{ base_image_name }}'
+ url: '{{ image_url }}'
+ sha256sum: '{{ image_sha256 }}'
+ dest: '{{ os_libvirt_storage_pool_path }}/{{ image_name }}'
- name: Create the cloud-init config drive path
file:
- dest: '{{ libvirt_storage_pool_path }}/{{ item }}_configdrive/openstack/latest'
+ dest: '{{ os_libvirt_storage_pool_path }}/{{ item }}_configdrive/'
state: directory
- with_items: '{{ instances }}'
+ with_items: instances
- name: Create the cloud-init config drive files
template:
src: '{{ item[1] }}'
- dest: '{{ libvirt_storage_pool_path }}/{{ item[0] }}_configdrive/openstack/latest/{{ item[1] }}'
+ dest: '{{ os_libvirt_storage_pool_path }}/{{ item[0] }}_configdrive/{{ item[1] }}'
with_nested:
- - '{{ instances }}'
+ - instances
- [ user-data, meta-data ]
- name: Create the cloud-init config drive
- command: 'genisoimage -output {{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso -volid cidata -joliet -rock user-data meta-data'
+ command: 'genisoimage -output {{ os_libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso -volid cidata -joliet -rock user-data meta-data'
args:
- chdir: '{{ libvirt_storage_pool_path }}/{{ item }}_configdrive/openstack/latest'
- creates: '{{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso'
- with_items: '{{ instances }}'
-
-- name: Create the libvirt storage pool for openshift
- command: 'virsh -c {{ libvirt_uri }} pool-create-as {{ libvirt_storage_pool }} dir --target {{ libvirt_storage_pool_path }}'
- ignore_errors: yes
+ chdir: '{{ os_libvirt_storage_pool_path }}/{{ item }}_configdrive/'
+ creates: '{{ os_libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso'
+ with_items: instances
- name: Refresh the libvirt storage pool for openshift
command: 'virsh -c {{ libvirt_uri }} pool-refresh {{ libvirt_storage_pool }}'
- name: Create VMs drives
- command: 'virsh -c {{ libvirt_uri }} vol-create-as {{ libvirt_storage_pool }} {{ item }}.qcow2 10G --format qcow2 --backing-vol {{ base_image_name }} --backing-vol-format qcow2'
- with_items: '{{ instances }}'
+ command: 'virsh -c {{ libvirt_uri }} vol-create-as {{ os_libvirt_storage_pool }} {{ item }}.qcow2 10G --format qcow2 --backing-vol {{ image_name }} --backing-vol-format qcow2'
+ with_items: instances
- name: Create VMs
virt:
@@ -47,19 +49,19 @@
command: define
xml: "{{ lookup('template', '../templates/domain.xml') }}"
uri: '{{ libvirt_uri }}'
- with_items: '{{ instances }}'
+ with_items: instances
- name: Start VMs
virt:
name: '{{ item }}'
state: running
uri: '{{ libvirt_uri }}'
- with_items: '{{ instances }}'
+ with_items: instances
- name: Collect MAC addresses of the VMs
shell: 'virsh -c {{ libvirt_uri }} dumpxml {{ item }} | xmllint --xpath "string(//domain/devices/interface/mac/@address)" -'
register: scratch_mac
- with_items: '{{ instances }}'
+ with_items: instances
- name: Wait for the VMs to get an IP
command: "egrep -c '{{ scratch_mac.results | oo_collect('stdout') | join('|') }}' /proc/net/arp"
@@ -72,7 +74,7 @@
- name: Collect IP addresses of the VMs
shell: "awk '/{{ item.stdout }}/ {print $1}' /proc/net/arp"
register: scratch_ip
- with_items: '{{ scratch_mac.results }}'
+ with_items: scratch_mac.results
- set_fact:
ips: "{{ scratch_ip.results | oo_collect('stdout') }}"
@@ -81,7 +83,8 @@
add_host:
hostname: '{{ item.0 }}'
ansible_ssh_host: '{{ item.1 }}'
- ansible_ssh_user: root
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
groups: 'tag_env-{{ cluster }}, tag_host-type-{{ type }}, tag_env-host-type-{{ cluster }}-openshift-{{ type }}'
with_together:
- instances
@@ -93,10 +96,12 @@
port: 22
with_items: ips
-- name: Wait for root user setup
- command: 'ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null root@{{ item }} echo root user is setup'
+- name: Wait for openshift user setup
+ command: 'ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null openshift@{{ item.1 }} echo openshift user is setup'
register: result
until: result.rc == 0
retries: 30
delay: 1
- with_items: ips
+ with_together:
+ - instances
+ - ips
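
with_together, used above to pair instance names with their IPs, zips its lists positionally: item.0 comes from the first list, item.1 from the second. A short demonstration with dummy values:

    ---
    - hosts: localhost
      gather_facts: no
      vars:
        instances: [demo-master-00001, demo-node-00001]
        ips: [192.168.55.2, 192.168.55.3]
      tasks:
        - debug: msg="{{ item.0 }} -> {{ item.1 }}"
          with_together:
            - instances
            - ips
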
diff --git a/playbooks/libvirt/templates/domain.xml b/playbooks/libvirt/openshift-cluster/templates/domain.xml
index da037d138..df200e374 100644
--- a/playbooks/libvirt/templates/domain.xml
+++ b/playbooks/libvirt/openshift-cluster/templates/domain.xml
@@ -1,6 +1,13 @@
<domain type='kvm' id='8'>
<name>{{ item }}</name>
<memory unit='GiB'>1</memory>
+ <metadata xmlns:ansible="https://github.com/ansible/ansible">
+ <ansible:tags>
+ <ansible:tag>env-{{ cluster }}</ansible:tag>
+ <ansible:tag>env-host-type-{{ cluster }}-openshift-{{ type }}</ansible:tag>
+ <ansible:tag>host-type-{{ type }}</ansible:tag>
+ </ansible:tags>
+ </metadata>
<currentMemory unit='GiB'>1</currentMemory>
<vcpu placement='static'>2</vcpu>
<os>
@@ -24,18 +31,18 @@
<emulator>/usr/bin/qemu-system-x86_64</emulator>
<disk type='file' device='disk'>
<driver name='qemu' type='qcow2'/>
- <source file='{{ libvirt_storage_pool_path }}/{{ item }}.qcow2'/>
+ <source file='{{ os_libvirt_storage_pool_path }}/{{ item }}.qcow2'/>
<target dev='vda' bus='virtio'/>
</disk>
<disk type='file' device='cdrom'>
<driver name='qemu' type='raw'/>
- <source file='{{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso'/>
+ <source file='{{ os_libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso'/>
<target dev='vdb' bus='virtio'/>
<readonly/>
</disk>
<controller type='usb' index='0' />
<interface type='network'>
- <source network='default'/>
+ <source network='{{ os_libvirt_network }}'/>
<model type='virtio'/>
</interface>
<serial type='pty'>
@@ -49,7 +56,6 @@
</channel>
<input type='tablet' bus='usb' />
<input type='mouse' bus='ps2'/>
- <input type='keyboard' bus='ps2'/>
<graphics type='spice' autoport='yes' />
<video>
<model type='qxl' ram='65536' vram='65536' vgamem='16384' heads='1'/>
diff --git a/playbooks/libvirt/openshift-cluster/templates/meta-data b/playbooks/libvirt/openshift-cluster/templates/meta-data
new file mode 100644
index 000000000..6b421770d
--- /dev/null
+++ b/playbooks/libvirt/openshift-cluster/templates/meta-data
@@ -0,0 +1,3 @@
+instance-id: {{ item[0] }}
+hostname: {{ item[0] }}
+local-hostname: {{ item[0] }}.example.com
diff --git a/playbooks/libvirt/openshift-cluster/templates/network.xml b/playbooks/libvirt/openshift-cluster/templates/network.xml
new file mode 100644
index 000000000..86dcd62bb
--- /dev/null
+++ b/playbooks/libvirt/openshift-cluster/templates/network.xml
@@ -0,0 +1,23 @@
+<network>
+ <name>openshift-ansible</name>
+ <forward mode='nat'>
+ <nat>
+ <port start='1024' end='65535'/>
+ </nat>
+ </forward>
+ <!-- TODO: query for first available virbr interface available -->
+ <bridge name='virbr3' stp='on' delay='0'/>
+ <!-- TODO: make overridable -->
+ <domain name='example.com'/>
+ <dns>
+ <!-- TODO: automatically add host entries -->
+ </dns>
+ <!-- TODO: query for available address space -->
+ <ip address='192.168.55.1' netmask='255.255.255.0'>
+ <dhcp>
+ <range start='192.168.55.2' end='192.168.55.254'/>
+ <!-- TODO: add static entries addresses for the hosts to be created -->
+ </dhcp>
+ </ip>
+</network>
+
diff --git a/playbooks/libvirt/openshift-cluster/templates/user-data b/playbooks/libvirt/openshift-cluster/templates/user-data
new file mode 100644
index 000000000..77b788109
--- /dev/null
+++ b/playbooks/libvirt/openshift-cluster/templates/user-data
@@ -0,0 +1,23 @@
+#cloud-config
+disable_root: true
+
+hostname: {{ item[0] }}
+fqdn: {{ item[0] }}.example.com
+manage_etc_hosts: true
+
+users:
+ - default
+ - name: root
+ ssh_authorized_keys:
+ - {{ lookup('file', '~/.ssh/id_rsa.pub') }}
+
+system_info:
+ default_user:
+ name: openshift
+ sudo: ["ALL=(ALL) NOPASSWD: ALL"]
+
+ssh_authorized_keys:
+ - {{ lookup('file', '~/.ssh/id_rsa.pub') }}
+
+bootcmd:
+ - NETWORK_CONFIG=/etc/sysconfig/network-scripts/ifcfg-eth0; if ! grep DHCP_HOSTNAME ${NETWORK_CONFIG}; then echo 'DHCP_HOSTNAME="{{ item[0] }}.example.com"' >> ${NETWORK_CONFIG}; fi; pkill -9 dhclient; service network restart
diff --git a/playbooks/libvirt/openshift-cluster/terminate.yml b/playbooks/libvirt/openshift-cluster/terminate.yml
index c609169d3..b173a09dd 100644
--- a/playbooks/libvirt/openshift-cluster/terminate.yml
+++ b/playbooks/libvirt/openshift-cluster/terminate.yml
@@ -1,41 +1,44 @@
+---
+# TODO: does not handle a non-existent cluster gracefully
+
- name: Terminate instance(s)
hosts: localhost
- connection: local
gather_facts: no
+ vars_files:
+ - vars.yml
+ tasks:
+ - set_fact: cluster_group=tag_env-{{ cluster_id }}
+ - add_host:
+ name: "{{ item }}"
+ groups: oo_hosts_to_terminate
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ with_items: groups[cluster_group] | default([])
- vars:
- libvirt_storage_pool_path: "{{ lookup('env','HOME') }}/libvirt-storage-pool-openshift"
- libvirt_storage_pool: 'openshift'
- libvirt_uri: 'qemu:///system'
+ - name: Destroy VMs
+ virt:
+ name: '{{ item[0] }}'
+ command: '{{ item[1] }}'
+ uri: '{{ libvirt_uri }}'
+ with_nested:
+ - groups['oo_hosts_to_terminate']
+ - [ destroy, undefine ]
- tasks:
- - name: List VMs
- virt:
- command: list_vms
- register: list_vms
+ - name: Delete VMs drives
+ command: 'virsh -c {{ libvirt_uri }} vol-delete --pool {{ libvirt_storage_pool }} {{ item }}.qcow2'
+ args:
+ removes: '{{ libvirt_storage_pool_path }}/{{ item }}.qcow2'
+ with_items: groups['oo_hosts_to_terminate']
- - name: Destroy VMs
- virt:
- name: '{{ item[0] }}'
- command: '{{ item[1] }}'
- uri: '{{ libvirt_uri }}'
- with_nested:
- - '{{ list_vms.list_vms }}'
- - [ destroy, undefine ]
- when: item[0]|truncate(cluster_id|length+1, True) == '{{ cluster_id }}-...'
+ - name: Delete the VM cloud-init image
+ file:
+ path: '{{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso'
+ state: absent
+ with_items: groups['oo_hosts_to_terminate']
- - name: Delete VMs config drive
- file:
- path: '{{ libvirt_storage_pool_path }}/{{ item }}_configdrive/openstack'
- state: absent
- with_items: '{{ list_vms.list_vms }}'
- when: item|truncate(cluster_id|length+1, True) == '{{ cluster_id }}-...'
+ - name: Remove the cloud-init config directory
+ file:
+ path: '{{ libvirt_storage_pool_path }}/{{ item }}_configdrive/'
+ state: absent
+ with_items: groups['oo_hosts_to_terminate']
- - name: Delete VMs drives
- command: 'virsh -c {{ libvirt_uri }} vol-delete --pool {{ libvirt_storage_pool }} {{ item[0] }}{{ item[1] }}'
- args:
- removes: '{{ libvirt_storage_pool_path }}/{{ item[0] }}{{ item[1] }}'
- with_nested:
- - '{{ list_vms.list_vms }}'
- - [ '_configdrive', '_cloud-init.iso', '.qcow2' ]
- when: item[0]|truncate(cluster_id|length+1, True) == '{{ cluster_id }}-...'
diff --git a/playbooks/libvirt/openshift-cluster/update.yml b/playbooks/libvirt/openshift-cluster/update.yml
new file mode 100644
index 000000000..57e36db9e
--- /dev/null
+++ b/playbooks/libvirt/openshift-cluster/update.yml
@@ -0,0 +1,18 @@
+---
+- name: Populate oo_hosts_to_update group
+ hosts: localhost
+ gather_facts: no
+ vars_files:
+ - vars.yml
+ tasks:
+ - name: Evaluate oo_hosts_to_update
+ add_host:
+ name: "{{ item }}"
+ groups: oo_hosts_to_update
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ with_items: groups["tag_env-host-type-{{ cluster_id }}-openshift-master"] | union(groups["tag_env-host-type-{{ cluster_id }}-openshift-node"]) | default([])
+
+- include: ../../common/openshift-cluster/update_repos_and_packages.yml
+
+- include: config.yml
diff --git a/playbooks/libvirt/openshift-cluster/vars.yml b/playbooks/libvirt/openshift-cluster/vars.yml
index 4e4eecd46..65d954fee 100644
--- a/playbooks/libvirt/openshift-cluster/vars.yml
+++ b/playbooks/libvirt/openshift-cluster/vars.yml
@@ -1,7 +1,33 @@
-# base_image_url: http://download.fedoraproject.org/pub/fedora/linux/releases/21/Cloud/Images/x86_64/Fedora-Cloud-Base-20141203-21.x86_64.qcow2
-# base_image_name: Fedora-Cloud-Base-20141203-21.x86_64.qcow2
-# base_image_sha256: 3a99bb89f33e3d4ee826c8160053cdb8a72c80cd23350b776ce73cd244467d86
+---
+libvirt_storage_pool_path: "{{ lookup('env','HOME') }}/libvirt-storage-pool-openshift-ansible"
+libvirt_storage_pool: 'openshift-ansible'
+libvirt_network: openshift-ansible
+libvirt_uri: 'qemu:///system'
-base_image_url: http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2
-base_image_name: CentOS-7-x86_64-GenericCloud.qcow2
-base_image_sha256: e324e3ab1d24a1bbf035ddb365e7f9058c0b454acf48d7aa15c5519fae5998ab
+deployment_vars:
+ origin:
+ image:
+ url: "http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2"
+ name: CentOS-7-x86_64-GenericCloud.qcow2
+ sha256: e324e3ab1d24a1bbf035ddb365e7f9058c0b454acf48d7aa15c5519fae5998ab
+ ssh_user: openshift
+ sudo: yes
+ online:
+ image:
+ url:
+ name:
+ sha256:
+ ssh_user: root
+ sudo: no
+ enterprise:
+ image:
+ url:
+ name:
+ sha256:
+ ssh_user: openshift
+ sudo: yes
+# origin:
+# fedora:
+# url: "http://download.fedoraproject.org/pub/fedora/linux/releases/21/Cloud/Images/x86_64/Fedora-Cloud-Base-20141203-21.x86_64.qcow2"
+# name: Fedora-Cloud-Base-20141203-21.x86_64.qcow2
+# sha256: 3a99bb89f33e3d4ee826c8160053cdb8a72c80cd23350b776ce73cd244467d86
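vars.yml now keys image and connection settings off the deployment type, so the launch and update plays resolve everything with one dictionary lookup. An illustrative lookup mirroring the structure above (values abbreviated):

    deployment_vars = {
        'origin': {
            'image': {'name': 'CentOS-7-x86_64-GenericCloud.qcow2'},
            'ssh_user': 'openshift',
            'sudo': True,
        },
        'online': {
            'image': {'name': None},
            'ssh_user': 'root',
            'sudo': False,
        },
    }

    deployment_type = 'origin'
    # Same access pattern as deployment_vars[deployment_type].ssh_user in the plays.
    print(deployment_vars[deployment_type]['ssh_user'])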
diff --git a/playbooks/libvirt/openshift-master/config.yml b/playbooks/libvirt/openshift-master/config.yml
deleted file mode 100644
index dd95fd57f..000000000
--- a/playbooks/libvirt/openshift-master/config.yml
+++ /dev/null
@@ -1,21 +0,0 @@
-- name: master/config.yml, populate oo_masters_to_config host group if needed
- hosts: localhost
- gather_facts: no
- tasks:
- - name: "Evaluate oo_host_group_exp if it's set"
- add_host:
- name: '{{ item }}'
- groups: oo_masters_to_config
- with_items: "{{ oo_host_group_exp | default('') }}"
- when: oo_host_group_exp is defined
-
-- name: Configure instances
- hosts: oo_masters_to_config
- vars:
- openshift_hostname: '{{ ansible_default_ipv4.address }}'
- vars_files:
- - vars.yml
- roles:
- - openshift_master
- - pods
- - os_env_extras
diff --git a/playbooks/libvirt/openshift-master/vars.yml b/playbooks/libvirt/openshift-master/vars.yml
deleted file mode 100644
index ad0c0fbe2..000000000
--- a/playbooks/libvirt/openshift-master/vars.yml
+++ /dev/null
@@ -1 +0,0 @@
-openshift_debug_level: 4
diff --git a/playbooks/libvirt/openshift-node/config.yml b/playbooks/libvirt/openshift-node/config.yml
deleted file mode 100644
index 3244a8046..000000000
--- a/playbooks/libvirt/openshift-node/config.yml
+++ /dev/null
@@ -1,102 +0,0 @@
-- name: node/config.yml, populate oo_nodes_to_config host group if needed
- hosts: localhost
- gather_facts: no
- tasks:
- - name: "Evaluate oo_host_group_exp if it's set"
- add_host:
- name: '{{ item }}'
- groups: oo_nodes_to_config
- with_items: "{{ oo_host_group_exp | default('') }}"
- when: oo_host_group_exp is defined
-
- - add_host:
- name: "{{ groups['tag_env-host-type-' ~ cluster_id ~ '-openshift-master'][0] }}"
- groups: oo_first_master
- when: oo_host_group_exp is defined
-
-
-- name: Gather and set facts for hosts to configure
- hosts: oo_nodes_to_config
- roles:
- - openshift_facts
- tasks:
- # Since the master is registering the nodes before they are configured, we
- # need to make sure to set the node properties beforehand if we do not want
- # the defaults
- - openshift_facts:
- role: "{{ item.role }}"
- local_facts: "{{ item.local_facts }}"
- with_items:
- - role: common
- local_facts:
- hostname: "{{ ansible_default_ipv4.address }}"
- - role: node
- local_facts:
- external_id: "{{ openshift_node_external_id | default(None) }}"
- resources_cpu: "{{ openshfit_node_resources_cpu | default(None) }}"
- resources_memory: "{{ openshfit_node_resources_memory | default(None) }}"
- pod_cidr: "{{ openshfit_node_pod_cidr | default(None) }}"
- labels: "{{ openshfit_node_labels | default(None) }}"
- annotations: "{{ openshfit_node_annotations | default(None) }}"
-
-
-- name: Register nodes
- hosts: oo_first_master
- vars:
- openshift_nodes: "{{ hostvars
- | oo_select_keys(groups['oo_nodes_to_config']) }}"
- roles:
- - openshift_register_nodes
- tasks:
- - name: Create local temp directory for syncing certs
- local_action: command /usr/bin/mktemp -d /tmp/openshift-ansible-XXXXXXX
- register: mktemp
-
- - name: Sync master certs to localhost
- synchronize:
- mode: pull
- checksum: yes
- src: /var/lib/openshift/openshift.local.certificates
- dest: "{{ mktemp.stdout }}"
-
-- name: Configure instances
- hosts: oo_nodes_to_config
- vars_files:
- - vars.yml
- vars:
- sync_tmpdir: "{{ hostvars[groups['oo_first_master'][0]].mktemp.stdout }}"
- cert_parent_rel_path: openshift.local.certificates
- cert_rel_path: "{{ cert_parent_rel_path }}/node-{{ openshift.common.hostname }}"
- cert_base_path: /var/lib/openshift
- cert_parent_path: "{{ cert_base_path }}/{{ cert_parent_rel_path }}"
- cert_path: "{{ cert_base_path }}/{{ cert_rel_path }}"
- pre_tasks:
- - name: Ensure certificate directories exists
- file:
- path: "{{ item }}"
- state: directory
- with_items:
- - "{{ cert_path }}"
- - "{{ cert_parent_path }}/ca"
-
- # TODO: notify restart openshift-node and/or restart openshift-sdn-node,
- # possibly test service started time against certificate/config file
- # timestamps in openshift-node or openshift-sdn-node to trigger notify
- - name: Sync certs to nodes
- synchronize:
- checksum: yes
- src: "{{ item.src }}"
- dest: "{{ item.dest }}"
- owner: no
- group: no
- with_items:
- - src: "{{ sync_tmpdir }}/{{ cert_rel_path }}"
- dest: "{{ cert_parent_path }}"
- - src: "{{ sync_tmpdir }}/{{ cert_parent_rel_path }}/ca/cert.crt"
- dest: "{{ cert_parent_path }}/ca/cert.crt"
- - local_action: file name={{ sync_tmpdir }} state=absent
- run_once: true
- roles:
- - openshift_node
- - os_env_extras
- - os_env_extras_node
diff --git a/playbooks/libvirt/openshift-node/roles b/playbooks/libvirt/openshift-node/roles
deleted file mode 120000
index 20c4c58cf..000000000
--- a/playbooks/libvirt/openshift-node/roles
+++ /dev/null
@@ -1 +0,0 @@
-../../../roles \ No newline at end of file
diff --git a/playbooks/libvirt/openshift-node/vars.yml b/playbooks/libvirt/openshift-node/vars.yml
deleted file mode 100644
index ad0c0fbe2..000000000
--- a/playbooks/libvirt/openshift-node/vars.yml
+++ /dev/null
@@ -1 +0,0 @@
-openshift_debug_level: 4
diff --git a/playbooks/libvirt/templates/meta-data b/playbooks/libvirt/templates/meta-data
deleted file mode 100644
index 5d779519f..000000000
--- a/playbooks/libvirt/templates/meta-data
+++ /dev/null
@@ -1,2 +0,0 @@
-instance-id: {{ item[0] }}
-local-hostname: {{ item[0] }}
diff --git a/playbooks/libvirt/templates/user-data b/playbooks/libvirt/templates/user-data
deleted file mode 100644
index 985badc8e..000000000
--- a/playbooks/libvirt/templates/user-data
+++ /dev/null
@@ -1,10 +0,0 @@
-#cloud-config
-
-disable_root: 0
-
-system_info:
- default_user:
- name: root
-
-ssh_authorized_keys:
- - {{ lookup('file', '~/.ssh/id_rsa.pub') }}
diff --git a/roles/openshift_common/tasks/main.yml b/roles/openshift_common/tasks/main.yml
index 941190534..c55677c3f 100644
--- a/roles/openshift_common/tasks/main.yml
+++ b/roles/openshift_common/tasks/main.yml
@@ -1,7 +1,7 @@
---
- name: Set common OpenShift facts
openshift_facts:
- role: 'common'
+ role: common
local_facts:
cluster_id: "{{ openshift_cluster_id | default('default') }}"
debug_level: "{{ openshift_debug_level | default(0) }}"
@@ -10,7 +10,7 @@
public_hostname: "{{ openshift_public_hostname | default(None) }}"
public_ip: "{{ openshift_public_ip | default(None) }}"
use_openshift_sdn: "{{ openshift_use_openshift_sdn | default(None) }}"
-
+ deployment_type: "{{ openshift_deployment_type }}"
- name: Set hostname
hostname: name={{ openshift.common.hostname }}
diff --git a/roles/openshift_common/vars/main.yml b/roles/openshift_common/vars/main.yml
index 50816d319..9f657a2c7 100644
--- a/roles/openshift_common/vars/main.yml
+++ b/roles/openshift_common/vars/main.yml
@@ -5,3 +5,7 @@
# chains with the public zone (or the zone associated with the correct
# interfaces)
os_firewall_use_firewalld: False
+
+openshift_cert_parent_dir: /var/lib/openshift
+openshift_cert_relative_dir: openshift.local.certificates
+openshift_cert_dir: "{{ openshift_cert_parent_dir }}/{{ openshift_cert_relative_dir }}"
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index 0dd343443..1e0d5c605 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -21,8 +21,11 @@ class OpenShiftFactsUnsupportedRoleError(Exception):
class OpenShiftFactsFileWriteError(Exception):
pass
+class OpenShiftFactsMetadataUnavailableError(Exception):
+ pass
+
class OpenShiftFacts():
- known_roles = ['common', 'master', 'node', 'master_sdn', 'node_sdn']
+ known_roles = ['common', 'master', 'node', 'master_sdn', 'node_sdn', 'dns']
def __init__(self, role, filename, local_facts):
self.changed = False
@@ -169,20 +172,18 @@ class OpenShiftFacts():
return hostname
def get_defaults(self, roles):
- hardware_facts = self.get_hardware_facts()
- net_facts = self.get_net_facts()
- base_facts = self.get_base_facts()
+ ansible_facts = self.get_ansible_facts()
defaults = dict()
common = dict(use_openshift_sdn=True)
- ip = net_facts['default_ipv4']['address']
+ ip = ansible_facts['default_ipv4']['address']
common['ip'] = ip
common['public_ip'] = ip
rc, output, error = module.run_command(['hostname', '-f'])
hostname_f = output.strip() if rc == 0 else ''
- hostname_values = [hostname_f, base_facts['nodename'], base_facts['fqdn']]
+ hostname_values = [hostname_f, ansible_facts['nodename'], ansible_facts['fqdn']]
hostname = self.choose_hostname(hostname_values)
common['hostname'] = hostname
@@ -196,14 +197,14 @@ class OpenShiftFacts():
master = dict(api_use_ssl=True, api_port='8443',
console_use_ssl=True, console_path='/console',
console_port='8443', etcd_use_ssl=False,
- etcd_port='4001')
+ etcd_port='4001', portal_net='172.30.17.0/24')
defaults['master'] = master
if 'node' in roles:
node = dict(external_id=common['hostname'], pod_cidr='',
labels={}, annotations={})
- node['resources_cpu'] = hardware_facts['processor_cores']
- node['resources_memory'] = int(int(hardware_facts['memtotal_mb']) * 1024 * 1024 * 0.75)
+ node['resources_cpu'] = ansible_facts['processor_cores']
+ node['resources_memory'] = int(int(ansible_facts['memtotal_mb']) * 1024 * 1024 * 0.75)
defaults['node'] = node
return defaults
@@ -226,8 +227,7 @@ class OpenShiftFacts():
def query_metadata(self, metadata_url, headers=None, expect_json=False):
r, info = fetch_url(module, metadata_url, headers=headers)
if info['status'] != 200:
- module.fail_json(msg='Failed to query metadata', result=r,
- info=info)
+ raise OpenShiftFactsMetadataUnavailableError("Metadata unavailable")
if expect_json:
return module.from_json(r.read())
else:
@@ -252,40 +252,27 @@ class OpenShiftFacts():
def get_provider_metadata(self, metadata_url, supports_recursive=False,
headers=None, expect_json=False):
- if supports_recursive:
- metadata = self.query_metadata(metadata_url, headers, expect_json)
- else:
- metadata = self.walk_metadata(metadata_url, headers, expect_json)
+ try:
+ if supports_recursive:
+ metadata = self.query_metadata(metadata_url, headers, expect_json)
+ else:
+ metadata = self.walk_metadata(metadata_url, headers, expect_json)
+        except OpenShiftFactsMetadataUnavailableError:
+ metadata = None
return metadata
- def get_hardware_facts(self):
- if not hasattr(self, 'hardware_facts'):
- self.hardware_facts = Hardware().populate()
- return self.hardware_facts
-
- def get_base_facts(self):
- if not hasattr(self, 'base_facts'):
- self.base_facts = Facts().populate()
- return self.base_facts
-
- def get_virt_facts(self):
- if not hasattr(self, 'virt_facts'):
- self.virt_facts = Virtual().populate()
- return self.virt_facts
-
- def get_net_facts(self):
- if not hasattr(self, 'net_facts'):
- self.net_facts = Network(module).populate()
- return self.net_facts
+ def get_ansible_facts(self):
+ if not hasattr(self, 'ansible_facts'):
+ self.ansible_facts = ansible_facts(module)
+ return self.ansible_facts
def guess_host_provider(self):
# TODO: cloud provider facts should probably be submitted upstream
- virt_facts = self.get_virt_facts()
- hardware_facts = self.get_hardware_facts()
- product_name = hardware_facts['product_name']
- product_version = hardware_facts['product_version']
- virt_type = virt_facts['virtualization_type']
- virt_role = virt_facts['virtualization_role']
+ ansible_facts = self.get_ansible_facts()
+ product_name = ansible_facts['product_name']
+ product_version = ansible_facts['product_version']
+ virt_type = ansible_facts['virtualization_type']
+ virt_role = ansible_facts['virtualization_role']
provider = None
metadata = None
@@ -300,8 +287,9 @@ class OpenShiftFacts():
True)
# Filter sshKeys and serviceAccounts from gce metadata
- metadata['project']['attributes'].pop('sshKeys', None)
- metadata['instance'].pop('serviceAccounts', None)
+ if metadata:
+ metadata['project']['attributes'].pop('sshKeys', None)
+ metadata['instance'].pop('serviceAccounts', None)
elif virt_type == 'xen' and virt_role == 'guest' and re.match(r'.*\.amazon$', product_version):
provider = 'ec2'
metadata_url = 'http://169.254.169.254/latest/meta-data/'
@@ -310,12 +298,18 @@ class OpenShiftFacts():
provider = 'openstack'
metadata_url = 'http://169.254.169.254/openstack/latest/meta_data.json'
metadata = self.get_provider_metadata(metadata_url, True, None, True)
- ec2_compat_url = 'http://169.254.169.254/latest/meta-data/'
- metadata['ec2_compat'] = self.get_provider_metadata(ec2_compat_url)
- # Filter public_keys and random_seed from openstack metadata
- metadata.pop('public_keys', None)
- metadata.pop('random_seed', None)
+ if metadata:
+ ec2_compat_url = 'http://169.254.169.254/latest/meta-data/'
+ metadata['ec2_compat'] = self.get_provider_metadata(ec2_compat_url)
+
+ # Filter public_keys and random_seed from openstack metadata
+ metadata.pop('public_keys', None)
+ metadata.pop('random_seed', None)
+
+ if not metadata['ec2_compat']:
+ metadata = None
+
return dict(name=provider, metadata=metadata)
def normalize_provider_facts(self, provider, metadata):
@@ -479,4 +473,6 @@ def main():
from ansible.module_utils.basic import *
from ansible.module_utils.facts import *
from ansible.module_utils.urls import *
-main()
+
+if __name__ == '__main__':
+ main()
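The openshift_facts refactor stops hard-failing the whole module when a cloud metadata endpoint is unreachable: query_metadata raises OpenShiftFactsMetadataUnavailableError and get_provider_metadata maps it to None, so provider detection degrades gracefully off-cloud. A condensed sketch of the pattern:

    class MetadataUnavailableError(Exception):
        """Stand-in for OpenShiftFactsMetadataUnavailableError."""

    def query_metadata(url):
        # Stand-in for the real HTTP probe; pretend the endpoint is down.
        raise MetadataUnavailableError(url)

    def get_provider_metadata(url):
        try:
            return query_metadata(url)
        except MetadataUnavailableError:
            return None  # callers treat None as "no provider metadata"

    assert get_provider_metadata('http://169.254.169.254/latest/meta-data/') is None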
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index aa615df39..28bdda618 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -11,48 +11,96 @@
api_url: "{{ openshift_master_api_url | default(None) }}"
api_use_ssl: "{{ openshift_master_api_use_ssl | default(None) }}"
public_api_url: "{{ openshift_master_public_api_url | default(None) }}"
+ console_path: "{{ openshift_master_console_path | default(None) }}"
console_port: "{{ openshift_master_console_port | default(None) }}"
console_url: "{{ openshift_master_console_url | default(None) }}"
console_use_ssl: "{{ openshift_master_console_use_ssl | default(None) }}"
public_console_url: "{{ openshift_master_public_console_url | default(None) }}"
+ etcd_port: "{{ openshift_master_etcd_port | default(None) }}"
etcd_use_ssl: "{{ openshift_master_etcd_use_ssl | default(None) }}"
+ portal_net: "{{ openshift_master_portal_net | default(None) }}"
+
+# TODO: These values need to be configurable
+- name: Set dns OpenShift facts
+ openshift_facts:
+ role: 'dns'
+ local_facts:
+ ip: "{{ openshift.common.ip }}"
+ domain: local
- name: Install OpenShift Master package
yum: pkg=openshift-master state=installed
+ register: install_result
+
+- name: Reload systemd units
+ command: systemctl daemon-reload
+ when: install_result | changed
+
+- name: Create certificate parent directory if it doesn't exist
+ file:
+ path: "{{ openshift_cert_parent_dir }}"
+ state: directory
+
+- name: Create config parent directory if it doesn't exist
+ file:
+ path: "{{ openshift_master_config | dirname }}"
+ state: directory
+
+# TODO: should probably use a template lookup for this
+# TODO: should allow for setting --etcd, --kubernetes options
+# TODO: recreate config if values change
+- name: Use enterprise default for openshift_registry_url if not set
+ set_fact:
+ openshift_registry_url: "openshift3_beta/ose-${component}:${version}"
+ when: openshift.common.deployment_type == 'enterprise' and openshift_registry_url is not defined
+
+- name: Use online default for openshift_registry_url if not set
+ set_fact:
+ openshift_registry_url: "docker-registry.ops.rhcloud.com/openshift3_beta/ose-${component}:${version}"
+ when: openshift.common.deployment_type == 'online' and openshift_registry_url is not defined
+
+- name: Create master config
+ command: >
+ /usr/bin/openshift start master --write-config
+ --config={{ openshift_master_config }}
+ --portal-net={{ openshift.master.portal_net }}
+ --master={{ openshift.master.api_url }}
+ --public-master={{ openshift.master.public_api_url }}
+ --listen={{ 'https' if openshift.master.api_use_ssl else 'http' }}://0.0.0.0:{{ openshift.master.api_port }}
+ {{ ('--images=' ~ openshift_registry_url) if (openshift_registry_url | default('', true) != '') else '' }}
+ {{ ('--nodes=' ~ openshift_node_ips | join(',')) if (openshift_node_ips | default('', true) != '') else '' }}
+ args:
+ chdir: "{{ openshift_cert_parent_dir }}"
+ creates: "{{ openshift_master_config }}"
-# TODO: We should pre-generate the master config and point to the generated
-# config rather than setting command line flags here
- name: Configure OpenShift settings
lineinfile:
dest: /etc/sysconfig/openshift-master
regexp: '^OPTIONS='
- line: "OPTIONS=\"--master={{ openshift.common.hostname }} --public-master={{ openshift.common.public_hostname }} {% if openshift_node_ips %} --nodes={{ openshift_node_ips | join(',') }} {% endif %} --loglevel={{ openshift.master.debug_level }}\""
- notify:
- - restart openshift-master
-
-# TODO: should this be populated by a fact based on the deployment type
-# (origin, online, enterprise)?
-- name: Set default registry url
- lineinfile:
- dest: /etc/sysconfig/openshift-master
- regexp: '^IMAGES='
- line: "IMAGES={{ openshift_registry_url }}"
- when: openshift_registry_url is defined
+ line: "OPTIONS=\"--config={{ openshift_master_config }} --loglevel={{ openshift.master.debug_level }}\""
notify:
- restart openshift-master
- name: Start and enable openshift-master
service: name=openshift-master enabled=yes state=started
-- name: Create .kube directory
+- name: Create the OpenShift client config dir(s)
file:
- path: /root/.kube
+ path: "~{{ item }}/.config/openshift"
state: directory
mode: 0700
+ owner: "{{ item }}"
+ group: "{{ item }}"
+ with_items:
+ - root
+ - "{{ ansible_ssh_user }}"
# TODO: Update this file if the contents of the source file are not present in
# the dest file, will need to make sure to ignore things that could be added
-- name: Configure root user kubeconfig
- command: cp /var/lib/openshift/openshift.local.certificates/openshift-client/.kubeconfig /root/.kube/.kubeconfig
+- name: Create the OpenShift client config(s)
+ command: cp {{ openshift_cert_dir }}/openshift-client/.kubeconfig ~{{ item }}/.config/openshift/.config
args:
- creates: /root/.kube/.kubeconfig
+ creates: ~{{ item }}/.config/openshift/.config
+ with_items:
+ - root
+ - "{{ ansible_ssh_user }}"
diff --git a/roles/openshift_master/vars/main.yml b/roles/openshift_master/vars/main.yml
new file mode 100644
index 000000000..c52d957ac
--- /dev/null
+++ b/roles/openshift_master/vars/main.yml
@@ -0,0 +1,5 @@
+---
+openshift_master_config: /etc/openshift/master.yaml
+openshift_master_ca_dir: "{{ openshift_cert_dir }}/ca"
+openshift_master_ca_cert: "{{ openshift_master_ca_dir }}/cert.crt"
+openshift_master_ca_key: "{{ openshift_master_ca_dir }}/key.key"
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index e3c04585b..3d56bdd67 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -13,17 +13,22 @@
failed_when: not result.stat.exists
register: result
with_items:
- - "{{ cert_path }}"
- - "{{ cert_path }}/cert.crt"
- - "{{ cert_path }}/key.key"
- - "{{ cert_path }}/.kubeconfig"
- - "{{ cert_path }}/server.crt"
- - "{{ cert_path }}/server.key"
- - "{{ cert_parent_path }}/ca/cert.crt"
- #- "{{ cert_path }}/node.yaml"
+ - "{{ openshift_node_cert_dir }}"
+ - "{{ openshift_node_cert_dir }}/ca.crt"
+ - "{{ openshift_node_cert_dir }}/client.crt"
+ - "{{ openshift_node_cert_dir }}/client.key"
+ - "{{ openshift_node_cert_dir }}/.kubeconfig"
+ - "{{ openshift_node_cert_dir }}/node-config.yaml"
+ - "{{ openshift_node_cert_dir }}/server.crt"
+ - "{{ openshift_node_cert_dir }}/server.key"
- name: Install OpenShift Node package
yum: pkg=openshift-node state=installed
+ register: install_result
+
+- name: Reload systemd units
+ command: systemctl daemon-reload
+ when: install_result | changed
# --create-certs=false is a temporary workaround until
# https://github.com/openshift/origin/pull/1361 is merged upstream and it is
@@ -32,16 +37,7 @@
lineinfile:
dest: /etc/sysconfig/openshift-node
regexp: '^OPTIONS='
- line: "OPTIONS=\"--hostname={{ openshift.common.hostname }} --loglevel={{ openshift.node.debug_level }} --create-certs=false\""
- notify:
- - restart openshift-node
-
-- name: Set default registry url
- lineinfile:
- dest: /etc/sysconfig/openshift-node
- regexp: '^IMAGES='
- line: "IMAGES={{ openshift_registry_url }}"
- when: openshift_registry_url is defined
+ line: "OPTIONS=\"--loglevel={{ openshift.node.debug_level }} --config={{ openshift_node_cert_dir }}/node-config.yaml\""
notify:
- restart openshift-node
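The node role's pre-flight stat check fails fast when any required credential or config file is missing. An equivalent standalone check, sketched with the openshift_node_cert_dir default (paths illustrative):

    import os

    cert_dir = '/etc/openshift/node'  # openshift_node_cert_dir
    required = ['ca.crt', 'client.crt', 'client.key', '.kubeconfig',
                'node-config.yaml', 'server.crt', 'server.key']
    missing = [name for name in required
               if not os.path.exists(os.path.join(cert_dir, name))]
    if missing:
        raise SystemExit('missing node credentials: %s' % ', '.join(missing))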
diff --git a/roles/openshift_node/vars/main.yml b/roles/openshift_node/vars/main.yml
new file mode 100644
index 000000000..c6be83139
--- /dev/null
+++ b/roles/openshift_node/vars/main.yml
@@ -0,0 +1,2 @@
+---
+openshift_node_cert_dir: /etc/openshift/node
diff --git a/roles/openshift_register_nodes/defaults/main.yml b/roles/openshift_register_nodes/defaults/main.yml
index 3501e8922..a0befab44 100644
--- a/roles/openshift_register_nodes/defaults/main.yml
+++ b/roles/openshift_register_nodes/defaults/main.yml
@@ -1,5 +1,2 @@
---
openshift_kube_api_version: v1beta1
-openshift_cert_dir: openshift.local.certificates
-openshift_cert_dir_parent: /var/lib/openshift
-openshift_cert_dir_abs: "{{ openshift_cert_dir_parent ~ '/' ~ openshift_cert_dir }}"
diff --git a/roles/openshift_register_nodes/library/kubernetes_register_node.py b/roles/openshift_register_nodes/library/kubernetes_register_node.py
index 8ebeb087a..afa9eb27d 100755
--- a/roles/openshift_register_nodes/library/kubernetes_register_node.py
+++ b/roles/openshift_register_nodes/library/kubernetes_register_node.py
@@ -1,12 +1,21 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# vim: expandtab:tabstop=4:shiftwidth=4
+#
+# disable pylint checks
+# temporarily disabled until items can be addressed:
+# fixme - until all TODO comments have been addressed
+# permanently disabled unless someone wants to refactor the object model:
+# too-few-public-methods
+# no-self-use
+# too-many-arguments
+# too-many-locals
+# too-many-branches
+# pylint:disable=fixme, too-many-arguments, no-self-use
+# pylint:disable=too-many-locals, too-many-branches, too-few-public-methods
+"""Ansible module to register a kubernetes node to the cluster"""
import os
-import multiprocessing
-import socket
-from subprocess import check_output, Popen
-from decimal import *
DOCUMENTATION = '''
---
@@ -93,72 +102,170 @@ EXAMPLES = '''
class ClientConfigException(Exception):
+ """Client Configuration Exception"""
pass
-class ClientConfig:
+class ClientConfig(object):
+ """ Representation of a client config
+
+ Attributes:
+ config (dict): dictionary representing the client configuration
+
+ Args:
+ client_opts (list of str): client options to use
+        module (AnsibleModule): module instance used to run the client command
+
+    Raises:
+        ClientConfigException: if the retrieved config is missing required values
+ """
def __init__(self, client_opts, module):
- _, output, error = module.run_command(["/usr/bin/openshift", "ex",
- "config", "view", "-o",
- "json"] + client_opts,
- check_rc = True)
+ kubectl = module.params['kubectl_cmd']
+ _, output, _ = module.run_command((kubectl +
+ ["config", "view", "-o", "json"] +
+ client_opts), check_rc=True)
self.config = json.loads(output)
if not (bool(self.config['clusters']) or
bool(self.config['contexts']) or
bool(self.config['current-context']) or
bool(self.config['users'])):
- raise ClientConfigException(msg="Client config missing required " \
- "values",
- output=output)
+ raise ClientConfigException(
+ "Client config missing required values: %s" % output
+ )
def current_context(self):
+ """ Gets the current context for the client config
+
+ Returns:
+ str: The current context as set in the config
+ """
return self.config['current-context']
def section_has_value(self, section_name, value):
+ """ Test if specified section contains a value
+
+ Args:
+ section_name (str): config section to test
+ value (str): value to test if present
+ Returns:
+            bool: True if the value is present, False otherwise
+ """
section = self.config[section_name]
if isinstance(section, dict):
return value in section
else:
val = next((item for item in section
- if item['name'] == value), None)
+ if item['name'] == value), None)
return val is not None
def has_context(self, context):
+ """ Test if specified context exists in config
+
+ Args:
+ context (str): value to test if present
+ Returns:
+            bool: True if the context exists, False otherwise
+ """
return self.section_has_value('contexts', context)
def has_user(self, user):
+ """ Test if specified user exists in config
+
+ Args:
+            user (str): user to test if present
+        Returns:
+            bool: True if the user exists, False otherwise
+ """
return self.section_has_value('users', user)
def has_cluster(self, cluster):
+ """ Test if specified cluster exists in config
+
+ Args:
+            cluster (str): cluster to test if present
+        Returns:
+            bool: True if the cluster exists, False otherwise
+ """
return self.section_has_value('clusters', cluster)
def get_value_for_context(self, context, attribute):
+ """ Get the value of attribute in context
+
+ Args:
+ context (str): context to search
+ attribute (str): attribute wanted
+ Returns:
+ str: The value for attribute in context
+ """
contexts = self.config['contexts']
if isinstance(contexts, dict):
return contexts[context][attribute]
else:
return next((c['context'][attribute] for c in contexts
- if c['name'] == context), None)
+ if c['name'] == context), None)
def get_user_for_context(self, context):
+ """ Get the user attribute in context
+
+ Args:
+ context (str): context to search
+ Returns:
+ str: The value for the attribute in context
+ """
return self.get_value_for_context(context, 'user')
def get_cluster_for_context(self, context):
+ """ Get the cluster attribute in context
+
+ Args:
+ context (str): context to search
+ Returns:
+ str: The value for the attribute in context
+ """
return self.get_value_for_context(context, 'cluster')
-class Util:
+ def get_namespace_for_context(self, context):
+ """ Get the namespace attribute in context
+
+ Args:
+ context (str): context to search
+ Returns:
+ str: The value for the attribute in context
+ """
+ return self.get_value_for_context(context, 'namespace')
+
+class Util(object):
+ """Utility methods"""
@staticmethod
def remove_empty_elements(mapping):
+ """ Recursively removes empty elements from a dict
+
+ Args:
+ mapping (dict): dict to remove empty attributes from
+ Returns:
+ dict: A copy of the dict with empty elements removed
+ """
if isinstance(mapping, dict):
- m = mapping.copy()
+ copy = mapping.copy()
for key, val in mapping.iteritems():
if not val:
- del m[key]
- return m
+ del copy[key]
+ return copy
else:
return mapping
-class NodeResources:
+class NodeResources(object):
+ """ Kubernetes Node Resources
+
+ Attributes:
+ resources (dict): A dictionary representing the node resources
+
+ Args:
+ version (str): kubernetes api version
+ cpu (str): string representation of the cpu resources for the node
+ memory (str): string representation of the memory resources for the
+ node
+ """
def __init__(self, version, cpu=None, memory=None):
if version == 'v1beta1':
self.resources = dict(capacity=dict())
@@ -166,10 +273,31 @@ class NodeResources:
self.resources['capacity']['memory'] = memory
def get_resources(self):
+ """ Get the dict representing the node resources
+
+ Returns:
+ dict: representation of the node resources with any empty
+ elements removed
+ """
return Util.remove_empty_elements(self.resources)
-class NodeSpec:
- def __init__(self, version, cpu=None, memory=None, cidr=None, externalID=None):
+class NodeSpec(object):
+ """ Kubernetes Node Spec
+
+ Attributes:
+        spec (dict): A dictionary representing the node spec
+
+ Args:
+ version (str): kubernetes api version
+ cpu (str): string representation of the cpu resources for the node
+ memory (str): string representation of the memory resources for the
+ node
+ cidr (str): string representation of the cidr block available for
+ the node
+ externalID (str): The external id of the node
+ """
+ def __init__(self, version, cpu=None, memory=None, cidr=None,
+ externalID=None):
if version == 'v1beta3':
self.spec = dict(podCIDR=cidr, externalID=externalID,
capacity=dict())
@@ -177,67 +305,128 @@ class NodeSpec:
self.spec['capacity']['memory'] = memory
def get_spec(self):
+ """ Get the dict representing the node spec
+
+ Returns:
+ dict: representation of the node spec with any empty elements
+ removed
+ """
return Util.remove_empty_elements(self.spec)
-class NodeStatus:
- def addAddresses(self, addressType, addresses):
- addressList = []
+class NodeStatus(object):
+ """ Kubernetes Node Status
+
+ Attributes:
+ status (dict): A dictionary representing the node status
+
+ Args:
+ version (str): kubernetes api version
+ externalIPs (list, optional): externalIPs for the node
+ internalIPs (list, optional): internalIPs for the node
+ hostnames (list, optional): hostnames for the node
+ """
+ def add_addresses(self, address_type, addresses):
+ """ Adds addresses of the specified type
+
+ Args:
+ address_type (str): address type
+ addresses (list): addresses to add
+ """
+ address_list = []
for address in addresses:
- addressList.append(dict(type=addressType, address=address))
- return addressList
+ address_list.append(dict(type=address_type, address=address))
+ return address_list
- def __init__(self, version, externalIPs = [], internalIPs = [],
- hostnames = []):
+ def __init__(self, version, externalIPs=None, internalIPs=None,
+ hostnames=None):
if version == 'v1beta3':
- self.status = dict(addresses = addAddresses('ExternalIP',
- externalIPs) +
- addAddresses('InternalIP',
- internalIPs) +
- addAddresses('Hostname',
- hostnames))
+ addresses = []
+ if externalIPs is not None:
+ addresses += self.add_addresses('ExternalIP', externalIPs)
+ if internalIPs is not None:
+ addresses += self.add_addresses('InternalIP', internalIPs)
+ if hostnames is not None:
+ addresses += self.add_addresses('Hostname', hostnames)
+
+ self.status = dict(addresses=addresses)
def get_status(self):
+ """ Get the dict representing the node status
+
+ Returns:
+ dict: representation of the node status with any empty elements
+ removed
+ """
return Util.remove_empty_elements(self.status)
-class Node:
- def __init__(self, module, client_opts, version='v1beta1', name=None,
- hostIP = None, hostnames=[], externalIPs=[], internalIPs=[],
- cpu=None, memory=None, labels=dict(), annotations=dict(),
- podCIDR=None, externalID=None):
+class Node(object):
+ """ Kubernetes Node
+
+ Attributes:
+        node (dict): A dictionary representing the node
+
+ Args:
+ module (AnsibleModule):
+ client_opts (list): client connection options
+ version (str, optional): kubernetes api version
+ node_name (str, optional): name for node
+ hostIP (str, optional): node host ip
+ hostnames (list, optional): hostnames for the node
+ externalIPs (list, optional): externalIPs for the node
+ internalIPs (list, optional): internalIPs for the node
+ cpu (str, optional): cpu resources for the node
+ memory (str, optional): memory resources for the node
+        labels (dict, optional): labels for the node
+        annotations (dict, optional): annotations for the node
+        podCIDR (str, optional): cidr block to use for pods
+ externalID (str, optional): external id of the node
+ """
+ def __init__(self, module, client_opts, version='v1beta1', node_name=None,
+ hostIP=None, hostnames=None, externalIPs=None,
+ internalIPs=None, cpu=None, memory=None, labels=None,
+ annotations=None, podCIDR=None, externalID=None):
self.module = module
self.client_opts = client_opts
if version == 'v1beta1':
- self.node = dict(id = name,
- kind = 'Node',
- apiVersion = version,
- hostIP = hostIP,
- resources = NodeResources(version, cpu, memory),
- cidr = podCIDR,
- labels = labels,
- annotations = annotations,
- externalID = externalID
- )
+ self.node = dict(id=node_name,
+ kind='Node',
+ apiVersion=version,
+ hostIP=hostIP,
+ resources=NodeResources(version, cpu, memory),
+ cidr=podCIDR,
+ labels=labels,
+ annotations=annotations,
+ externalID=externalID)
elif version == 'v1beta3':
- metadata = dict(name = name,
- labels = labels,
- annotations = annotations
- )
- self.node = dict(kind = 'Node',
- apiVersion = version,
- metadata = metadata,
- spec = NodeSpec(version, cpu, memory, podCIDR,
- externalID),
- status = NodeStatus(version, externalIPs,
- internalIPs, hostnames),
- )
+ metadata = dict(name=node_name,
+ labels=labels,
+ annotations=annotations)
+ self.node = dict(kind='Node',
+ apiVersion=version,
+ metadata=metadata,
+ spec=NodeSpec(version, cpu, memory, podCIDR,
+ externalID),
+ status=NodeStatus(version, externalIPs,
+ internalIPs, hostnames))
def get_name(self):
+ """ Get the name for the node
+
+ Returns:
+ str: node name
+ """
if self.node['apiVersion'] == 'v1beta1':
return self.node['id']
elif self.node['apiVersion'] == 'v1beta3':
return self.node['name']
def get_node(self):
+ """ Get the dict representing the node
+
+ Returns:
+ dict: representation of the node with any empty elements
+ removed
+ """
node = self.node.copy()
if self.node['apiVersion'] == 'v1beta1':
node['resources'] = self.node['resources'].get_resources()
@@ -247,52 +436,82 @@ class Node:
return Util.remove_empty_elements(node)
def exists(self):
- _, output, error = self.module.run_command(["/usr/bin/osc", "get",
- "nodes"] + self.client_opts,
- check_rc = True)
+ """ Tests if the node already exists
+
+ Returns:
+ bool: True if node exists, otherwise False
+ """
+ kubectl = self.module.params['kubectl_cmd']
+ _, output, _ = self.module.run_command((kubectl + ["get", "nodes"] +
+ self.client_opts),
+ check_rc=True)
if re.search(self.module.params['name'], output, re.MULTILINE):
return True
return False
def create(self):
- cmd = ['/usr/bin/osc'] + self.client_opts + ['create', 'node', '-f', '-']
- rc, output, error = self.module.run_command(cmd,
- data=self.module.jsonify(self.get_node()))
- if rc != 0:
+ """ Creates the node
+
+ Returns:
+ bool: True if node creation successful
+ """
+ kubectl = self.module.params['kubectl_cmd']
+ cmd = kubectl + self.client_opts + ['create', '-f', '-']
+ exit_code, output, error = self.module.run_command(
+ cmd, data=self.module.jsonify(self.get_node())
+ )
+ if exit_code != 0:
if re.search("minion \"%s\" already exists" % self.get_name(),
error):
- self.module.exit_json(changed=False,
- msg="node definition already exists",
- node=self.get_node())
+ self.module.exit_json(msg="node definition already exists",
+ changed=False, node=self.get_node())
else:
- self.module.fail_json(msg="Node creation failed.", rc=rc,
- output=output, error=error,
- node=self.get_node())
+ self.module.fail_json(msg="Node creation failed.",
+ exit_code=exit_code,
+ output=output, error=error,
+ node=self.get_node())
else:
return True
def main():
+ """ main """
module = AnsibleModule(
- argument_spec = dict(
- name = dict(required = True, type = 'str'),
- host_ip = dict(type = 'str'),
- hostnames = dict(type = 'list', default = []),
- external_ips = dict(type = 'list', default = []),
- internal_ips = dict(type = 'list', default = []),
- api_version = dict(type = 'str', default = 'v1beta1', # TODO: after kube rebase, we can default to v1beta3
- choices = ['v1beta1', 'v1beta3']),
- cpu = dict(type = 'str'),
- memory = dict(type = 'str'),
- labels = dict(type = 'dict', default = {}), # TODO: needs documented
- annotations = dict(type = 'dict', default = {}), # TODO: needs documented
- pod_cidr = dict(type = 'str'), # TODO: needs documented
- external_id = dict(type = 'str'), # TODO: needs documented
- client_config = dict(type = 'str'), # TODO: needs documented
- client_cluster = dict(type = 'str', default = 'master'), # TODO: needs documented
- client_context = dict(type = 'str', default = 'master'), # TODO: needs documented
- client_user = dict(type = 'str', default = 'admin') # TODO: needs documented
+ argument_spec=dict(
+ name=dict(required=True, type='str'),
+ host_ip=dict(type='str'),
+ hostnames=dict(type='list', default=[]),
+ external_ips=dict(type='list', default=[]),
+ internal_ips=dict(type='list', default=[]),
+ api_version=dict(type='str', default='v1beta1',
+ choices=['v1beta1', 'v1beta3']),
+ cpu=dict(type='str'),
+ memory=dict(type='str'),
+            # TODO: needs documentation
+            labels=dict(type='dict', default={}),
+            # TODO: needs documentation
+            annotations=dict(type='dict', default={}),
+            # TODO: needs documentation
+            pod_cidr=dict(type='str'),
+            # TODO: needs documentation
+            external_id=dict(type='str'),
+            # TODO: needs documentation
+            client_config=dict(type='str'),
+            # TODO: needs documentation
+            client_cluster=dict(type='str', default='master'),
+            # TODO: needs documentation
+            client_context=dict(type='str', default='default'),
+            # TODO: needs documentation
+            client_namespace=dict(type='str', default='default'),
+            # TODO: needs documentation
+            client_user=dict(type='str', default='system:openshift-client'),
+            # TODO: needs documentation
+            kubectl_cmd=dict(type='list', default=['kubectl']),
+            # TODO: needs documentation
+            kubeconfig_flag=dict(type='str'),
+            # TODO: needs documentation
+ default_client_config=dict(type='str')
),
- mutually_exclusive = [
+ mutually_exclusive=[
['host_ip', 'external_ips'],
['host_ip', 'internal_ips'],
['host_ip', 'hostnames'],
@@ -300,7 +519,10 @@ def main():
supports_check_mode=True
)
- user_has_client_config = os.path.exists(os.path.expanduser('~/.kube/.kubeconfig'))
+ client_config = '~/.kube/.kubeconfig'
+    if module.params['default_client_config']:
+ client_config = module.params['default_client_config']
+ user_has_client_config = os.path.exists(os.path.expanduser(client_config))
if not (user_has_client_config or module.params['client_config']):
module.fail_json(msg="Could not locate client configuration, "
"client_config must be specified if "
@@ -308,12 +530,17 @@ def main():
client_opts = []
if module.params['client_config']:
- client_opts.append("--kubeconfig=%s" % module.params['client_config'])
+ kubeconfig_flag = '--kubeconfig'
+        if module.params['kubeconfig_flag']:
+ kubeconfig_flag = module.params['kubeconfig_flag']
+ client_opts.append(kubeconfig_flag + '=' +
+ os.path.expanduser(module.params['client_config']))
try:
config = ClientConfig(client_opts, module)
- except ClientConfigException as e:
- module.fail_json(msg="Failed to get client configuration", exception=e)
+ except ClientConfigException as ex:
+ module.fail_json(msg="Failed to get client configuration",
+ exception=str(ex))
client_context = module.params['client_context']
if config.has_context(client_context):
@@ -333,14 +560,16 @@ def main():
client_cluster = module.params['client_cluster']
if config.has_cluster(client_cluster):
- if client_cluster != config.get_cluster_for_context(client_cluster):
+ if client_cluster != config.get_cluster_for_context(client_context):
client_opts.append("--cluster=%s" % client_cluster)
else:
module.fail_json(msg="Cluster %s not found in client config" %
client_cluster)
- # TODO: provide sane defaults for some (like hostname, externalIP,
- # internalIP, etc)
+ client_namespace = module.params['client_namespace']
+ if client_namespace != config.get_namespace_for_context(client_context):
+ client_opts.append("--namespace=%s" % client_namespace)
+
node = Node(module, client_opts, module.params['api_version'],
module.params['name'], module.params['host_ip'],
module.params['hostnames'], module.params['external_ips'],
@@ -364,7 +593,8 @@ def main():
module.fail_json(msg="Unknown error creating node",
node=node.get_node())
-
+# ignore pylint errors related to the module_utils import
+# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
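kubernetes_register_node now takes the client binary as a list (kubectl_cmd), so the register task can swap in ['osc'] and per-call options stay separate from the base command. A hypothetical sketch of the command assembly:

    def build_cmd(kubectl_cmd, client_opts, action):
        # The base command is a list, so multi-word wrappers also work.
        return list(kubectl_cmd) + list(client_opts) + list(action)

    cmd = build_cmd(['osc'],
                    ['--kubeconfig=/root/.config/openshift/.config',
                     '--context=default'],
                    ['create', '-f', '-'])
    print(' '.join(cmd))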
diff --git a/roles/openshift_register_nodes/tasks/main.yml b/roles/openshift_register_nodes/tasks/main.yml
index 7319b88b1..d4d72d126 100644
--- a/roles/openshift_register_nodes/tasks/main.yml
+++ b/roles/openshift_register_nodes/tasks/main.yml
@@ -3,53 +3,44 @@
# TODO: recreate master/node configs if settings that affect the configs
# change (hostname, public_hostname, ip, public_ip, etc)
-# TODO: create a failed_when condition
-- name: Create node server certificates
- command: >
- /usr/bin/openshift admin create-server-cert
- --overwrite=false
- --cert={{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}/server.crt
- --key={{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}/server.key
- --hostnames={{ [item.openshift.common.hostname,
- item.openshift.common.public_hostname]|unique|join(",") }}
- args:
- chdir: "{{ openshift_cert_dir_parent }}"
- creates: "{{ openshift_cert_dir_abs }}/node-{{ item.openshift.common.hostname }}/server.crt"
- with_items: openshift_nodes
- register: server_cert_result
+# TODO: use a template lookup here
# TODO: create a failed_when condition
-- name: Create node client certificates
- command: >
- /usr/bin/openshift admin create-node-cert
- --overwrite=false
- --cert={{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}/cert.crt
- --key={{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}/key.key
- --node-name={{ item.openshift.common.hostname }}
- args:
- chdir: "{{ openshift_cert_dir_parent }}"
- creates: "{{ openshift_cert_dir_abs }}/node-{{ item.openshift.common.hostname }}/cert.crt"
- with_items: openshift_nodes
- register: node_cert_result
+- name: Use enterprise default for openshift_registry_url if not set
+ set_fact:
+ openshift_registry_url: "openshift3_beta/ose-${component}:${version}"
+ when: openshift.common.deployment_type == 'enterprise' and openshift_registry_url is not defined
-# TODO: create a failed_when condition
-- name: Create kubeconfigs for nodes
+- name: Use online default for openshift_registry_url if not set
+ set_fact:
+ openshift_registry_url: "docker-registry.ops.rhcloud.com/openshift3_beta/ose-${component}:${version}"
+ when: openshift.common.deployment_type == 'online' and openshift_registry_url is not defined
+
+- name: Create node config
command: >
- /usr/bin/openshift admin create-kubeconfig
- --client-certificate={{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}/cert.crt
- --client-key={{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}/key.key
- --kubeconfig={{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}/.kubeconfig
- --master={{ openshift.master.api_url }}
- --public-master={{ openshift.master.public_api_url }}
+ /usr/bin/openshift admin create-node-config
+ --node-dir={{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}
+ --node={{ item.openshift.common.hostname }}
+ --hostnames={{ [item.openshift.common.hostname, item.openshift.common.public_hostname]|unique|join(",") }}
+ --dns-domain={{ openshift.dns.domain }}
+ --dns-ip={{ openshift.dns.ip }}
+ --master={{ openshift.master.api_url }}
+ --signer-key={{ openshift_master_ca_key }}
+ --signer-cert={{ openshift_master_ca_cert }}
+ --certificate-authority={{ openshift_master_ca_cert }}
+ --signer-serial={{ openshift_master_ca_dir }}/serial.txt
+ --node-client-certificate-authority={{ openshift_master_ca_cert }}
+ {{ ('--images=' ~ openshift_registry_url) if openshift_registry_url is defined else '' }}
+ --listen=https://0.0.0.0:10250
args:
- chdir: "{{ openshift_cert_dir_parent }}"
- creates: "{{ openshift_cert_dir_abs }}/node-{{ item.openshift.common.hostname }}/.kubeconfig"
+ chdir: "{{ openshift_cert_parent_dir }}"
+ creates: "{{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}"
with_items: openshift_nodes
- register: kubeconfig_result
- name: Register unregistered nodes
kubernetes_register_node:
- client_user: openshift-client
+ kubectl_cmd: ['osc']
+ default_client_config: '~/.config/openshift/.config'
name: "{{ item.openshift.common.hostname }}"
api_version: "{{ openshift_kube_api_version }}"
cpu: "{{ item.openshift.node.resources_cpu | default(None) }}"
@@ -61,7 +52,5 @@
external_id: "{{ item.openshift.node.external_id }}"
# TODO: support customizing other attributes such as: client_config,
# client_cluster, client_context, client_user
- # TODO: update for v1beta3 changes after rebase: hostnames, external_ips,
- # internal_ips, external_id
with_items: openshift_nodes
register: register_result
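"Create node config" leans on Ansible's creates: argument for idempotence: the openshift admin command runs only while its per-node output directory is absent. A sketch of that guard (command and path illustrative):

    import os
    import subprocess

    def run_unless_created(cmd, creates):
        # Ansible skips a command task when its `creates:` path already exists.
        if os.path.exists(creates):
            return 'skipped'
        subprocess.check_call(cmd)
        return 'changed'

    node_dir = '/var/lib/openshift/openshift.local.certificates/node-example.com'
    # run_unless_created(['/usr/bin/openshift', 'admin', 'create-node-config'], node_dir)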
diff --git a/roles/openshift_register_nodes/vars/main.yml b/roles/openshift_register_nodes/vars/main.yml
new file mode 100644
index 000000000..bd497f08f
--- /dev/null
+++ b/roles/openshift_register_nodes/vars/main.yml
@@ -0,0 +1,7 @@
+---
+openshift_cert_parent_dir: /var/lib/openshift
+openshift_cert_relative_dir: openshift.local.certificates
+openshift_cert_dir: "{{ openshift_cert_parent_dir }}/{{ openshift_cert_relative_dir }}"
+openshift_master_ca_dir: "{{ openshift_cert_dir }}/ca"
+openshift_master_ca_cert: "{{ openshift_master_ca_dir }}/cert.crt"
+openshift_master_ca_key: "{{ openshift_master_ca_dir }}/key.key"
diff --git a/roles/openshift_repos/README.md b/roles/openshift_repos/README.md
index 6713e11fc..6bbedd839 100644
--- a/roles/openshift_repos/README.md
+++ b/roles/openshift_repos/README.md
@@ -14,7 +14,7 @@ Role Variables
| Name | Default value | |
|-------------------------------|---------------|----------------------------------------------|
-| openshift_deployment_type | online | Possible values enterprise, origin, online |
+| openshift_deployment_type    | None          | Possible values: enterprise, origin, online  |
| openshift_additional_repos | {} | TODO |
Dependencies
diff --git a/roles/openshift_repos/defaults/main.yaml b/roles/openshift_repos/defaults/main.yaml
index 1730207f4..7c5a14cd7 100644
--- a/roles/openshift_repos/defaults/main.yaml
+++ b/roles/openshift_repos/defaults/main.yaml
@@ -1,7 +1,2 @@
---
-# TODO: once we are able to configure/deploy origin using the openshift roles,
-# then we should default to origin
-
-# TODO: push the defaulting of these values to the openshift_facts module
-openshift_deployment_type: online
openshift_additional_repos: {}
diff --git a/roles/openshift_repos/files/online/RPM-GPG-KEY-redhat-beta b/roles/openshift_repos/files/online/RPM-GPG-KEY-redhat-beta
deleted file mode 100644
index 7b40671a4..000000000
--- a/roles/openshift_repos/files/online/RPM-GPG-KEY-redhat-beta
+++ /dev/null
@@ -1,61 +0,0 @@
------BEGIN PGP PUBLIC KEY BLOCK-----
-Version: GnuPG v1.2.6 (GNU/Linux)
-
-mQINBEmkAzABEAC2/c7bP1lHQ3XScxbIk0LQWe1YOiibQBRLwf8Si5PktgtuPibT
-kKpZjw8p4D+fM7jD1WUzUE0X7tXg2l/eUlMM4dw6XJAQ1AmEOtlwSg7rrMtTvM0A
-BEtI7Km6fC6sU6RtBMdcqD1cH/6dbsfh8muznVA7UlX+PRBHVzdWzj6y8h84dBjo
-gzcbYu9Hezqgj/lLzicqsSZPz9UdXiRTRAIhp8V30BD8uRaaa0KDDnD6IzJv3D9P
-xQWbFM4Z12GN9LyeZqmD7bpKzZmXG/3drvfXVisXaXp3M07t3NlBa3Dt8NFIKZ0D
-FRXBz5bvzxRVmdH6DtkDWXDPOt+Wdm1rZrCOrySFpBZQRpHw12eo1M1lirANIov7
-Z+V1Qh/aBxj5EUu32u9ZpjAPPNtQF6F/KjaoHHHmEQAuj4DLex4LY646Hv1rcv2i
-QFuCdvLKQGSiFBrfZH0j/IX3/0JXQlZzb3MuMFPxLXGAoAV9UP/Sw/WTmAuTzFVm
-G13UYFeMwrToOiqcX2VcK0aC1FCcTP2z4JW3PsWvU8rUDRUYfoXovc7eg4Vn5wHt
-0NBYsNhYiAAf320AUIHzQZYi38JgVwuJfFu43tJZE4Vig++RQq6tsEx9Ftz3EwRR
-fJ9z9mEvEiieZm+vbOvMvIuimFVPSCmLH+bI649K8eZlVRWsx3EXCVb0nQARAQAB
-tDBSZWQgSGF0LCBJbmMuIChiZXRhIGtleSAyKSA8c2VjdXJpdHlAcmVkaGF0LmNv
-bT6JAjYEEwECACAFAkpSM+cCGwMGCwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRCT
-ioDK8hVB6/9tEAC0+KmzeKceXQ/GTUoU6jy9vtkFCFrmv+c7ol4XpdTt0QhqBOwy
-6m2mKWwmm8KfYfy0cADQ4y/EcoXl7FtFBwYmkCuEQGXhTDn9DvVjhooIq59LEMBQ
-OW879RwwzRIZ8ebbjMUjDPF5MfPQqP2LBu9N4KvXlZp4voykwuuaJ+cbsKZR6pZ6
-0RQKPHKP+NgUFC0fff7XY9cuOZZWFAeKRhLN2K7bnRHKxp+kELWb6R9ZfrYwZjWc
-MIPbTd1khE53L4NTfpWfAnJRtkPSDOKEGVlVLtLq4HEAxQt07kbslqISRWyXER3u
-QOJj64D1ZiIMz6t6uZ424VE4ry9rBR0Jz55cMMx5O/ni9x3xzFUgH8Su2yM0r3jE
-Rf24+tbOaPf7tebyx4OKe+JW95hNVstWUDyGbs6K9qGfI/pICuO1nMMFTo6GqzQ6
-DwLZvJ9QdXo7ujEtySZnfu42aycaQ9ZLC2DOCQCUBY350Hx6FLW3O546TAvpTfk0
-B6x+DV7mJQH7MGmRXQsE7TLBJKjq28Cn4tVp04PmybQyTxZdGA/8zY6pPl6xyVMH
-V68hSBKEVT/rlouOHuxfdmZva1DhVvUC6Xj7+iTMTVJUAq/4Uyn31P1OJmA2a0PT
-CAqWkbJSgKFccsjPoTbLyxhuMSNkEZFHvlZrSK9vnPzmfiRH0Orx3wYpMQ==
-=21pb
------END PGP PUBLIC KEY BLOCK-----
-The following public key can be used to verify RPM packages built and
-signed by Red Hat, Inc. for this beta using `rpm -K' using the GNU GPG
-package. Questions about this key should be sent to security@redhat.com.
-
-
------BEGIN PGP PUBLIC KEY BLOCK-----
-Version: GnuPG v1.0.6 (GNU/Linux)
-Comment: For info see http://www.gnupg.org
-
-mQGiBDySTqsRBACzc7xuCIp10oj5B2PAV4XzDeVxprv/WTMreSNSK+iC0bEz0IBp
-Vnn++qtyiXfH+bGIE9jqZgIEnpttWhUOaU5LhcLFzy+m8NWfngIFP9QfGmGAe9Gd
-LFeAdhj4RmSG/vgr7vDd83Hz22dv403Ar/sliWO4vDOrMmZBG57WGYTWtwCgkMsi
-UUQuJ6slbzKn82w+bYxOlL0EAIylWJGaTkKOTL5DqVR3ik9aT0Dt3FNVYiuhcKBe
-II4E3KOIVA9kO8in1IZjx2gs6K2UV+GsoAVANdfKL7l9O+k+J8OxhE74oycvYJxW
-QzCgXMZkNcvW5wyXwEMcr6TVd/5BGztcMw8oT3/l2MtAEG/vn1XaWToRSO1XDMDz
-+AjUA/4m0mTkN8S4wjzJG8lqN7+quW3UOaiCe8J3SFrrrhE0XbY9cTJI/9nuXHU1
-VjqOSmXQYH2Db7UOroFTBiWhlAedA4O4yuK52AJnvSsHbnJSEmn9rpo5z1Q8F+qI
-mDlzriJdrIrVLeDiUeTlpH3kpG38D7007GhXBV72k1gpMoMcpbQ3UmVkIEhhdCwg
-SW5jLiAoQmV0YSBUZXN0IFNvZnR3YXJlKSA8cmF3aGlkZUByZWRoYXQuY29tPohX
-BBMRAgAXBQI8l5p/BQsHCgMEAxUDAgMWAgECF4AACgkQ/TcmiYl9oHqdeQCfZjw4
-F9sir3XfRAjVe9kYNcQ8hnIAn0WgyT7H5RriWYTOCfauOmd+cAW4iEYEEBECAAYF
-AjyXmqQACgkQIZGAzdtCpg5nDQCfepuRUyuVJvhuQkPWySETYvRw+WoAnjAWhx6q
-0npMx4OE1JGFi8ymKXktuQENBDySTq4QBADKL/mK7S8E3synxISlu7R6fUvu07Oc
-RoX96n0Di6T+BS99hC44XzHjMDhUX2ZzVvYS88EZXoUDDkB/8g7SwZrOJ/QE1zrI
-JmSVciNhSYWwqeT40Evs88ajZUfDiNbS/cSC6oui98iS4vxd7sE7IPY+FSx9vuAR
-xOa9vBnJY/dx0wADBQQAosm+Iltt2uigC6LJzxNOoIdB5r0GqTC1o5sHCeNqXJhU
-ExAG8m74uzMlYVLOpGZi4y4NwwAWvCWC0MWWnnu+LGFy1wKiJKRjhv5F+WkFutY5
-WHV5L44vp9jSIlBCRG+84jheTh8xqhndM9wOfPwWdYYu1vxrB8Tn6kA17PcYfHSI
-RgQYEQIABgUCPJJergAKCRD9NyaJiX2geiCPAJ4nEM4NtI9Uj8lONDk6FU86PmoL
-yACfb68fBd2pWEzLKsOk9imIobHHpzE=
-=gpIn
------END PGP PUBLIC KEY BLOCK-----
diff --git a/roles/openshift_repos/files/online/RPM-GPG-KEY-redhat-release b/roles/openshift_repos/files/online/RPM-GPG-KEY-redhat-release
deleted file mode 100644
index 0f83b622d..000000000
--- a/roles/openshift_repos/files/online/RPM-GPG-KEY-redhat-release
+++ /dev/null
@@ -1,63 +0,0 @@
-The following public key can be used to verify RPM packages built and
-signed by Red Hat, Inc. This key is used for packages in Red Hat
-products shipped after November 2009, and for all updates to those
-products.
-
-Questions about this key should be sent to security@redhat.com.
-
-pub 4096R/FD431D51 2009-10-22 Red Hat, Inc. (release key 2) <security@redhat.com>
-
------BEGIN PGP PUBLIC KEY BLOCK-----
-Version: GnuPG v1.2.6 (GNU/Linux)
-
-mQINBErgSTsBEACh2A4b0O9t+vzC9VrVtL1AKvUWi9OPCjkvR7Xd8DtJxeeMZ5eF
-0HtzIG58qDRybwUe89FZprB1ffuUKzdE+HcL3FbNWSSOXVjZIersdXyH3NvnLLLF
-0DNRB2ix3bXG9Rh/RXpFsNxDp2CEMdUvbYCzE79K1EnUTVh1L0Of023FtPSZXX0c
-u7Pb5DI5lX5YeoXO6RoodrIGYJsVBQWnrWw4xNTconUfNPk0EGZtEnzvH2zyPoJh
-XGF+Ncu9XwbalnYde10OCvSWAZ5zTCpoLMTvQjWpbCdWXJzCm6G+/hx9upke546H
-5IjtYm4dTIVTnc3wvDiODgBKRzOl9rEOCIgOuGtDxRxcQkjrC+xvg5Vkqn7vBUyW
-9pHedOU+PoF3DGOM+dqv+eNKBvh9YF9ugFAQBkcG7viZgvGEMGGUpzNgN7XnS1gj
-/DPo9mZESOYnKceve2tIC87p2hqjrxOHuI7fkZYeNIcAoa83rBltFXaBDYhWAKS1
-PcXS1/7JzP0ky7d0L6Xbu/If5kqWQpKwUInXtySRkuraVfuK3Bpa+X1XecWi24JY
-HVtlNX025xx1ewVzGNCTlWn1skQN2OOoQTV4C8/qFpTW6DTWYurd4+fE0OJFJZQF
-buhfXYwmRlVOgN5i77NTIJZJQfYFj38c/Iv5vZBPokO6mffrOTv3MHWVgQARAQAB
-tDNSZWQgSGF0LCBJbmMuIChyZWxlYXNlIGtleSAyKSA8c2VjdXJpdHlAcmVkaGF0
-LmNvbT6JAjYEEwECACAFAkrgSTsCGwMGCwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAK
-CRAZni+R/UMdUWzpD/9s5SFR/ZF3yjY5VLUFLMXIKUztNN3oc45fyLdTI3+UClKC
-2tEruzYjqNHhqAEXa2sN1fMrsuKec61Ll2NfvJjkLKDvgVIh7kM7aslNYVOP6BTf
-C/JJ7/ufz3UZmyViH/WDl+AYdgk3JqCIO5w5ryrC9IyBzYv2m0HqYbWfphY3uHw5
-un3ndLJcu8+BGP5F+ONQEGl+DRH58Il9Jp3HwbRa7dvkPgEhfFR+1hI+Btta2C7E
-0/2NKzCxZw7Lx3PBRcU92YKyaEihfy/aQKZCAuyfKiMvsmzs+4poIX7I9NQCJpyE
-IGfINoZ7VxqHwRn/d5mw2MZTJjbzSf+Um9YJyA0iEEyD6qjriWQRbuxpQXmlAJbh
-8okZ4gbVFv1F8MzK+4R8VvWJ0XxgtikSo72fHjwha7MAjqFnOq6eo6fEC/75g3NL
-Ght5VdpGuHk0vbdENHMC8wS99e5qXGNDued3hlTavDMlEAHl34q2H9nakTGRF5Ki
-JUfNh3DVRGhg8cMIti21njiRh7gyFI2OccATY7bBSr79JhuNwelHuxLrCFpY7V25
-OFktl15jZJaMxuQBqYdBgSay2G0U6D1+7VsWufpzd/Abx1/c3oi9ZaJvW22kAggq
-dzdA27UUYjWvx42w9menJwh/0jeQcTecIUd0d0rFcw/c1pvgMMl/Q73yzKgKYw==
-=zbHE
------END PGP PUBLIC KEY BLOCK-----
-The following public key can be used to verify RPM packages built and
-signed by Red Hat, Inc. This key is a supporting (auxiliary) key for
-Red Hat products shipped after November 2006 and for all updates to
-those products.
-
-Questions about this key should be sent to security@redhat.com.
-
------BEGIN PGP PUBLIC KEY BLOCK-----
-Version: GnuPG v1.2.6 (GNU/Linux)
-
-mQGiBEVwDGkRBACwPhZIpvkjI8wV9sFTDoqyPLx1ub8Sd/w+YuI5Ovm49mvvEQVT
-VLg8FgE5JlST59AbsLDyVtRa9CxIvN5syBVrWWWtHtDnnylFBcqG/A6J3bI4E9/A
-UtSL5Zxbav0+utP6f3wOpxQrxc+WIDVgpurdBKAQ3dsobGBqypeX6FXZ5wCgou6C
-yZpGIBqosJaDWLzNeOfb/70D/1thLkQyhW3JJ6cHCYJHNfBShvbLWBf6S231mgmu
-MyMlt8Kmipc9bw+saaAkSkVsQ/ZbfjrWB7e5kbMruKLVrH+nGhamlHYUGyAPtsPg
-Uj/NUSj5BmrCsOkMpn43ngTLssE9MLhSPj2nIHGFv9B+iVLvomDdwnaBRgQ1aK8z
-z6MAA/406yf5yVJ/MlTWs1/68VwDhosc9BtU1V5IE0NXgZUAfBJzzfVzzKQq6zJ2
-eZsMLhr96wbsW13zUZt1ing+ulwh2ee4meuJq6h/971JspFY/XBhcfq4qCNqVjsq
-SZnWoGdCO6J8CxPIemD2IUHzjoyyeEj3RVydup6pcWZAmhzkKrQzUmVkIEhhdCwg
-SW5jLiAoYXV4aWxpYXJ5IGtleSkgPHNlY3VyaXR5QHJlZGhhdC5jb20+iF4EExEC
-AB4FAkVwDGkCGwMGCwkIBwMCAxUCAwMWAgECHgECF4AACgkQRWiciC+mWOC1rQCg
-ooNLCFOzNPcvhd9Za8C801HmnsYAniCw3yzrCqtjYnxDDxlufH0FVTwX
-=d/bm
------END PGP PUBLIC KEY BLOCK-----
-
diff --git a/roles/openshift_repos/files/online/epel7-kubernetes.repo b/roles/openshift_repos/files/online/epel7-kubernetes.repo
deleted file mode 100644
index 1deae2939..000000000
--- a/roles/openshift_repos/files/online/epel7-kubernetes.repo
+++ /dev/null
@@ -1,6 +0,0 @@
-[maxamillion-epel7-kubernetes]
-name=Copr repo for epel7-kubernetes owned by maxamillion
-baseurl=http://copr-be.cloud.fedoraproject.org/results/maxamillion/epel7-kubernetes/epel-7-$basearch/
-skip_if_unavailable=True
-gpgcheck=0
-enabled=1
diff --git a/roles/openshift_repos/files/online/epel7-openshift.repo b/roles/openshift_repos/files/online/epel7-openshift.repo
deleted file mode 100644
index c7629872d..000000000
--- a/roles/openshift_repos/files/online/epel7-openshift.repo
+++ /dev/null
@@ -1,6 +0,0 @@
-[maxamillion-origin-next]
-name=Copr repo for origin-next owned by maxamillion
-baseurl=http://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/
-skip_if_unavailable=False
-gpgcheck=0
-enabled=1
diff --git a/roles/openshift_repos/files/online/oso-rhui-rhel-7-extras.repo b/roles/openshift_repos/files/online/oso-rhui-rhel-7-extras.repo
deleted file mode 100644
index cfe41f691..000000000
--- a/roles/openshift_repos/files/online/oso-rhui-rhel-7-extras.repo
+++ /dev/null
@@ -1,23 +0,0 @@
-[oso-rhui-rhel-server-extras]
-name=OpenShift Online RHUI Mirror RH Enterprise Linux - Extras
-baseurl=https://gce-mirror1.ops.rhcloud.com/libra/rhui-rhel-server-7-extras/
- https://mirror.ops.rhcloud.com/libra/rhui-rhel-server-7-extras/
-enabled=1
-gpgcheck=1
-gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release,file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-beta
-failovermethod=priority
-sslverify=False
-sslclientcert=/var/lib/yum/client-cert.pem
-sslclientkey=/var/lib/yum/client-key.pem
-
-[oso-rhui-rhel-server-extras-htb]
-name=OpenShift Online RHUI Mirror RH Enterprise Linux - Extras HTB
-baseurl=https://gce-mirror1.ops.rhcloud.com/libra/rhui-rhel-server-7-extras-htb/
- https://mirror.ops.rhcloud.com/libra/rhui-rhel-server-7-extras-htb/
-enabled=0
-gpgcheck=1
-gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release,file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-beta
-failovermethod=priority
-sslverify=False
-sslclientcert=/var/lib/yum/client-cert.pem
-sslclientkey=/var/lib/yum/client-key.pem
diff --git a/roles/openshift_repos/files/online/oso-rhui-rhel-7-server.repo b/roles/openshift_repos/files/online/oso-rhui-rhel-7-server.repo
deleted file mode 100644
index ddc93193d..000000000
--- a/roles/openshift_repos/files/online/oso-rhui-rhel-7-server.repo
+++ /dev/null
@@ -1,21 +0,0 @@
-[oso-rhui-rhel-server-releases]
-name=OpenShift Online RHUI Mirror RH Enterprise Linux 7
-baseurl=https://gce-mirror1.ops.rhcloud.com/libra/rhui-rhel-server-7-releases/
- https://mirror.ops.rhcloud.com/libra/rhui-rhel-server-7-releases/
-enabled=1
-gpgcheck=1
-gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release
-sslverify=False
-sslclientcert=/var/lib/yum/client-cert.pem
-sslclientkey=/var/lib/yum/client-key.pem
-
-[oso-rhui-rhel-server-releases-optional]
-name=OpenShift Online RHUI Mirror RH Enterprise Linux 7 - Optional
-baseurl=https://gce-mirror1.ops.rhcloud.com/libra/rhui-rhel-server-7-releases-optional/
- https://mirror.ops.rhcloud.com/libra/rhui-rhel-server-7-releases-optional/
-enabled=1
-gpgcheck=1
-gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release
-sslverify=False
-sslclientcert=/var/lib/yum/client-cert.pem
-sslclientkey=/var/lib/yum/client-key.pem
diff --git a/roles/openshift_repos/files/online/repos/enterprise-v3.repo b/roles/openshift_repos/files/online/repos/enterprise-v3.repo
new file mode 100644
index 000000000..d324c142a
--- /dev/null
+++ b/roles/openshift_repos/files/online/repos/enterprise-v3.repo
@@ -0,0 +1,10 @@
+[enterprise-v3]
+name=OpenShift Enterprise Beta3
+baseurl=https://gce-mirror1.ops.rhcloud.com/libra/libra-7-ose-beta3/
+ https://mirror.ops.rhcloud.com/libra/libra-7-ose-beta3/
+enabled=1
+gpgcheck=0
+failovermethod=priority
+sslverify=False
+sslclientcert=/var/lib/yum/client-cert.pem
+sslclientkey=/var/lib/yum/client-key.pem
\ No newline at end of file
diff --git a/roles/openshift_repos/files/online/rhel-7-libra-candidate.repo b/roles/openshift_repos/files/online/repos/rhel-7-libra-candidate.repo
index b4215679f..b4215679f 100644
--- a/roles/openshift_repos/files/online/rhel-7-libra-candidate.repo
+++ b/roles/openshift_repos/files/online/repos/rhel-7-libra-candidate.repo
diff --git a/roles/openshift_repos/files/origin/repos/maxamillion-origin-next-epel-7.repo b/roles/openshift_repos/files/origin/repos/maxamillion-origin-next-epel-7.repo
new file mode 100644
index 000000000..0b21e0a65
--- /dev/null
+++ b/roles/openshift_repos/files/origin/repos/maxamillion-origin-next-epel-7.repo
@@ -0,0 +1,7 @@
+[maxamillion-origin-next]
+name=Copr repo for origin-next owned by maxamillion
+baseurl=https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/
+skip_if_unavailable=True
+gpgcheck=1
+gpgkey=https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg
+enabled=1
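Editor's note: the new origin repo file flips the old copr definition from plain http with gpgcheck=0 to https with gpgcheck=1 plus an explicit gpgkey URL. A minimal sketch (not part of the patch; the filename argument is hypothetical) of sanity-checking such a .repo file, flagging sections that enable gpgcheck without naming a key:

```python
# Sketch: validate that any yum repo section with gpgcheck=1 also sets gpgkey.
import configparser

def check_repo_file(path):
    parser = configparser.ConfigParser()
    parser.read(path)
    problems = []
    for section in parser.sections():
        gpgcheck = parser.get(section, 'gpgcheck', fallback='0')
        gpgkey = parser.get(section, 'gpgkey', fallback=None)
        if gpgcheck == '1' and not gpgkey:
            problems.append('%s: gpgcheck=1 but no gpgkey set' % section)
    return problems

print(check_repo_file('maxamillion-origin-next-epel-7.repo'))
```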
diff --git a/roles/openshift_repos/files/removed/repos/epel7-openshift.repo b/roles/openshift_repos/files/removed/repos/epel7-openshift.repo
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/roles/openshift_repos/files/removed/repos/epel7-openshift.repo
diff --git a/roles/openshift_repos/files/removed/repos/oso-rhui-rhel-7-extras.repo b/roles/openshift_repos/files/removed/repos/oso-rhui-rhel-7-extras.repo
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/roles/openshift_repos/files/removed/repos/oso-rhui-rhel-7-extras.repo
diff --git a/roles/openshift_repos/files/removed/repos/oso-rhui-rhel-7-server.repo b/roles/openshift_repos/files/removed/repos/oso-rhui-rhel-7-server.repo
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/roles/openshift_repos/files/removed/repos/oso-rhui-rhel-7-server.repo
diff --git a/roles/openshift_repos/tasks/main.yaml b/roles/openshift_repos/tasks/main.yaml
index bb1551d37..12e98b7a1 100644
--- a/roles/openshift_repos/tasks/main.yaml
+++ b/roles/openshift_repos/tasks/main.yaml
@@ -10,10 +10,6 @@
- assert:
that: openshift_deployment_type in known_openshift_deployment_types
-# TODO: remove this when origin support actually works
-- fail: msg="OpenShift Origin support is not currently enabled"
- when: openshift_deployment_type == 'origin'
-
- name: Ensure libselinux-python is installed
yum:
pkg: libselinux-python
@@ -36,17 +32,15 @@
path: "/etc/yum.repos.d/{{ item | basename }}"
state: absent
with_fileglob:
- - '*/*'
- when: not (item | search("/files/" + openshift_deployment_type + "/")) and (item | search(".repo$"))
+ - '*/repos/*'
+ when: not (item | search("/files/" ~ openshift_deployment_type ~ "/repos"))
- name: Configure gpg keys if needed
copy: src={{ item }} dest=/etc/pki/rpm-gpg/
with_fileglob:
- - "{{ openshift_deployment_type }}/*"
- when: item | basename | match("RPM-GPG-KEY-")
+ - "{{ openshift_deployment_type }}/gpg_keys/*"
- name: Configure yum repositories
copy: src={{ item }} dest=/etc/yum.repos.d/
with_fileglob:
- - "{{ openshift_deployment_type }}/*"
- when: item | basename | search(".*\.repo$")
+ - "{{ openshift_deployment_type }}/repos/*"
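Editor's note: the reworked tasks rely on the new per-deployment-type layout (`<type>/repos/` and `<type>/gpg_keys/`) so the `when` filters get much simpler. A sketch (paths and deployment type assumed for illustration) of the same selection logic in plain Python:

```python
# Sketch: mirror the with_fileglob logic -- remove repo files that do not
# belong to the active deployment type, then pick that type's repos and keys.
import glob
import os

deployment_type = 'enterprise'  # assumed value for illustration
files_root = 'roles/openshift_repos/files'

all_repo_files = glob.glob(os.path.join(files_root, '*/repos/*'))
keep_marker = '/files/%s/repos' % deployment_type

to_remove = [f for f in all_repo_files if keep_marker not in f]
to_install = glob.glob(os.path.join(files_root, deployment_type, 'repos/*'))
gpg_keys = glob.glob(os.path.join(files_root, deployment_type, 'gpg_keys/*'))

print(to_remove, to_install, gpg_keys)
```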
diff --git a/roles/openshift_repos/templates/yum_repo.j2 b/roles/openshift_repos/templates/yum_repo.j2
index 7ea2c7460..2d9243545 100644
--- a/roles/openshift_repos/templates/yum_repo.j2
+++ b/roles/openshift_repos/templates/yum_repo.j2
@@ -1,4 +1,3 @@
-# {{ ansible_managed }}
{% for repo in openshift_additional_repos %}
[{{ repo.id }}]
name={{ repo.name | default(repo.id) }}
diff --git a/roles/openshift_sdn_master/tasks/main.yml b/roles/openshift_sdn_master/tasks/main.yml
index f2d61043b..77e7a80ba 100644
--- a/roles/openshift_sdn_master/tasks/main.yml
+++ b/roles/openshift_sdn_master/tasks/main.yml
@@ -12,12 +12,21 @@
yum:
pkg: openshift-sdn-master
state: installed
+ register: install_result
+- name: Reload systemd units
+ command: systemctl daemon-reload
+ when: install_result | changed
+
+# TODO: we should probably generate certs specifically for sdn
- name: Configure openshift-sdn-master settings
lineinfile:
dest: /etc/sysconfig/openshift-sdn-master
regexp: '^OPTIONS='
- line: "OPTIONS=\"-v={{ openshift.master_sdn.debug_level }}\""
+ line: "OPTIONS=\"-v={{ openshift.master_sdn.debug_level }} -etcd-endpoints={{ openshift_sdn_master_url }}
+ -etcd-cafile={{ openshift_cert_dir }}/ca/ca.crt
+ -etcd-certfile={{ openshift_cert_dir }}/openshift-client/cert.crt
+ -etcd-keyfile={{ openshift_cert_dir }}/openshift-client/key.key\""
notify:
- restart openshift-sdn-master
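Editor's note: the hunk above adds the common "daemon-reload only when the install changed something" pattern, since a fresh package may drop new unit files. A rough standalone sketch of the same idea; note the Ansible yum module reports changed status directly, so grepping yum output for "Nothing to do" here is a heuristic assumption, not what the module does:

```python
# Sketch: reload systemd units only when a yum install actually changed state.
import subprocess

def install_and_reload(pkg):
    out = subprocess.check_output(['yum', '-y', 'install', pkg])
    changed = b'Nothing to do' not in out  # heuristic stand-in for register/changed
    if changed:
        subprocess.check_call(['systemctl', 'daemon-reload'])
    return changed

install_and_reload('openshift-sdn-master')
```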
diff --git a/roles/openshift_sdn_node/tasks/main.yml b/roles/openshift_sdn_node/tasks/main.yml
index 729c28879..37a30d019 100644
--- a/roles/openshift_sdn_node/tasks/main.yml
+++ b/roles/openshift_sdn_node/tasks/main.yml
@@ -9,9 +9,15 @@
yum:
pkg: openshift-sdn-node
state: installed
+ register: install_result
+
+- name: Reload systemd units
+ command: systemctl daemon-reload
+ when: install_result | changed
# TODO: we are specifying -hostname= for OPTIONS as a workaround for
# openshift-sdn-node not properly detecting the hostname.
+# TODO: we should probably generate certs specifically for sdn
- name: Configure openshift-sdn-node settings
lineinfile:
dest: /etc/sysconfig/openshift-sdn-node
@@ -20,17 +26,33 @@
backrefs: yes
with_items:
- regex: '^(OPTIONS=)'
- line: '\1"-v={{ openshift.node_sdn.debug_level }} -hostname={{ openshift.common.hostname }}"'
+ line: '\1"-v={{ openshift.node_sdn.debug_level }} -hostname={{ openshift.common.hostname }}
+ -etcd-cafile={{ openshift_node_cert_dir }}/ca.crt
+ -etcd-certfile={{ openshift_node_cert_dir }}/client.crt
+ -etcd-keyfile={{ openshift_node_cert_dir }}/client.key\"'
- regex: '^(MASTER_URL=)'
line: '\1"{{ openshift_sdn_master_url }}"'
- regex: '^(MINION_IP=)'
line: '\1"{{ openshift.common.ip }}"'
- # TODO lock down the insecure-registry config to a more sane value than
- # 0.0.0.0/0
- - regex: '^(DOCKER_OPTIONS=)'
- line: '\1"--insecure-registry=0.0.0.0/0 -b=lbr0 --mtu=1450 --selinux-enabled"'
notify: restart openshift-sdn-node
+- name: Ensure we aren't setting DOCKER_OPTIONS in /etc/sysconfig/openshift-sdn-node
+ lineinfile:
+ dest: /etc/sysconfig/openshift-sdn-node
+ regexp: '^DOCKER_OPTIONS='
+ state: absent
+ notify: restart openshift-sdn-node
+
+# TODO lock down the insecure-registry config to a more sane value than
+# 0.0.0.0/0
+- name: Configure docker insecure-registry setting
+ lineinfile:
+ dest: /etc/sysconfig/docker
+ regexp: INSECURE_REGISTRY=
+ line: INSECURE_REGISTRY='--insecure-registry=0.0.0.0/0'
+ notify: restart openshift-sdn-node
+
+
- name: Start and enable openshift-sdn-node
service:
name: openshift-sdn-node
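Editor's note: the docker registry flag moves out of the sdn-node sysconfig and into /etc/sysconfig/docker via two lineinfile tasks, one removing the stale line and one pinning the new one. A sketch (file paths taken from the diff, function is illustrative) of lineinfile's replace-or-append semantics:

```python
# Sketch: replace an existing INSECURE_REGISTRY line, or append one if absent.
import re

def set_insecure_registry(text):
    line = "INSECURE_REGISTRY='--insecure-registry=0.0.0.0/0'"
    if re.search(r'^INSECURE_REGISTRY=', text, flags=re.M):
        return re.sub(r'^INSECURE_REGISTRY=.*$', line, text, flags=re.M)
    return text.rstrip('\n') + '\n' + line + '\n'

print(set_insecure_registry('OPTIONS=-v=0\nINSECURE_REGISTRY=\n'))
```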
diff --git a/roles/os_firewall/library/os_firewall_manage_iptables.py b/roles/os_firewall/library/os_firewall_manage_iptables.py
index 90588d2ae..1cb539a8c 100755
--- a/roles/os_firewall/library/os_firewall_manage_iptables.py
+++ b/roles/os_firewall/library/os_firewall_manage_iptables.py
@@ -1,7 +1,7 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# vim: expandtab:tabstop=4:shiftwidth=4
-
+# pylint: disable=fixme, missing-docstring
from subprocess import call, check_output
DOCUMENTATION = '''
@@ -17,6 +17,7 @@ EXAMPLES = '''
class IpTablesError(Exception):
def __init__(self, msg, cmd, exit_code, output):
+ super(IpTablesError, self).__init__(msg)
self.msg = msg
self.cmd = cmd
self.exit_code = exit_code
@@ -36,13 +37,14 @@ class IpTablesSaveError(IpTablesError):
class IpTablesCreateChainError(IpTablesError):
- def __init__(self, chain, msg, cmd, exit_code, output):
- super(IpTablesCreateChainError, self).__init__(msg, cmd, exit_code, output)
+ def __init__(self, chain, msg, cmd, exit_code, output): # pylint: disable=too-many-arguments, line-too-long
+ super(IpTablesCreateChainError, self).__init__(msg, cmd, exit_code,
+ output)
self.chain = chain
class IpTablesCreateJumpRuleError(IpTablesError):
- def __init__(self, chain, msg, cmd, exit_code, output):
+ def __init__(self, chain, msg, cmd, exit_code, output): # pylint: disable=too-many-arguments, line-too-long
super(IpTablesCreateJumpRuleError, self).__init__(msg, cmd, exit_code,
output)
self.chain = chain
@@ -51,7 +53,7 @@ class IpTablesCreateJumpRuleError(IpTablesError):
# TODO: implement rollbacks for any events that were successful and an
# exception was thrown later. for example, when the chain is created
# successfully, but the add/remove rule fails.
-class IpTablesManager:
+class IpTablesManager(object): # pylint: disable=too-many-instance-attributes
def __init__(self, module):
self.module = module
self.ip_version = module.params['ip_version']
@@ -68,10 +70,10 @@ class IpTablesManager:
try:
self.output.append(check_output(self.save_cmd,
stderr=subprocess.STDOUT))
- except subprocess.CalledProcessError as e:
+ except subprocess.CalledProcessError as ex:
raise IpTablesSaveError(
msg="Failed to save iptables rules",
- cmd=e.cmd, exit_code=e.returncode, output=e.output)
+ cmd=ex.cmd, exit_code=ex.returncode, output=ex.output)
def verify_chain(self):
if not self.chain_exists():
@@ -93,13 +95,13 @@ class IpTablesManager:
self.output.append(check_output(cmd))
self.changed = True
self.save()
- except subprocess.CalledProcessError as e:
+ except subprocess.CalledProcessError as ex:
raise IpTablesCreateChainError(
chain=self.chain,
msg="Failed to create rule for "
- "%s %s" % (self.proto, self.port),
- cmd=e.cmd, exit_code=e.returncode,
- output=e.output)
+ "%s %s" % (proto, port),
+ cmd=ex.cmd, exit_code=ex.returncode,
+ output=ex.output)
def remove_rule(self, port, proto):
rule = self.gen_rule(port, proto)
@@ -113,15 +115,15 @@ class IpTablesManager:
self.output.append(check_output(cmd))
self.changed = True
self.save()
- except subprocess.CalledProcessError as e:
- raise IpTablesRemoveChainError(
+ except subprocess.CalledProcessError as ex:
+ raise IpTablesRemoveRuleError(
chain=self.chain,
msg="Failed to remove rule for %s %s" % (proto, port),
- cmd=e.cmd, exit_code=e.returncode, output=e.output)
+ cmd=ex.cmd, exit_code=ex.returncode, output=ex.output)
def rule_exists(self, rule):
check_cmd = self.cmd + ['-C'] + rule
- return True if subprocess.call(check_cmd) == 0 else False
+ return True if call(check_cmd) == 0 else False
def gen_rule(self, port, proto):
return [self.chain, '-p', proto, '-m', 'state', '--state', 'NEW',
@@ -137,7 +139,7 @@ class IpTablesManager:
output = check_output(cmd, stderr=subprocess.STDOUT)
# break the input rules into rows and columns
- input_rules = map(lambda s: s.split(), output.split('\n'))
+ input_rules = [s.split() for s in output.split('\n')]
# Find the last numbered rule
last_rule_num = None
@@ -150,42 +152,38 @@ class IpTablesManager:
continue
last_rule_target = rule[1]
- # Raise an exception if we do not find a valid rule
- if not last_rule_num or not last_rule_target:
- raise IpTablesCreateJumpRuleError(
- chain=self.chain,
- msg="Failed to find existing %s rules" % self.jump_rule_chain,
- cmd=None, exit_code=None, output=None)
-
# Naively assume that if the last row is a REJECT rule, then
# we can insert our rule right before it, otherwise we
# assume that we can just append the rule.
- if last_rule_target == 'REJECT':
+ if (last_rule_num and last_rule_target
+ and last_rule_target == 'REJECT'):
# insert rule
- cmd = self.cmd + ['-I', self.jump_rule_chain, str(last_rule_num)]
+ cmd = self.cmd + ['-I', self.jump_rule_chain,
+ str(last_rule_num)]
else:
# append rule
cmd = self.cmd + ['-A', self.jump_rule_chain]
cmd += ['-j', self.chain]
output = check_output(cmd, stderr=subprocess.STDOUT)
- changed = True
+ self.changed = True
self.output.append(output)
self.save()
- except subprocess.CalledProcessError as e:
- if '--line-numbers' in e.cmd:
+ except subprocess.CalledProcessError as ex:
+ if '--line-numbers' in ex.cmd:
raise IpTablesCreateJumpRuleError(
chain=self.chain,
- msg="Failed to query existing %s rules to " % self.jump_rule_chain +
- "determine jump rule location",
- cmd=e.cmd, exit_code=e.returncode,
- output=e.output)
+ msg=("Failed to query existing " +
+ self.jump_rule_chain +
+ " rules to determine jump rule location"),
+ cmd=ex.cmd, exit_code=ex.returncode,
+ output=ex.output)
else:
raise IpTablesCreateJumpRuleError(
chain=self.chain,
- msg="Failed to create jump rule for chain %s" %
- self.chain,
- cmd=e.cmd, exit_code=e.returncode,
- output=e.output)
+ msg=("Failed to create jump rule for chain " +
+ self.chain),
+ cmd=ex.cmd, exit_code=ex.returncode,
+ output=ex.output)
def create_chain(self):
if self.check_mode:
@@ -200,27 +198,26 @@ class IpTablesManager:
self.output.append("Successfully created chain %s" %
self.chain)
self.save()
- except subprocess.CalledProcessError as e:
+ except subprocess.CalledProcessError as ex:
raise IpTablesCreateChainError(
chain=self.chain,
msg="Failed to create chain: %s" % self.chain,
- cmd=e.cmd, exit_code=e.returncode, output=e.output
+ cmd=ex.cmd, exit_code=ex.returncode, output=ex.output
)
def jump_rule_exists(self):
cmd = self.cmd + ['-C', self.jump_rule_chain, '-j', self.chain]
- return True if subprocess.call(cmd) == 0 else False
+ return True if call(cmd) == 0 else False
def chain_exists(self):
cmd = self.cmd + ['-L', self.chain]
- return True if subprocess.call(cmd) == 0 else False
+ return True if call(cmd) == 0 else False
def gen_cmd(self):
cmd = 'iptables' if self.ip_version == 'ipv4' else 'ip6tables'
return ["/usr/sbin/%s" % cmd]
- def gen_save_cmd(self):
- cmd = 'iptables' if self.ip_version == 'ipv4' else 'ip6tables'
+ def gen_save_cmd(self): # pylint: disable=no-self-use
return ['/usr/libexec/iptables/iptables.init', 'save']
@@ -228,7 +225,8 @@ def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True),
- action=dict(required=True, choices=['add', 'remove', 'verify_chain']),
+ action=dict(required=True, choices=['add', 'remove',
+ 'verify_chain']),
chain=dict(required=False, default='OS_FIREWALL_ALLOW'),
create_jump_rule=dict(required=False, type='bool', default=True),
jump_rule_chain=dict(required=False, default='INPUT'),
@@ -261,13 +259,15 @@ def main():
iptables_manager.remove_rule(port, protocol)
elif action == 'verify_chain':
iptables_manager.verify_chain()
- except IpTablesError as e:
- module.fail_json(msg=e.msg)
+ except IpTablesError as ex:
+ module.fail_json(msg=ex.msg)
return module.exit_json(changed=iptables_manager.changed,
output=iptables_manager.output)
+# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
# import module snippets
from ansible.module_utils.basic import *
-main()
+if __name__ == '__main__':
+ main()
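Editor's note: the module cleanup above follows a consistent set of pylint-driven patterns: the base exception calls super().__init__, subprocess failures are caught as "ex" and re-raised as typed errors, and main() only runs under the __main__ guard. A condensed, runnable sketch of those patterns together (names mirror the module, the body is illustrative):

```python
# Sketch: wrap CalledProcessError in a domain exception; guard the entry point.
from subprocess import CalledProcessError, check_output

class IpTablesError(Exception):
    def __init__(self, msg, cmd, exit_code, output):
        super(IpTablesError, self).__init__(msg)
        self.msg = msg
        self.cmd = cmd
        self.exit_code = exit_code
        self.output = output

def save_rules(save_cmd):
    try:
        return check_output(save_cmd)
    except CalledProcessError as ex:
        raise IpTablesError("Failed to save iptables rules",
                            ex.cmd, ex.returncode, ex.output)

if __name__ == '__main__':
    print(save_rules(['/usr/libexec/iptables/iptables.init', 'save']))
```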
diff --git a/roles/os_firewall/tasks/firewall/firewalld.yml b/roles/os_firewall/tasks/firewall/firewalld.yml
index b6bddd5c5..5089eb3e0 100644
--- a/roles/os_firewall/tasks/firewall/firewalld.yml
+++ b/roles/os_firewall/tasks/firewall/firewalld.yml
@@ -44,6 +44,7 @@
- iptables
- ip6tables
when: pkg_check.rc == 0
+ ignore_errors: yes
# TODO: Ansible 1.9 will eliminate the need for separate firewalld tasks for
# enabling rules and making them permanent with the immediate flag
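Editor's note: ignore_errors is added because acting on the conflicting iptables/ip6tables services fails with a non-zero exit when a unit is absent, and the play should continue regardless. A sketch of the equivalent tolerant loop (using systemctl mask as an assumed example of the underlying action):

```python
# Sketch: tolerate non-zero exits per unit, like ignore_errors does per item.
import subprocess

for unit in ['iptables', 'ip6tables']:
    result = subprocess.run(['systemctl', 'mask', unit], check=False)
    print(unit, result.returncode)  # failure is reported but not fatal
```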
diff --git a/roles/os_firewall/tasks/firewall/iptables.yml b/roles/os_firewall/tasks/firewall/iptables.yml
index 7b5c00a9b..9af9d8d29 100644
--- a/roles/os_firewall/tasks/firewall/iptables.yml
+++ b/roles/os_firewall/tasks/firewall/iptables.yml
@@ -42,6 +42,7 @@
register: result
changed_when: "'firewalld' in result.stdout"
when: pkg_check.rc == 0
+ ignore_errors: yes
- name: Add iptables allow rules
os_firewall_manage_iptables: