-rw-r--r--  BUILD.md  44
-rw-r--r--  README.md  6
-rw-r--r--  README_AWS.md  48
-rw-r--r--  README_GCE.md  27
-rw-r--r--  README_OSE.md  142
-rw-r--r--  README_libvirt.md  92
-rw-r--r--  ansible.cfg  23
-rwxr-xr-x  bin/cluster  190
-rwxr-xr-x  bin/ohi  110
-rw-r--r--  bin/openshift-ansible-bin.spec  65
-rw-r--r--  bin/openshift_ansible.conf.example  6
-rw-r--r--  bin/openshift_ansible/__init__.py  0
-rw-r--r--  bin/openshift_ansible/awsutil.py (renamed from bin/awsutil.py)  70
-rwxr-xr-x  bin/opssh  79
-rwxr-xr-x  bin/oscp  28
-rwxr-xr-x  bin/ossh  26
-rwxr-xr-x  bin/ossh_bash_completion  23
-rwxr-xr-x  cluster.sh  113
-rw-r--r--  filter_plugins/oo_filters.py  114
-rw-r--r--  inventory/aws/group_vars/all  2
-rw-r--r--  inventory/byo/group_vars/all  28
-rw-r--r--  inventory/byo/hosts  10
-rw-r--r--  inventory/gce/group_vars/all  2
-rw-r--r--  inventory/libvirt/group_vars/all  2
-rw-r--r--  inventory/libvirt/hosts  2
-rwxr-xr-x  inventory/multi_ec2.py  17
-rw-r--r--  inventory/openshift-ansible-inventory.spec  50
l---------  playbooks/adhoc/noc/filter_plugins  1
-rw-r--r--  playbooks/adhoc/noc/get_zabbix_problems.yml  41
l---------  playbooks/adhoc/noc/roles  1
-rw-r--r--  playbooks/aws/ansible-tower/launch.yml  2
l---------  playbooks/aws/openshift-cluster/filter_plugins  1
-rw-r--r--  playbooks/aws/openshift-cluster/launch.yml  62
-rw-r--r--  playbooks/aws/openshift-cluster/launch_instances.yml  63
-rw-r--r--  playbooks/aws/openshift-cluster/list.yml  17
l---------  playbooks/aws/openshift-cluster/roles  1
-rw-r--r--  playbooks/aws/openshift-cluster/terminate.yml  14
-rw-r--r--  playbooks/aws/openshift-cluster/update.yml  13
-rw-r--r--  playbooks/aws/openshift-cluster/vars.yml  1
-rw-r--r--  playbooks/aws/openshift-master/config.yml  42
-rw-r--r--  playbooks/aws/openshift-master/launch.yml  11
-rw-r--r--  playbooks/aws/openshift-master/terminate.yml  52
-rw-r--r--  playbooks/aws/openshift-master/vars.yml  1
-rw-r--r--  playbooks/aws/openshift-node/config.yml  130
-rw-r--r--  playbooks/aws/openshift-node/launch.yml  15
-rw-r--r--  playbooks/aws/openshift-node/terminate.yml  52
-rw-r--r--  playbooks/aws/openshift-node/vars.yml  1
-rw-r--r--  playbooks/byo/config.yml  6
l---------  playbooks/byo/filter_plugins  1
-rw-r--r--  playbooks/byo/openshift-master/config.yml  9
l---------  playbooks/byo/openshift-master/filter_plugins  1
l---------  playbooks/byo/openshift-master/roles  1
-rw-r--r--  playbooks/byo/openshift-node/config.yml  79
l---------  playbooks/byo/openshift-node/filter_plugins  1
l---------  playbooks/byo/openshift-node/roles  1
l---------  playbooks/byo/roles  1
l---------  playbooks/gce/openshift-cluster/filter_plugins  1
-rw-r--r--  playbooks/gce/openshift-cluster/launch.yml  62
-rw-r--r--  playbooks/gce/openshift-cluster/launch_instances.yml  44
-rw-r--r--  playbooks/gce/openshift-cluster/list.yml  17
l---------  playbooks/gce/openshift-cluster/roles  1
-rw-r--r--  playbooks/gce/openshift-cluster/terminate.yml  20
-rw-r--r--  playbooks/gce/openshift-cluster/update.yml  13
-rw-r--r--  playbooks/gce/openshift-cluster/vars.yml  1
-rw-r--r--  playbooks/gce/openshift-master/config.yml  36
-rw-r--r--  playbooks/gce/openshift-master/launch.yml  14
-rw-r--r--  playbooks/gce/openshift-master/terminate.yml  19
-rw-r--r--  playbooks/gce/openshift-master/vars.yml  1
-rw-r--r--  playbooks/gce/openshift-node/config.yml  121
-rw-r--r--  playbooks/gce/openshift-node/launch.yml  24
-rw-r--r--  playbooks/gce/openshift-node/terminate.yml  19
-rw-r--r--  playbooks/gce/openshift-node/vars.yml  1
l---------  playbooks/libvirt/openshift-cluster/filter_plugins  1
-rw-r--r--  playbooks/libvirt/openshift-cluster/launch.yml  65
-rw-r--r--  playbooks/libvirt/openshift-cluster/launch_instances.yml  102
-rw-r--r--  playbooks/libvirt/openshift-cluster/list.yml  43
l---------  playbooks/libvirt/openshift-cluster/roles  1
-rw-r--r--  playbooks/libvirt/openshift-cluster/terminate.yml  41
-rw-r--r--  playbooks/libvirt/openshift-cluster/vars.yml  7
-rw-r--r--  playbooks/libvirt/openshift-master/config.yml  21
l---------  playbooks/libvirt/openshift-master/filter_plugins  1
l---------  playbooks/libvirt/openshift-master/roles  1
-rw-r--r--  playbooks/libvirt/openshift-master/vars.yml  1
-rw-r--r--  playbooks/libvirt/openshift-node/config.yml  102
l---------  playbooks/libvirt/openshift-node/filter_plugins  1
l---------  playbooks/libvirt/openshift-node/roles  1
-rw-r--r--  playbooks/libvirt/openshift-node/vars.yml  1
-rw-r--r--  playbooks/libvirt/templates/domain.xml  62
-rw-r--r--  playbooks/libvirt/templates/meta-data  2
-rw-r--r--  playbooks/libvirt/templates/user-data  10
-rw-r--r--  rel-eng/packages/.readme  3
-rw-r--r--  rel-eng/packages/openshift-ansible-bin  1
-rw-r--r--  rel-eng/packages/openshift-ansible-inventory  1
-rw-r--r--  rel-eng/tito.props  5
-rw-r--r--  roles/ansible_tower/tasks/main.yaml  7
-rw-r--r--  roles/docker/tasks/main.yml  2
-rw-r--r--  roles/openshift_ansible_inventory/README.md  41
-rw-r--r--  roles/openshift_ansible_inventory/defaults/main.yml  4
-rw-r--r--  roles/openshift_ansible_inventory/handlers/main.yml  2
-rw-r--r--  roles/openshift_ansible_inventory/meta/main.yml  8
-rw-r--r--  roles/openshift_ansible_inventory/tasks/main.yml  11
-rw-r--r--  roles/openshift_ansible_inventory/templates/multi_ec2.yaml.j2  11
-rw-r--r--  roles/openshift_ansible_inventory/vars/main.yml  2
-rw-r--r--  roles/openshift_common/README.md  20
-rw-r--r--  roles/openshift_common/defaults/main.yml  7
-rw-r--r--  roles/openshift_common/meta/main.yml  2
-rw-r--r--  roles/openshift_common/tasks/main.yml  31
-rw-r--r--  roles/openshift_common/tasks/set_facts.yml  9
-rw-r--r--  roles/openshift_common/vars/main.yml  5
-rw-r--r--  roles/openshift_facts/README.md  34
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py  482
-rw-r--r--  roles/openshift_facts/meta/main.yml  15
-rw-r--r--  roles/openshift_facts/tasks/main.yml  3
-rw-r--r--  roles/openshift_master/README.md  29
-rw-r--r--  roles/openshift_master/defaults/main.yml  13
-rw-r--r--  roles/openshift_master/handlers/main.yml  1
-rw-r--r--  roles/openshift_master/tasks/main.yml  77
-rw-r--r--  roles/openshift_master/vars/main.yml  2
-rw-r--r--  roles/openshift_node/README.md  7
-rw-r--r--  roles/openshift_node/defaults/main.yml  6
-rw-r--r--  roles/openshift_node/handlers/main.yml  2
-rw-r--r--  roles/openshift_node/library/openshift_register_node.py  211
-rw-r--r--  roles/openshift_node/tasks/main.yml  84
-rw-r--r--  roles/openshift_node/vars/main.yml  2
-rw-r--r--  roles/openshift_register_nodes/README.md  34
-rw-r--r--  roles/openshift_register_nodes/defaults/main.yml  5
-rwxr-xr-x  roles/openshift_register_nodes/library/kubernetes_register_node.py  371
-rw-r--r--  roles/openshift_register_nodes/meta/main.yml  17
-rw-r--r--  roles/openshift_register_nodes/tasks/main.yml  67
-rw-r--r--  roles/openshift_repos/README.md  38
-rw-r--r--  roles/openshift_repos/defaults/main.yaml (renamed from roles/repos/defaults/main.yaml)  2
-rw-r--r--  roles/openshift_repos/files/online/RPM-GPG-KEY-redhat-beta (renamed from roles/repos/files/online/RPM-GPG-KEY-redhat-beta)  0
-rw-r--r--  roles/openshift_repos/files/online/RPM-GPG-KEY-redhat-release (renamed from roles/repos/files/online/RPM-GPG-KEY-redhat-release)  0
-rw-r--r--  roles/openshift_repos/files/online/epel7-kubernetes.repo (renamed from roles/repos/files/online/epel7-kubernetes.repo)  0
-rw-r--r--  roles/openshift_repos/files/online/epel7-openshift.repo (renamed from roles/repos/files/online/epel7-openshift.repo)  0
-rw-r--r--  roles/openshift_repos/files/online/oso-rhui-rhel-7-extras.repo (renamed from roles/repos/files/online/oso-rhui-rhel-7-extras.repo)  0
-rw-r--r--  roles/openshift_repos/files/online/oso-rhui-rhel-7-server.repo (renamed from roles/repos/files/online/oso-rhui-rhel-7-server.repo)  0
-rw-r--r--  roles/openshift_repos/files/online/rhel-7-libra-candidate.repo (renamed from roles/repos/files/online/rhel-7-libra-candidate.repo)  0
-rw-r--r--  roles/openshift_repos/meta/main.yml  15
-rw-r--r--  roles/openshift_repos/tasks/main.yaml (renamed from roles/repos/tasks/main.yaml)  11
-rw-r--r--  roles/openshift_repos/templates/yum_repo.j2 (renamed from roles/repos/templates/yum_repo.j2)  0
-rw-r--r--  roles/openshift_repos/vars/main.yml (renamed from roles/repos/vars/main.yml)  0
-rw-r--r--  roles/openshift_sdn_master/defaults/main.yml  2
-rw-r--r--  roles/openshift_sdn_master/meta/main.yml  3
-rw-r--r--  roles/openshift_sdn_master/tasks/main.yml  18
-rw-r--r--  roles/openshift_sdn_node/README.md  9
-rw-r--r--  roles/openshift_sdn_node/defaults/main.yml  2
-rw-r--r--  roles/openshift_sdn_node/meta/main.yml  3
-rw-r--r--  roles/openshift_sdn_node/tasks/main.yml  23
-rw-r--r--  roles/os_env_extras_node/tasks/main.yml  5
-rwxr-xr-x[-rw-r--r--]  roles/os_firewall/library/os_firewall_manage_iptables.py  63
-rw-r--r--  roles/os_firewall/meta/main.yml  1
-rw-r--r--  roles/os_firewall/tasks/firewall/firewalld.yml  5
-rw-r--r--  roles/os_firewall/tasks/firewall/iptables.yml  12
-rw-r--r--  roles/os_update_latest/tasks/main.yml  3
-rwxr-xr-x  roles/os_zabbix/library/zbxapi.py  273
-rw-r--r--  roles/yum_repos/README.md  113
-rw-r--r--  roles/yum_repos/defaults/main.yml  3
-rw-r--r--  roles/yum_repos/meta/main.yml  8
-rw-r--r--  roles/yum_repos/tasks/main.yml  47
-rw-r--r--  roles/yum_repos/templates/yumrepo.j2  18
161 files changed, 4255 insertions, 853 deletions
diff --git a/BUILD.md b/BUILD.md
new file mode 100644
index 000000000..0016c96a5
--- /dev/null
+++ b/BUILD.md
@@ -0,0 +1,44 @@
+# openshift-ansible RPM Build instructions
+We use tito to make building and tracking revisions easy.
+
+For more information on tito, please see the [Tito home page](http://rm-rf.ca/tito "Tito home page").
+
+
+## Build openshift-ansible-bin
+- Change into openshift-ansible/bin
+```
+cd openshift-ansible/bin
+```
+- Build a test package (no tagging needed)
+```
+tito build --test --rpm
+```
+- Tag a new build (bumps version number and adds log entries)
+```
+tito tag
+```
+- Follow the on-screen tito instructions to push the tags
+- Build a new package based on the latest tag information
+```
+tito build --rpm
+```
+
+
+## Build openshift-ansible-inventory
+- Change into openshift-ansible/inventory
+```
+cd openshift-ansible/inventory
+```
+- Build a test package (no tagging needed)
+```
+tito build --test --rpm
+```
+- Tag a new build (bumps version number and adds log entries)
+```
+tito tag
+```
+- Follow the on-screen tito instructions to push the tags
+- Build a new package based on the latest tag information
+```
+tito build --rpm
+```
diff --git a/README.md b/README.md
index ffdfee6f2..87dbfc1ea 100644
--- a/README.md
+++ b/README.md
@@ -20,10 +20,14 @@ Setup
- Setup for a specific cloud:
- [AWS](README_AWS.md)
- [GCE](README_GCE.md)
+ - [local VMs](README_libvirt.md)
+
+- Build
+ - [How to build the openshift-ansible rpms](BUILD.md)
- Directory Structure:
- [cloud.rb](cloud.rb) - light wrapper around Ansible
- - [cluster.sh](cluster.sh) - easily create OpenShift 3 clusters
+ - [bin/cluster](bin/cluster) - python script to easily create OpenShift 3 clusters
- [filter_plugins/](filter_plugins) - custom filters used to manipulate data in Ansible
- [inventory/](inventory) - houses Ansible dynamic inventory scripts
- [lib/](lib) - library components of cloud.rb
diff --git a/README_AWS.md b/README_AWS.md
index fb9d0f895..37f4c5f51 100644
--- a/README_AWS.md
+++ b/README_AWS.md
@@ -14,7 +14,7 @@ Create a credentials file
export AWS_ACCESS_KEY_ID='AKIASTUFF'
export AWS_SECRET_ACCESS_KEY='STUFF'
```
-1. source this file
+2. source this file
```
source ~/.aws_creds
```
@@ -23,7 +23,7 @@ Note: You must source this file in each shell that you want to run cloud.rb
(Optional) Setup your $HOME/.ssh/config file
-------------------------------------------
-In case of a cluster creation, or any other case where you don't know the machine hostname in advance, you can use '.ssh/config'
+In case of a cluster creation, or any other case where you don't know the machine hostname in advance, you can use '.ssh/config'
to set up a private key file that allows ansible to connect to the created hosts.
To do so, add the following entry to your $HOME/.ssh/config file and make it point to the private key that allows you to log in to AWS.
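
For reference, a minimal sketch of such an entry (the key path here is an assumption; point IdentityFile at whichever private key matches your AWS keypair):

```
Host *.compute-1.amazonaws.com
  IdentityFile ~/.ssh/my_aws_key.pem
```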
@@ -34,6 +34,24 @@ Host *.compute-1.amazonaws.com
Alternatively, you can configure your ssh-agent to hold the credentials to connect to your AWS instances.
+(Optional) Choose where the cluster will be launched
+----------------------------------------------------
+
+By default, a cluster is launched with the following configuration:
+
+- Instance type: m3.large
+- AMI: ami-307b3658
+- Region: us-east-1
+- Keypair name: libra
+- Security group: public
+
+If needed, these values can be changed by setting environment variables on your system.
+
+- export ec2_instance_type='m3.large'
+- export ec2_ami='ami-307b3658'
+- export ec2_region='us-east-1'
+- export ec2_keypair='libra'
+- export ec2_security_group='public'
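+
+For example, a minimal sketch that overrides a couple of the defaults before
+launching (the values are illustrative; any valid EC2 settings work):
+
+```sh
+export ec2_instance_type='m3.medium'
+export ec2_region='us-west-2'
+bin/cluster create aws my-cluster
+```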
Install Dependencies
--------------------
@@ -51,7 +69,29 @@ OSX:
Test The Setup
--------------
1. cd openshift-ansible
-1. Try to list all instances:
+1. Try to list all instances (passing an empty string as the cluster_id
+argument lists all EC2 instances)
+```
+ bin/cluster list aws ''
+```
+
+Creating a cluster
+------------------
+1. To create a cluster with one master and two nodes:
+```
+ bin/cluster create aws <cluster-id>
+```
+
+Updating a cluster
+---------------------
+1. To update the cluster:
+```
+ bin/cluster update aws <cluster-id>
+```
+
+Terminating a cluster
+---------------------
+1. To terminate the cluster:
```
- ./cloud.rb aws list
+ bin/cluster terminate aws <cluster-id>
```
diff --git a/README_GCE.md b/README_GCE.md
index b00598113..f6c5138c1 100644
--- a/README_GCE.md
+++ b/README_GCE.md
@@ -4,7 +4,7 @@ GCE Setup Instructions
Get a gce service key
---------------------
-1. ask your GCE project administrator for a GCE service key
+1. Ask your GCE project administrator for a GCE service key
Note: If your GCE project does not show a Service Account under <Project>/APIs & auth/Credentials, you will need to use "Create new Client ID" to create a Service Account before your administrator can create the service key for you.
@@ -65,12 +65,29 @@ Install Dependencies
Test The Setup
--------------
1. cd openshift-ansible/
-2. Try to list all instances:
+1. Try to list all instances (passing an empty string as the cluster_id
+argument lists all GCE instances)
```
- ./cloud.rb gce list
+ bin/cluster list gce ''
```
-3. Try to create an instance:
+Creating a cluster
+------------------
+1. To create a cluster with one master and two nodes:
```
- ./cloud.rb gce launch -n ${USER}-node1 -e int --type os3-node
+ bin/cluster create gce <cluster-id>
+```
+
+Updating a cluster
+---------------------
+1. To update the cluster:
+```
+ bin/cluster update gce <cluster-id>
+```
+
+Terminating a cluster
+---------------------
+1. To terminate the cluster:
+```
+ bin/cluster terminate gce <cluster-id>
```
diff --git a/README_OSE.md b/README_OSE.md
new file mode 100644
index 000000000..6ebdb7f99
--- /dev/null
+++ b/README_OSE.md
@@ -0,0 +1,142 @@
+# Installing OSEv3 from dev puddles using ansible
+
+* [Requirements](#requirements)
+* [Caveats](#caveats)
+* [Known Issues](#known-issues)
+* [Configuring the host inventory](#configuring-the-host-inventory)
+* [Creating the default variables for the hosts and host groups](#creating-the-default-variables-for-the-hosts-and-host-groups)
+* [Running the ansible playbooks](#running-the-ansible-playbooks)
+* [Post-ansible steps](#post-ansible-steps)
+
+## Requirements
+* ansible
+ * Tested using ansible-1.8.2-1.fc20.noarch, but should work with version 1.8+
+ * Available in Fedora channels
+ * Available for EL with EPEL and Optional channel
+* One or more RHEL 7.1 VMs
+* ssh key based auth for the root user needs to be pre-configured from the host
+ running ansible to the remote hosts
+* A checkout of openshift-ansible from https://github.com/openshift/openshift-ansible/
+
+ ```sh
+ git clone https://github.com/openshift/openshift-ansible.git
+ cd openshift-ansible
+ ```
+
+## Caveats
+This ansible repo is currently under heavy revision for providing OSE support;
+the following items are highly likely to change before the OSE support is
+merged into the upstream repo:
+ * the current git branch for testing
+ * how the inventory file should be configured
+ * variables that need to be set
+ * bootstrapping steps
+ * other configuration steps
+
+## Known Issues
+* Host subscriptions are not configurable yet; the hosts need to be
+  pre-registered with subscription-manager or have the RHEL base repo
+  pre-configured. If using subscription-manager, the following commands will
+  disable all but the rhel-7-server-rpms, rhel-7-server-extras-rpms and
+  rhel-server-7-ose-beta-rpms repos:
+```sh
+subscription-manager repos --disable="*"
+subscription-manager repos \
+--enable="rhel-7-server-rpms" \
+--enable="rhel-7-server-extras-rpms" \
+--enable="rhel-server-7-ose-beta-rpms"
+```
+* Configuration of the router is not automated yet
+* Configuration of the docker-registry is not automated yet
+* End-to-end testing with this module has not been completed yet
+* The root user is used for all ansible actions; eventually we will support
+  using a non-root user with sudo.
+
+## Configuring the host inventory
+[Ansible docs](http://docs.ansible.com/intro_inventory.html)
+
+Example inventory file for configuring one master and two nodes for the test
+environment. The inventory can live in the default inventory file
+(/etc/ansible/hosts) or in a custom file passed to ansible-playbook with the
+--inventory option.
+
+/etc/ansible/hosts:
+```ini
+# This is an example of a bring your own (byo) host inventory
+
+# host group for masters
+[masters]
+ose3-master.example.com
+
+# host group for nodes
+[nodes]
+ose3-node[1:2].example.com
+```
+
+The hostnames above should resolve both from the hosts themselves and
+the host where ansible is running (if different).
+
+## Creating the default variables for the hosts and host groups
+[Ansible docs](http://docs.ansible.com/intro_inventory.html#id9)
+
+#### Group vars for all hosts
+/etc/ansible/group_vars/all:
+```yaml
+---
+# Assume that we want to use root as the ssh user for all hosts
+ansible_ssh_user: root
+
+# Default debug level for all OpenShift hosts
+openshift_debug_level: 4
+
+# Set the OpenShift deployment type for all hosts
+openshift_deployment_type: enterprise
+
+# Override the default registry for development
+openshift_registry_url: docker-buildvm-rhose.usersys.redhat.com:5000/openshift3_beta/ose-${component}:${version}
+
+# To use the latest OpenShift Enterprise Errata puddle:
+#openshift_additional_repos:
+#- id: ose-devel
+# name: ose-devel
+# baseurl: http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterpriseErrata/3.0/latest/RH7-RHOSE-3.0/$basearch/os
+# enabled: 1
+# gpgcheck: 0
+# To use the latest OpenShift Enterprise Whitelist puddle:
+openshift_additional_repos:
+- id: ose-devel
+ name: ose-devel
+ baseurl: http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterprise/3.0/latest/RH7-RHOSE-3.0/$basearch/os
+ enabled: 1
+ gpgcheck: 0
+
+```
+
+## Running the ansible playbooks
+From the openshift-ansible checkout run:
+```sh
+ansible-playbook playbooks/byo/config.yml
+```
+**Note:** this assumes that the host inventory is /etc/ansible/hosts and that
+group_vars are defined in /etc/ansible/group_vars. If using a different
+inventory file (with a group_vars directory in the same directory as the
+inventory), pass the -i option to ansible-playbook.
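+
+For instance, with a hypothetical inventory at ~/ose3/hosts (and a group_vars
+directory sitting next to it):
+```sh
+ansible-playbook -i ~/ose3/hosts playbooks/byo/config.yml
+```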
+
+## Post-ansible steps
+#### Create the default router
+On the master host:
+```sh
+systemctl restart openshift-sdn-master
+openshift ex router --create=true \
+ --credentials=/var/lib/openshift/openshift.local.certificates/openshift-client/.kubeconfig \
+ --images='docker-buildvm-rhose.usersys.redhat.com:5000/openshift3_beta/ose-${component}:${version}'
+```
+
+#### Create the default docker-registry
+On the master host:
+```sh
+openshift ex registry --create=true \
+ --credentials=/var/lib/openshift/openshift.local.certificates/openshift-client/.kubeconfig \
+ --images='docker-buildvm-rhose.usersys.redhat.com:5000/openshift3_beta/ose-${component}:${version}' \
+ --mount-host=/var/lib/openshift/docker-registry
+```
diff --git a/README_libvirt.md b/README_libvirt.md
new file mode 100644
index 000000000..fd2eb57f6
--- /dev/null
+++ b/README_libvirt.md
@@ -0,0 +1,92 @@
+
+LIBVIRT Setup instructions
+==========================
+
+`libvirt` is an `openshift-ansible` provider that uses `libvirt` to create local Fedora VMs that are provisioned exactly the same way that cloud VMs would be provisioned.
+
+This makes `libvirt` useful to develop, test and debug OpenShift and openshift-ansible locally on the developer’s workstation before going to the cloud.
+
+Install dependencies
+--------------------
+
+1. Install [dnsmasq](http://www.thekelleys.org.uk/dnsmasq/doc.html)
+2. Install [ebtables](http://ebtables.netfilter.org/)
+3. Install [qemu](http://wiki.qemu.org/Main_Page)
+4. Install [libvirt](http://libvirt.org/)
+5. Enable and start the libvirt daemon, e.g.:
+ * ``systemctl enable libvirtd``
+ * ``systemctl start libvirtd``
+6. [Grant libvirt access to your user¹](https://libvirt.org/aclpolkit.html)
+7. Check that your `$HOME` is accessible to the qemu user²
+
+#### ¹ Depending on your distribution, libvirt access may be denied by default or may require a password at each access.
+
+You can test it with the following command:
+```
+virsh -c qemu:///system pool-list
+```
+
+If you get access-denied error messages, please read https://libvirt.org/acl.html and https://libvirt.org/aclpolkit.html.
+
+In short, if your libvirt has been compiled with Polkit support (e.g. Arch, Fedora 21), you can create `/etc/polkit-1/rules.d/50-org.libvirt.unix.manage.rules` as follows to grant full libvirt access to `$USER`:
+
+```
+sudo /bin/sh -c "cat - > /etc/polkit-1/rules.d/50-org.libvirt.unix.manage.rules" << EOF
+polkit.addRule(function(action, subject) {
+  if (action.id == "org.libvirt.unix.manage" &&
+      subject.user == "$USER") {
+    // log before returning; statements placed after a return are unreachable
+    polkit.log("action=" + action);
+    polkit.log("subject=" + subject);
+    return polkit.Result.YES;
+  }
+});
+EOF
+```
+
+If your libvirt has not been compiled with Polkit (e.g. Ubuntu 14.04.1 LTS), check the permissions on the libvirt unix socket:
+
+```
+ls -l /var/run/libvirt/libvirt-sock
+srwxrwx--- 1 root libvirtd 0 févr. 12 16:03 /var/run/libvirt/libvirt-sock
+
+sudo usermod -a -G libvirtd $USER
+# $USER needs to log out and back in for the new group membership to take effect
+```
+
+(Replace `$USER` with your login name)
+
+#### ² Qemu will run with a specific user. It must have access to the VMs' drives
+
+All the disk drive resources needed by the VMs (Fedora disk image, cloud-init files) are put inside `~/libvirt-storage-pool-openshift/`.
+
+As we’re using the `qemu:///system` instance of libvirt, qemu will run with a specific `user:group` distinct from your user. It is configured in `/etc/libvirt/qemu.conf`. That qemu user must have access to that libvirt storage pool.
+
+If your `$HOME` is world readable, everything is fine. If your `$HOME` is private, `ansible` will fail with an error message like:
+
+```
+error: Cannot access storage file '$HOME/libvirt-storage-pool-openshift/lenaic-master-216d8.qcow2' (as uid:99, gid:78): Permission denied
+```
+
+In order to fix that issue, you have several possibilities:
+* set `libvirt_storage_pool_path` inside `playbooks/libvirt/openshift-cluster/launch.yml` and `playbooks/libvirt/openshift-cluster/terminate.yml` (a sketch follows below) to a directory:
+  * backed by a filesystem with a lot of free disk space;
+  * writable by your user;
+  * accessible by the qemu user.
+* grant the qemu user access to the storage pool.
+
+On Arch:
+
+```
+setfacl -m g:kvm:--x ~
+```
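+
+For the first option, a minimal sketch of the kind of value meant above (the
+path is an assumption; any directory that meets the three criteria works):
+
+```yaml
+# hypothetical override in launch.yml / terminate.yml
+libvirt_storage_pool_path: /var/lib/libvirt/openshift-pool
+```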
+
+Test the setup
+--------------
+
+```
+cd openshift-ansible
+
+bin/cluster create -m 1 -n 3 libvirt lenaic
+
+bin/cluster terminate libvirt lenaic
+```
diff --git a/ansible.cfg b/ansible.cfg
new file mode 100644
index 000000000..6a7722ad8
--- /dev/null
+++ b/ansible.cfg
@@ -0,0 +1,23 @@
+# config file for ansible -- http://ansible.com/
+# ==============================================
+
+# This config file provides examples for running
+# the OpenShift playbooks with the provided
+# inventory scripts. Only global defaults are
+# left uncommented
+
+[defaults]
+# Add the roles directory to the roles path
+roles_path = roles/
+
+# Set the log_path
+log_path = /tmp/ansible.log
+
+# Uncomment to use the provided BYO inventory
+#hostfile = inventory/byo/hosts
+
+# Uncomment to use the provided GCE dynamic inventory script
+#hostfile = inventory/gce/gce.py
+
+# Uncomment to use the provided AWS dynamic inventory script
+#hostfile = inventory/aws/ec2.py
diff --git a/bin/cluster b/bin/cluster
new file mode 100755
index 000000000..ca227721e
--- /dev/null
+++ b/bin/cluster
@@ -0,0 +1,190 @@
+#!/usr/bin/env python2
+# vim: expandtab:tabstop=4:shiftwidth=4
+
+import argparse
+import ConfigParser
+import sys
+import os
+
+
+class Cluster(object):
+ """
+ Control and Configuration Interface for OpenShift Clusters
+ """
+ def __init__(self):
+ # setup ansible ssh environment
+ if 'ANSIBLE_SSH_ARGS' not in os.environ:
+ os.environ['ANSIBLE_SSH_ARGS'] = (
+ '-o ForwardAgent=yes '
+ '-o StrictHostKeyChecking=no '
+ '-o UserKnownHostsFile=/dev/null '
+ '-o ControlMaster=auto '
+ '-o ControlPersist=600s '
+ )
+
+ def create(self, args):
+ """
+ Create an OpenShift cluster for given provider
+ :param args: command line arguments provided by user
+ :return: exit status from run command
+ """
+ env = {'cluster_id': args.cluster_id}
+ playbook = "playbooks/{}/openshift-cluster/launch.yml".format(args.provider)
+ inventory = self.setup_provider(args.provider)
+
+ env['num_masters'] = args.masters
+ env['num_nodes'] = args.nodes
+
+ return self.action(args, inventory, env, playbook)
+
+ def terminate(self, args):
+ """
+ Destroy OpenShift cluster
+ :param args: command line arguments provided by user
+ :return: exit status from run command
+ """
+ env = {'cluster_id': args.cluster_id}
+ playbook = "playbooks/{}/openshift-cluster/terminate.yml".format(args.provider)
+ inventory = self.setup_provider(args.provider)
+
+ return self.action(args, inventory, env, playbook)
+
+ def list(self, args):
+ """
+ List VMs in cluster
+ :param args: command line arguments provided by user
+ :return: exit status from run command
+ """
+ env = {'cluster_id': args.cluster_id}
+ playbook = "playbooks/{}/openshift-cluster/list.yml".format(args.provider)
+ inventory = self.setup_provider(args.provider)
+
+ return self.action(args, inventory, env, playbook)
+
+ def update(self, args):
+ """
+ Update to latest OpenShift across clustered VMs
+ :param args: command line arguments provided by user
+ :return: exit status from run command
+ """
+ env = {'cluster_id': args.cluster_id}
+ playbook = "playbooks/{}/openshift-cluster/update.yml".format(args.provider)
+ inventory = self.setup_provider(args.provider)
+
+ return self.action(args, inventory, env, playbook)
+
+ def setup_provider(self, provider):
+ """
+ Setup ansible playbook environment
+ :param provider: name of the cloud provider
+ :return: path to inventory for given provider
+ """
+ config = ConfigParser.ConfigParser()
+ if 'gce' == provider:
+ config.readfp(open('inventory/gce/gce.ini'))
+
+ for key in config.options('gce'):
+ os.environ[key] = config.get('gce', key)
+
+ inventory = '-i inventory/gce/gce.py'
+ elif 'aws' == provider:
+ config.readfp(open('inventory/aws/ec2.ini'))
+
+ for key in config.options('ec2'):
+ os.environ[key] = config.get('ec2', key)
+
+ inventory = '-i inventory/aws/ec2.py'
+ elif 'libvirt' == provider:
+ inventory = '-i inventory/libvirt/hosts'
+ else:
+ # this code should never be reached
+ raise ValueError("invalid PROVIDER {}".format(provider))
+
+ return inventory
+
+ def action(self, args, inventory, env, playbook):
+ """
+ Build ansible-playbook command line and execute
+ :param args: command line arguments provided by user
+ :param inventory: inventory option string for the chosen provider
+ :param env: extra vars to pass to the playbook
+ :param playbook: ansible playbook to execute
+ :return: exit status from ansible-playbook command
+ """
+
+ verbose = ''
+ if args.verbose > 0:
+ verbose = '-{}'.format('v' * args.verbose)
+
+ ansible_env = '-e \'{}\''.format(
+ ' '.join(['%s=%s' % (key, value) for (key, value) in env.items()])
+ )
+
+ command = 'ansible-playbook {} {} {} {}'.format(
+ verbose, inventory, ansible_env, playbook
+ )
+
+ if args.verbose > 1:
+ command = 'time {}'.format(command)
+
+ if args.verbose > 0:
+ sys.stderr.write('RUN [{}]\n'.format(command))
+ sys.stderr.flush()
+
+ return os.system(command)
+
+
+if __name__ == '__main__':
+ """
+ Implemented to support writing unit tests
+ """
+
+ cluster = Cluster()
+
+ providers = ['gce', 'aws', 'libvirt']
+ parser = argparse.ArgumentParser(
+ description='Python wrapper to ensure proper environment for OpenShift ansible playbooks',
+ )
+ parser.add_argument('-v', '--verbose', action='count', help='Multiple -v options increase the verbosity')
+ parser.add_argument('--version', action='version', version='%(prog)s 0.2')
+
+ meta_parser = argparse.ArgumentParser(add_help=False)
+ meta_parser.add_argument('provider', choices=providers, help='provider')
+ meta_parser.add_argument('cluster_id', help='prefix for cluster VM names')
+
+ action_parser = parser.add_subparsers(dest='action', title='actions', description='Choose from valid actions')
+
+ create_parser = action_parser.add_parser('create', help='Create a cluster', parents=[meta_parser])
+ create_parser.add_argument('-m', '--masters', default=1, type=int, help='number of masters to create in cluster')
+ create_parser.add_argument('-n', '--nodes', default=2, type=int, help='number of nodes to create in cluster')
+ create_parser.set_defaults(func=cluster.create)
+
+ terminate_parser = action_parser.add_parser('terminate', help='Destroy a cluster', parents=[meta_parser])
+ terminate_parser.add_argument('-f', '--force', action='store_true', help='Destroy cluster without confirmation')
+ terminate_parser.set_defaults(func=cluster.terminate)
+
+ update_parser = action_parser.add_parser('update', help='Update OpenShift across cluster', parents=[meta_parser])
+ update_parser.add_argument('-f', '--force', action='store_true', help='Update cluster without confirmation')
+ update_parser.set_defaults(func=cluster.update)
+
+ list_parser = action_parser.add_parser('list', help='List VMs in cluster', parents=[meta_parser])
+ list_parser.set_defaults(func=cluster.list)
+
+ args = parser.parse_args()
+
+ if 'terminate' == args.action and not args.force:
+ answer = raw_input("This will destroy the ENTIRE {} environment. Are you sure? [y/N] ".format(args.cluster_id))
+ if answer not in ['y', 'Y']:
+ sys.stderr.write('\nACTION [terminate] aborted by user!\n')
+ exit(1)
+
+ if 'update' == args.action and not args.force:
+ answer = raw_input("This is destructive and could corrupt {} environment. Continue? [y/N] ".format(args.cluster_id))
+ if answer not in ['y', 'Y']:
+ sys.stderr.write('\nACTION [update] aborted by user!\n')
+ exit(1)
+
+ status = args.func(args)
+ if status != 0:
+ sys.stderr.write("ACTION [{}] failed with exit status {}\n".format(args.action, status))
+ exit(status)
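+
+# Usage sketch ('demo' is an arbitrary cluster id; see the parsers above):
+#   bin/cluster -v create gce demo            # one master and two nodes (the defaults)
+#   bin/cluster create -m 1 -n 3 libvirt demo
+#   bin/cluster terminate -f gce demo         # skip the confirmation prompt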
diff --git a/bin/ohi b/bin/ohi
new file mode 100755
index 000000000..408961ee4
--- /dev/null
+++ b/bin/ohi
@@ -0,0 +1,110 @@
+#!/usr/bin/env python
+# vim: expandtab:tabstop=4:shiftwidth=4
+
+import argparse
+import traceback
+import sys
+import os
+import re
+import tempfile
+import time
+import subprocess
+import ConfigParser
+
+from openshift_ansible import awsutil
+from openshift_ansible.awsutil import ArgumentError
+
+CONFIG_MAIN_SECTION = 'main'
+CONFIG_HOST_TYPE_ALIAS_SECTION = 'host_type_aliases'
+CONFIG_INVENTORY_OPTION = 'inventory'
+
+class Ohi(object):
+ def __init__(self):
+ self.inventory = None
+ self.host_type_aliases = {}
+ self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
+
+ # Default the config path to /etc
+ self.config_path = os.path.join(os.path.sep, 'etc', \
+ 'openshift_ansible', \
+ 'openshift_ansible.conf')
+
+ self.parse_cli_args()
+ self.parse_config_file()
+
+ self.aws = awsutil.AwsUtil(self.inventory, self.host_type_aliases)
+
+ def run(self):
+ if self.args.list_host_types:
+ self.aws.print_host_types()
+ return 0
+
+ hosts = None
+ if self.args.host_type is not None and \
+ self.args.env is not None:
+ # Both env and host-type specified
+ hosts = self.aws.get_host_list(host_type=self.args.host_type, \
+ env=self.args.env)
+
+ if self.args.host_type is None and \
+ self.args.env is not None:
+ # Only env specified
+ hosts = self.aws.get_host_list(env=self.args.env)
+
+ if self.args.host_type is not None and \
+ self.args.env is None:
+ # Only host-type specified
+ hosts = self.aws.get_host_list(host_type=self.args.host_type)
+
+ if hosts is None:
+ # We weren't able to determine what they wanted to do
+ raise ArgumentError("Invalid combination of arguments")
+
+ for host in hosts:
+ print host
+ return 0
+
+ def parse_config_file(self):
+ if os.path.isfile(self.config_path):
+ config = ConfigParser.ConfigParser()
+ config.read(self.config_path)
+
+ if config.has_section(CONFIG_MAIN_SECTION) and \
+ config.has_option(CONFIG_MAIN_SECTION, CONFIG_INVENTORY_OPTION):
+ self.inventory = config.get(CONFIG_MAIN_SECTION, CONFIG_INVENTORY_OPTION)
+
+ self.host_type_aliases = {}
+ if config.has_section(CONFIG_HOST_TYPE_ALIAS_SECTION):
+ for alias in config.options(CONFIG_HOST_TYPE_ALIAS_SECTION):
+ value = config.get(CONFIG_HOST_TYPE_ALIAS_SECTION, alias).split(',')
+ self.host_type_aliases[alias] = value
+
+ def parse_cli_args(self):
+ """Setup the command line parser with the options we want
+ """
+
+ parser = argparse.ArgumentParser(description='Openshift Host Inventory')
+
+ parser.add_argument('--list-host-types', default=False, action='store_true',
+ help='List all of the host types')
+
+ parser.add_argument('-e', '--env', action="store",
+ help="Which environment to use")
+
+ parser.add_argument('-t', '--host-type', action="store",
+ help="Which host type to use")
+
+ self.args = parser.parse_args()
+
+
+if __name__ == '__main__':
+ if len(sys.argv) == 1:
+ print "\nError: No options given. Use --help to see the available options\n"
+ sys.exit(1)  # being called with no options is an error, so exit non-zero
+
+ try:
+ ohi = Ohi()
+ exitcode = ohi.run()
+ sys.exit(exitcode)
+ except ArgumentError as e:
+ print "\nError: %s\n" % e.message
diff --git a/bin/openshift-ansible-bin.spec b/bin/openshift-ansible-bin.spec
new file mode 100644
index 000000000..c7db6f684
--- /dev/null
+++ b/bin/openshift-ansible-bin.spec
@@ -0,0 +1,65 @@
+Summary: OpenShift Ansible Scripts for working with metadata hosts
+Name: openshift-ansible-bin
+Version: 0.0.8
+Release: 1%{?dist}
+License: ASL 2.0
+URL: https://github.com/openshift/openshift-ansible
+Source0: %{name}-%{version}.tar.gz
+Requires: python2, openshift-ansible-inventory
+BuildRequires: python2-devel
+BuildArch: noarch
+
+%description
+Scripts that make it easier to work with hosts that are defined only by metadata.
+
+%prep
+%setup -q
+
+%build
+
+%install
+mkdir -p %{buildroot}%{_bindir}
+mkdir -p %{buildroot}%{python_sitelib}/openshift_ansible
+mkdir -p %{buildroot}/etc/bash_completion.d
+mkdir -p %{buildroot}/etc/openshift_ansible
+
+cp -p ossh oscp opssh ohi %{buildroot}%{_bindir}
+cp -p openshift_ansible/* %{buildroot}%{python_sitelib}/openshift_ansible
+cp -p ossh_bash_completion %{buildroot}/etc/bash_completion.d
+
+cp -p openshift_ansible.conf.example %{buildroot}/etc/openshift_ansible/openshift_ansible.conf
+
+%files
+%{_bindir}/*
+%{python_sitelib}/openshift_ansible/
+/etc/bash_completion.d/*
+%config(noreplace) /etc/openshift_ansible/
+
+%changelog
+* Mon Apr 13 2015 Thomas Wiest <twiest@redhat.com> 0.0.8-1
+- fixed bug in opssh where it wouldn't actually run pssh (twiest@redhat.com)
+
+* Mon Apr 13 2015 Thomas Wiest <twiest@redhat.com> 0.0.7-1
+- added the ability to run opssh and ohi on all hosts in an environment, as
+ well as all hosts of the same host-type regardless of environment
+ (twiest@redhat.com)
+- added ohi (twiest@redhat.com)
+* Thu Apr 09 2015 Thomas Wiest <twiest@redhat.com> 0.0.6-1
+- fixed bug where opssh would throw an exception if pssh returned a non-zero
+ exit code (twiest@redhat.com)
+
+* Wed Apr 08 2015 Thomas Wiest <twiest@redhat.com> 0.0.5-1
+- fixed the opssh default output behavior to be consistent with pssh. Also
+ fixed a bug in how directories are named for --outdir and --errdir.
+ (twiest@redhat.com)
+* Tue Mar 31 2015 Thomas Wiest <twiest@redhat.com> 0.0.4-1
+- Fixed when tag was missing and added opssh completion (kwoodson@redhat.com)
+
+* Mon Mar 30 2015 Thomas Wiest <twiest@redhat.com> 0.0.3-1
+- created a python package named openshift_ansible (twiest@redhat.com)
+
+* Mon Mar 30 2015 Thomas Wiest <twiest@redhat.com> 0.0.2-1
+- added config file support to opssh, ossh, and oscp (twiest@redhat.com)
+* Tue Mar 24 2015 Thomas Wiest <twiest@redhat.com> 0.0.1-1
+- new package built with tito
+
diff --git a/bin/openshift_ansible.conf.example b/bin/openshift_ansible.conf.example
new file mode 100644
index 000000000..e891b855a
--- /dev/null
+++ b/bin/openshift_ansible.conf.example
@@ -0,0 +1,6 @@
+#[main]
+#inventory = /usr/share/ansible/inventory/multi_ec2.py
+
+#[host_type_aliases]
+#host-type-one = aliasa,aliasb
+#host-type-two = aliasfortwo
diff --git a/bin/openshift_ansible/__init__.py b/bin/openshift_ansible/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/bin/openshift_ansible/__init__.py
diff --git a/bin/awsutil.py b/bin/openshift_ansible/awsutil.py
index 37259b946..65b269930 100644
--- a/bin/awsutil.py
+++ b/bin/openshift_ansible/awsutil.py
@@ -5,28 +5,36 @@ import os
import json
import re
+class ArgumentError(Exception):
+ def __init__(self, message):
+ self.message = message
+
class AwsUtil(object):
- def __init__(self):
- self.host_type_aliases = {
- 'legacy-openshift-broker': ['broker', 'ex-srv'],
- 'openshift-node': ['node', 'ex-node'],
- 'openshift-messagebus': ['msg'],
- 'openshift-customer-database': ['mongo'],
- 'openshift-website-proxy': ['proxy'],
- 'openshift-community-website': ['drupal'],
- 'package-mirror': ['mirror'],
- }
+ def __init__(self, inventory_path=None, host_type_aliases={}):
+ self.host_type_aliases = host_type_aliases
+ self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
+
+ if inventory_path is None:
+ inventory_path = os.path.realpath(os.path.join(self.file_path, \
+ '..', '..', 'inventory', \
+ 'multi_ec2.py'))
+
+ if not os.path.isfile(inventory_path):
+ raise Exception("Inventory file not found [%s]" % inventory_path)
+ self.inventory_path = inventory_path
+ self.setup_host_type_alias_lookup()
+
+ def setup_host_type_alias_lookup(self):
self.alias_lookup = {}
for key, values in self.host_type_aliases.iteritems():
for value in values:
self.alias_lookup[value] = key
- self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
- self.multi_ec2_path = os.path.realpath(os.path.join(self.file_path, '..','inventory','multi_ec2.py'))
+
def get_inventory(self,args=[]):
- cmd = [self.multi_ec2_path]
+ cmd = [self.inventory_path]
if args:
cmd.extend(args)
@@ -124,15 +132,45 @@ class AwsUtil(object):
return self.alias_lookup[host_type]
return host_type
+ def gen_env_tag(self, env):
+ """Generate the environment tag
+ """
+ return "tag_environment_%s" % env
+
+ def gen_host_type_tag(self, host_type):
+ """Generate the host type tag
+ """
+ host_type = self.resolve_host_type(host_type)
+ return "tag_host-type_%s" % host_type
+
def gen_env_host_type_tag(self, host_type, env):
"""Generate the environment host type tag
"""
host_type = self.resolve_host_type(host_type)
return "tag_env-host-type_%s-%s" % (env, host_type)
- def get_host_list(self, host_type, env):
+ def get_host_list(self, host_type=None, env=None):
"""Get the list of hosts from the inventory using host-type and environment
"""
inv = self.get_inventory()
- host_type_tag = self.gen_env_host_type_tag(host_type, env)
- return inv[host_type_tag]
+
+ if host_type is not None and \
+ env is not None:
+ # Both host type and environment were specified
+ env_host_type_tag = self.gen_env_host_type_tag(host_type, env)
+ return inv[env_host_type_tag]
+
+ if host_type is None and \
+ env is not None:
+ # Just environment was specified
+ host_type_tag = self.gen_env_tag(env)
+ return inv[host_type_tag]
+
+ if host_type is not None and \
+ env is None:
+ # Just host-type was specified
+ host_type_tag = self.gen_host_type_tag(host_type)
+ return inv[host_type_tag]
+
+ # We should never reach here!
+ raise ArgumentError("Invalid combination of parameters")
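+
+# Sketch of the tag names the helpers above generate, assuming a hypothetical
+# host_type_aliases entry that maps 'node' to 'openshift-node':
+#   gen_env_tag('prod')                   -> 'tag_environment_prod'
+#   gen_host_type_tag('node')             -> 'tag_host-type_openshift-node'
+#   gen_env_host_type_tag('node', 'prod') -> 'tag_env-host-type_prod-openshift-node'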
diff --git a/bin/opssh b/bin/opssh
index 71e5bf9f2..a4fceb6a8 100755
--- a/bin/opssh
+++ b/bin/opssh
@@ -2,7 +2,6 @@
# vim: expandtab:tabstop=4:shiftwidth=4
import argparse
-import awsutil
import traceback
import sys
import os
@@ -10,54 +9,71 @@ import re
import tempfile
import time
import subprocess
+import ConfigParser
-DEFAULT_PSSH_PAR=200
+from openshift_ansible import awsutil
+from openshift_ansible.awsutil import ArgumentError
+
+DEFAULT_PSSH_PAR = 200
PSSH = '/usr/bin/pssh'
+CONFIG_MAIN_SECTION = 'main'
+CONFIG_HOST_TYPE_ALIAS_SECTION = 'host_type_aliases'
+CONFIG_INVENTORY_OPTION = 'inventory'
class Opssh(object):
def __init__(self):
+ self.inventory = None
+ self.host_type_aliases = {}
self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
- self.aws = awsutil.AwsUtil()
+
+ # Default the config path to /etc
+ self.config_path = os.path.join(os.path.sep, 'etc', \
+ 'openshift_ansible', \
+ 'openshift_ansible.conf')
self.parse_cli_args()
+ self.parse_config_file()
+
+ self.aws = awsutil.AwsUtil(self.inventory, self.host_type_aliases)
+ def run(self):
if self.args.list_host_types:
self.aws.print_host_types()
- return
+ return 0
- if self.args.env and \
- self.args.host_type and \
- self.args.command:
- retval = self.run_pssh()
- if retval != 0:
- raise ValueError("pssh run failed")
+ if self.args.host_type is not None or \
+ self.args.env is not None:
+ return self.run_pssh()
- return
-
- # If it makes it here, we weren't able to determine what they wanted to do
- raise ValueError("Invalid combination of arguments")
+ # We weren't able to determine what they wanted to do
+ raise ArgumentError("Invalid combination of arguments")
def run_pssh(self):
"""Actually run the pssh command based off of the supplied options
"""
# Default set of options
- pssh_args = [PSSH, '-i', '-t', '0', '-p', str(self.args.par), '--user', self.args.user]
+ pssh_args = [PSSH, '-t', '0', '-p', str(self.args.par), '--user', self.args.user]
+
+ if self.args.inline:
+ pssh_args.append("--inline")
if self.args.outdir:
- pssh_args.append("--outdir='%s'" % self.args.outdir)
+ pssh_args.extend(["--outdir", self.args.outdir])
if self.args.errdir:
- pssh_args.append("--errdir='%s'" % self.args.errdir)
+ pssh_args.extend(["--errdir", self.args.errdir])
+
+ hosts = self.aws.get_host_list(host_type=self.args.host_type,
+ env=self.args.env)
- hosts = self.aws.get_host_list(self.args.host_type, self.args.env)
with tempfile.NamedTemporaryFile(prefix='opssh-', delete=True) as f:
for h in hosts:
f.write(h + os.linesep)
f.flush()
- pssh_args.extend(["-h", "%s" % f.name])
- pssh_args.append("%s" % self.args.command)
+ pssh_args.extend(["-h", f.name])
+ pssh_args.append(self.args.command)
print
print "Running: %s" % ' '.join(pssh_args)
@@ -66,6 +82,20 @@ class Opssh(object):
return None
+ def parse_config_file(self):
+ if os.path.isfile(self.config_path):
+ config = ConfigParser.ConfigParser()
+ config.read(self.config_path)
+
+ if config.has_section(CONFIG_MAIN_SECTION) and \
+ config.has_option(CONFIG_MAIN_SECTION, CONFIG_INVENTORY_OPTION):
+ self.inventory = config.get(CONFIG_MAIN_SECTION, CONFIG_INVENTORY_OPTION)
+
+ self.host_type_aliases = {}
+ if config.has_section(CONFIG_HOST_TYPE_ALIAS_SECTION):
+ for alias in config.options(CONFIG_HOST_TYPE_ALIAS_SECTION):
+ value = config.get(CONFIG_HOST_TYPE_ALIAS_SECTION, alias).split(',')
+ self.host_type_aliases[alias] = value
def parse_cli_args(self):
"""Setup the command line parser with the options we want
@@ -79,7 +109,7 @@ class Opssh(object):
parser.add_argument('-e', '--env', action="store",
help="Which environment to use")
- parser.add_argument('-t', '--host-type', action="store",
+ parser.add_argument('-t', '--host-type', action="store", default=None,
help="Which host type to use")
parser.add_argument('-c', '--command', action='store',
@@ -88,6 +118,9 @@ class Opssh(object):
parser.add_argument('--user', action='store', default='root',
help='username')
+ parser.add_argument('-i', '--inline', default=False, action='store_true',
+ help='inline aggregated output and error for each server')
+
parser.add_argument('-p', '--par', action='store', default=DEFAULT_PSSH_PAR,
help=('max number of parallel threads (default %s)' % DEFAULT_PSSH_PAR))
@@ -107,5 +140,7 @@ if __name__ == '__main__':
try:
opssh = Opssh()
- except ValueError as e:
+ exitcode = opssh.run()
+ sys.exit(exitcode)
+ except ArgumentError as e:
print "\nError: %s\n" % e.message
diff --git a/bin/oscp b/bin/oscp
index 146bbbea5..461ad0a0f 100755
--- a/bin/oscp
+++ b/bin/oscp
@@ -2,21 +2,34 @@
# vim: expandtab:tabstop=4:shiftwidth=4
import argparse
-import awsutil
import traceback
import sys
import os
import re
+import ConfigParser
+
+from openshift_ansible import awsutil
+
+CONFIG_MAIN_SECTION = 'main'
+CONFIG_INVENTORY_OPTION = 'inventory'
class Oscp(object):
def __init__(self):
+ self.inventory = None
self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
+
+ # Default the config path to /etc
+ self.config_path = os.path.join(os.path.sep, 'etc', \
+ 'openshift_ansible', \
+ 'openshift_ansible.conf')
+
self.parse_cli_args()
+ self.parse_config_file()
# parse host and user
self.process_host()
- self.aws = awsutil.AwsUtil()
+ self.aws = awsutil.AwsUtil(self.inventory)
# get a dict of host inventory
if self.args.list:
@@ -38,9 +51,18 @@ class Oscp(object):
else:
self.scp()
+ def parse_config_file(self):
+ if os.path.isfile(self.config_path):
+ config = ConfigParser.ConfigParser()
+ config.read(self.config_path)
+
+ if config.has_section(CONFIG_MAIN_SECTION) and \
+ config.has_option(CONFIG_MAIN_SECTION, CONFIG_INVENTORY_OPTION):
+ self.inventory = config.get(CONFIG_MAIN_SECTION, CONFIG_INVENTORY_OPTION)
+
def parse_cli_args(self):
parser = argparse.ArgumentParser(description='Openshift Online SSH Tool.')
- parser.add_argument('-e', '--env',
+ parser.add_argument('-e', '--env',
action="store", help="Environment where this server exists.")
parser.add_argument('-d', '--debug', default=False,
action="store_true", help="debug mode")
diff --git a/bin/ossh b/bin/ossh
index 66a4cfb5c..c16ea6eda 100755
--- a/bin/ossh
+++ b/bin/ossh
@@ -2,18 +2,31 @@
# vim: expandtab:tabstop=4:shiftwidth=4
import argparse
-import awsutil
import traceback
import sys
import os
import re
+import ConfigParser
+
+from openshift_ansible import awsutil
+
+CONFIG_MAIN_SECTION = 'main'
+CONFIG_INVENTORY_OPTION = 'inventory'
class Ossh(object):
def __init__(self):
+ self.inventory = None
self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
+
+ # Default the config path to /etc
+ self.config_path = os.path.join(os.path.sep, 'etc', \
+ 'openshift_ansible', \
+ 'openshift_ansible.conf')
+
self.parse_cli_args()
+ self.parse_config_file()
- self.aws = awsutil.AwsUtil()
+ self.aws = awsutil.AwsUtil(self.inventory)
# get a dict of host inventory
if self.args.list:
@@ -37,6 +50,15 @@ class Ossh(object):
else:
self.ssh()
+ def parse_config_file(self):
+ if os.path.isfile(self.config_path):
+ config = ConfigParser.ConfigParser()
+ config.read(self.config_path)
+
+ if config.has_section(CONFIG_MAIN_SECTION) and \
+ config.has_option(CONFIG_MAIN_SECTION, CONFIG_INVENTORY_OPTION):
+ self.inventory = config.get(CONFIG_MAIN_SECTION, CONFIG_INVENTORY_OPTION)
+
def parse_cli_args(self):
parser = argparse.ArgumentParser(description='Openshift Online SSH Tool.')
parser.add_argument('-e', '--env', action="store",
diff --git a/bin/ossh_bash_completion b/bin/ossh_bash_completion
index 6a95ce6ee..1467de858 100755
--- a/bin/ossh_bash_completion
+++ b/bin/ossh_bash_completion
@@ -1,6 +1,7 @@
__ossh_known_hosts(){
if [[ -f ~/.ansible/tmp/multi_ec2_inventory.cache ]]; then
- /usr/bin/python -c 'import json,os; z = json.loads(open("%s"%os.path.expanduser("~/.ansible/tmp/multi_ec2_inventory.cache")).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items()])'
+ /usr/bin/python -c 'import json,os; z = json.loads(open("%s"%os.path.expanduser("~/.ansible/tmp/multi_ec2_inventory.cache")).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])'
+
fi
}
@@ -16,3 +17,23 @@ _ossh()
return 0
}
complete -F _ossh ossh oscp
+
+__opssh_known_hosts(){
+ if [[ -f ~/.ansible/tmp/multi_ec2_inventory.cache ]]; then
+ /usr/bin/python -c 'import json,os; z = json.loads(open("%s"%os.path.expanduser("~/.ansible/tmp/multi_ec2_inventory.cache")).read()); print "\n".join(["%s" % (host["ec2_tag_host-type"]) for dns, host in z["_meta"]["hostvars"].items() if "ec2_tag_host-type" in host])'
+ fi
+}
+
+_opssh()
+{
+ local cur prev known_hosts
+ COMPREPLY=()
+ cur="${COMP_WORDS[COMP_CWORD]}"
+ prev="${COMP_WORDS[COMP_CWORD-1]}"
+ known_hosts="$(__opssh_known_hosts)"
+ COMPREPLY=( $(compgen -W "${known_hosts}" -- ${cur}))
+
+ return 0
+}
+complete -F _opssh opssh
+
diff --git a/cluster.sh b/cluster.sh
deleted file mode 100755
index 9c9aad4d2..000000000
--- a/cluster.sh
+++ /dev/null
@@ -1,113 +0,0 @@
-#!/bin/bash -eu
-
-NODES=2
-MASTERS=1
-
-# If the environment variable OO_PROVDER is defined, it used for the provider
-PROVIDER=${OO_PROVIDER:-''}
-# Otherwise, default is gce (Google Compute Engine)
-if [ "x$PROVIDER" == "x" ];then
- PROVIDER=gce
-fi
-
-UPPER_CASE_PROVIDER=$(echo $PROVIDER | tr '[:lower:]' '[:upper:]')
-
-
-# Use OO_MASTER_PLAYBOOK/OO_NODE_PLAYBOOK environment variables for playbooks if defined,
-# otherwise use openshift default values.
-MASTER_PLAYBOOK=${OO_MASTER_PLAYBOOK:-'openshift-master'}
-NODE_PLAYBOOK=${OO_NODE_PLAYBOOK:-'openshift-node'}
-
-
-# @formatter:off
-function usage {
- cat 1>&2 <<-EOT
- ${0} : [create|terminate|update|list] { ${UPPER_CASE_PROVIDER} environment tag}
-
- Supported environment tags:
- $(grep --no-messages 'SUPPORTED_ENVS.*=' ./lib/${PROVIDER}_command.rb)
- $([ $? -ne 0 ] && echo "No supported environment tags found for ${PROVIDER}")
-
- Optional arguments for create:
- [-p|--provider, -m|--masters, -n|--nodes, --master-playbook, --node-playbook]
-
- Optional arguments for terminate|update:
- [-p|--provider, --master-playbook, --node-playbook]
-EOT
-}
-# @formatter:on
-
-function create_cluster {
- ./cloud.rb "${PROVIDER}" launch -e "${ENV}" --type=$MASTER_PLAYBOOK -c $MASTERS
-
- ./cloud.rb "${PROVIDER}" launch -e "${ENV}" --type=$NODE_PLAYBOOK -c $NODES
-
- update_cluster
-
- echo -e "\nCreated ${MASTERS}/${MASTER_PLAYBOOK} masters and ${NODES}/${NODE_PLAYBOOK} nodes using ${PROVIDER} provider\n"
-}
-
-function update_cluster {
- ./cloud.rb "${PROVIDER}" config -e "${ENV}" --type=$MASTER_PLAYBOOK
- ./cloud.rb "${PROVIDER}" config -e "${ENV}" --type=$NODE_PLAYBOOK
-}
-
-function terminate_cluster {
- ./cloud.rb "${PROVIDER}" terminate -e "${ENV}" --type=$MASTER_PLAYBOOK
- ./cloud.rb "${PROVIDER}" terminate -e "${ENV}" --type=$NODE_PLAYBOOK
-}
-
-[ -f ./cloud.rb ] || (echo 1>&2 'Cannot find ./cloud.rb' && exit 1)
-
-function check_argval {
- if [[ $1 == -* ]]; then
- echo "Invalid value: '$1'"
- usage
- exit 1
- fi
-}
-
-# Using GNU getopt to support both small and long formats
-OPTIONS=`getopt -o p:m:n:h --long provider:,masters:,nodes:,master-playbook:,node-playbook:,help \
- -n "$0" -- "$@"`
-eval set -- "$OPTIONS"
-
-while true; do
- case "$1" in
- -h|--help) (usage; exit 1) ; shift ;;
- -p|--provider) PROVIDER="$2" ; check_argval $2 ; shift 2 ;;
- -m|--masters) MASTERS="$2" ; check_argval $2 ; shift 2 ;;
- -n|--nodes) NODES="$2" ; check_argval $2 ; shift 2 ;;
- --master-playbook) MASTER_PLAYBOOK="$2" ; check_argval $2 ; shift 2 ;;
- --node-playbook) NODE_PLAYBOOK="$2" ; check_argval $2 ; shift 2 ;;
- --) shift ; break ;;
- *) break ;;
- esac
-done
-
-shift $((OPTIND-1))
-
-[ -z "${1:-}" ] && (usage; exit 1)
-
-case "${1}" in
- 'create')
- [ -z "${2:-}" ] && (usage; exit 1)
- ENV="${2}"
- create_cluster ;;
- 'update')
- [ -z "${2:-}" ] && (usage; exit 1)
- ENV="${2}"
- update_cluster ;;
- 'terminate')
- [ -z "${2:-}" ] && (usage; exit 1)
- ENV="${2}"
- terminate_cluster ;;
- 'list') ./cloud.rb "${PROVIDER}" list ;;
- 'help') usage; exit 0 ;;
- *)
- echo -n 1>&2 "${1} is not a supported operation";
- usage;
- exit 1 ;;
-esac
-
-exit 0
diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py
index b57056375..1cf02218c 100644
--- a/filter_plugins/oo_filters.py
+++ b/filter_plugins/oo_filters.py
@@ -1,39 +1,51 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# vim: expandtab:tabstop=4:shiftwidth=4
+
from ansible import errors, runner
import json
import pdb
def oo_pdb(arg):
- ''' This pops you into a pdb instance where arg is the data passed in from the filter.
+ ''' This pops you into a pdb instance where arg is the data passed in from the filter.
Ex: "{{ hostvars | oo_pdb }}"
- '''
- pdb.set_trace()
- return arg
+ '''
+ pdb.set_trace()
+ return arg
def oo_len(arg):
- ''' This returns the length of the argument
+ ''' This returns the length of the argument
Ex: "{{ hostvars | oo_len }}"
- '''
- return len(arg)
+ '''
+ return len(arg)
def get_attr(data, attribute=None):
- ''' This looks up dictionary attributes of the form a.b.c and returns the value.
+ ''' This looks up dictionary attributes of the form a.b.c and returns the value.
Ex: data = {'a': {'b': {'c': 5}}}
attribute = "a.b.c"
returns 5
- '''
+ '''
+ if not attribute:
+ raise errors.AnsibleFilterError("|failed expects attribute to be set")
+
+ ptr = data
+ for attr in attribute.split('.'):
+ ptr = ptr[attr]
- if not attribute:
- raise errors.AnsibleFilterError("|failed expects attribute to be set")
+ return ptr
- ptr = data
- for attr in attribute.split('.'):
- ptr = ptr[attr]
+def oo_flatten(data):
+ ''' This filter plugin will flatten a list of lists
+ '''
+ if not issubclass(type(data), list):
+ raise errors.AnsibleFilterError("|failed expects to flatten a List")
+
+ return [ item for sublist in data for item in sublist ]
- return ptr
def oo_collect(data, attribute=None, filters={}):
- ''' This takes a list of dict and collects all attributes specified into a list
- If filter is specified then we will include all items that match _ALL_ of filters.
+ ''' This takes a list of dicts and collects all attributes specified into a list.
+ If filters are specified, we include only the items that match _ALL_ of the filters.
Ex: data = [ {'a':1, 'b':5, 'z': 'z'}, # True, return
{'a':2, 'z': 'z'}, # True, return
{'a':3, 'z': 'z'}, # True, return
@@ -42,44 +54,60 @@ def oo_collect(data, attribute=None, filters={}):
attribute = 'a'
filters = {'z': 'z'}
returns [1, 2, 3]
- '''
+ '''
- if not issubclass(type(data), list):
- raise errors.AnsibleFilterError("|failed expects to filter on a List")
+ if not issubclass(type(data), list):
+ raise errors.AnsibleFilterError("|failed expects to filter on a List")
- if not attribute:
- raise errors.AnsibleFilterError("|failed expects attribute to be set")
+ if not attribute:
+ raise errors.AnsibleFilterError("|failed expects attribute to be set")
- if filters:
- retval = [get_attr(d, attribute) for d in data if all([ d[key] == filters[key] for key in filters ]) ]
- else:
- retval = [get_attr(d, attribute) for d in data]
+ if filters:
+ retval = [get_attr(d, attribute) for d in data if all([ d[key] == filters[key] for key in filters ]) ]
+ else:
+ retval = [get_attr(d, attribute) for d in data]
- return retval
+ return retval
def oo_select_keys(data, keys):
- ''' This returns a list, which contains the value portions for the keys
+ ''' This returns a list, which contains the value portions for the keys
Ex: data = { 'a':1, 'b':2, 'c':3 }
keys = ['a', 'c']
returns [1, 3]
- '''
+ '''
+
+ if not issubclass(type(data), dict):
+ raise errors.AnsibleFilterError("|failed expects to filter on a Dictionary")
- if not issubclass(type(data), dict):
- raise errors.AnsibleFilterError("|failed expects to filter on a Dictionary")
+ if not issubclass(type(keys), list):
+ raise errors.AnsibleFilterError("|failed expects first param is a list")
- if not issubclass(type(keys), list):
- raise errors.AnsibleFilterError("|failed expects first param is a list")
+ # Gather up the values for the list of keys passed in
+ retval = [data[key] for key in keys]
- # Gather up the values for the list of keys passed in
- retval = [data[key] for key in keys]
+ return retval
- return retval
+def oo_prepend_strings_in_list(data, prepend):
+ ''' This takes a list of strings and prepends a string to each item in the
+ list
+ Ex: data = ['cart', 'tree']
+ prepend = 'apple-'
+ returns ['apple-cart', 'apple-tree']
+ '''
+ if not issubclass(type(data), list):
+ raise errors.AnsibleFilterError("|failed expects first param is a list")
+ if not all(isinstance(x, basestring) for x in data):
+ raise errors.AnsibleFilterError("|failed expects first param is a list of strings")
+ retval = [prepend + s for s in data]
+ return retval
class FilterModule (object):
- def filters(self):
- return {
- "oo_select_keys": oo_select_keys,
- "oo_collect": oo_collect,
- "oo_len": oo_len,
- "oo_pdb": oo_pdb
- }
+ def filters(self):
+ return {
+ "oo_select_keys": oo_select_keys,
+ "oo_collect": oo_collect,
+ "oo_flatten": oo_flatten,
+ "oo_len": oo_len,
+ "oo_pdb": oo_pdb,
+ "oo_prepend_strings_in_list": oo_prepend_strings_in_list
+ }
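
A minimal plain-Python sketch of what the filters above compute, for readers skimming the hunk. It is a simplification: the real filters raise errors.AnsibleFilterError on bad input, and oo_collect resolves dotted attribute paths through get_attr. The sample data is made up.

def oo_flatten(data):
    # Flatten one level of nesting: [[1, 2], [3]] -> [1, 2, 3]
    return [item for sublist in data for item in sublist]

def oo_collect(data, attribute, filters=None):
    # Collect one attribute from each dict, keeping only items that
    # match _ALL_ of the filter key/value pairs (when filters is given).
    filters = filters or {}
    return [d[attribute] for d in data
            if all(d.get(k) == v for k, v in filters.items())]

def oo_prepend_strings_in_list(data, prepend):
    # Prepend a fixed string to every item in a list of strings.
    return [prepend + s for s in data]

hosts = [{'a': 1, 'z': 'z'}, {'a': 2, 'z': 'z'}, {'a': 3, 'z': 'y'}]
print(oo_collect(hosts, 'a', {'z': 'z'}))                      # [1, 2]
print(oo_flatten([[1, 2], [3]]))                               # [1, 2, 3]
print(oo_prepend_strings_in_list(['cart', 'tree'], 'apple-'))  # ['apple-cart', 'apple-tree']
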
diff --git a/inventory/aws/group_vars/all b/inventory/aws/group_vars/all
new file mode 100644
index 000000000..b22da00de
--- /dev/null
+++ b/inventory/aws/group_vars/all
@@ -0,0 +1,2 @@
+---
+ansible_ssh_user: root
diff --git a/inventory/byo/group_vars/all b/inventory/byo/group_vars/all
new file mode 100644
index 000000000..d63e96668
--- /dev/null
+++ b/inventory/byo/group_vars/all
@@ -0,0 +1,28 @@
+---
+# let's assume that we want to use root as the ssh user for all hosts
+ansible_ssh_user: root
+
+# default debug level for all OpenShift hosts
+openshift_debug_level: 4
+
+# set the OpenShift deployment type for all hosts
+openshift_deployment_type: enterprise
+
+# Override the default registry for development
+openshift_registry_url: docker-buildvm-rhose.usersys.redhat.com:5000/openshift3_beta/ose-${component}:${version}
+
+# Use latest Errata puddle as an additional repo:
+#openshift_additional_repos:
+#- id: ose-devel
+# name: ose-devel
+# baseurl: http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterpriseErrata/3.0/latest/RH7-RHOSE-3.0/$basearch/os
+# enabled: 1
+# gpgcheck: 0
+
+# Use latest Whitelist puddle as an additional repo:
+openshift_additional_repos:
+- id: ose-devel
+ name: ose-devel
+ baseurl: http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterprise/3.0/latest/RH7-RHOSE-3.0/$basearch/os
+ enabled: 1
+ gpgcheck: 0
diff --git a/inventory/byo/hosts b/inventory/byo/hosts
new file mode 100644
index 000000000..2dd854778
--- /dev/null
+++ b/inventory/byo/hosts
@@ -0,0 +1,10 @@
+# This is an example of a bring your own (byo) host inventory
+
+# host group for masters
+[masters]
+ose3-master-ansible.test.example.com
+
+# host group for nodes
+[nodes]
+ose3-node[1:2]-ansible.test.example.com
+
diff --git a/inventory/gce/group_vars/all b/inventory/gce/group_vars/all
new file mode 100644
index 000000000..b22da00de
--- /dev/null
+++ b/inventory/gce/group_vars/all
@@ -0,0 +1,2 @@
+---
+ansible_ssh_user: root
diff --git a/inventory/libvirt/group_vars/all b/inventory/libvirt/group_vars/all
new file mode 100644
index 000000000..b22da00de
--- /dev/null
+++ b/inventory/libvirt/group_vars/all
@@ -0,0 +1,2 @@
+---
+ansible_ssh_user: root
diff --git a/inventory/libvirt/hosts b/inventory/libvirt/hosts
new file mode 100644
index 000000000..6a818f268
--- /dev/null
+++ b/inventory/libvirt/hosts
@@ -0,0 +1,2 @@
+# Eventually we'll add the GCE, AWS, etc dynamic inventories, but for now...
+localhost ansible_python_interpreter=/usr/bin/python2
diff --git a/inventory/multi_ec2.py b/inventory/multi_ec2.py
index 5dee7972b..26c09d712 100755
--- a/inventory/multi_ec2.py
+++ b/inventory/multi_ec2.py
@@ -12,6 +12,8 @@ import json
import pprint
+CONFIG_FILE_NAME = 'multi_ec2.yaml'
+
class MultiEc2(object):
def __init__(self):
@@ -20,11 +22,22 @@ class MultiEc2(object):
self.result = {}
self.cache_path = os.path.expanduser('~/.ansible/tmp/multi_ec2_inventory.cache')
self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
- self.config_file = os.path.join(self.file_path,"multi_ec2.yaml")
+
+ same_dir_config_file = os.path.join(self.file_path, CONFIG_FILE_NAME)
+ etc_dir_config_file = os.path.join(os.path.sep, 'etc', 'ansible', CONFIG_FILE_NAME)
+
+ # Prefer a file in the same directory, fall back to a file in etc
+ if os.path.isfile(same_dir_config_file):
+ self.config_file = same_dir_config_file
+ elif os.path.isfile(etc_dir_config_file):
+ self.config_file = etc_dir_config_file
+ else:
+ self.config_file = None # expect env vars
+
self.parse_cli_args()
# load yaml
- if os.path.isfile(self.config_file):
+ if self.config_file and os.path.isfile(self.config_file):
self.config = self.load_yaml_config()
elif os.environ.has_key("AWS_ACCESS_KEY_ID") and os.environ.has_key("AWS_SECRET_ACCESS_KEY"):
self.config = {}
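
The hunk above boils down to a small lookup order for the inventory config: a multi_ec2.yaml next to the script wins, then the system-wide copy under /etc/ansible, and if neither exists the script falls back to the AWS_* environment variables. A standalone sketch of that resolution, using the same paths as the diff:

import os

CONFIG_FILE_NAME = 'multi_ec2.yaml'

def resolve_config_file(script_dir):
    # Prefer a config beside the script, then the system-wide copy;
    # None signals "expect AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY".
    candidates = [
        os.path.join(script_dir, CONFIG_FILE_NAME),
        os.path.join(os.path.sep, 'etc', 'ansible', CONFIG_FILE_NAME),
    ]
    for path in candidates:
        if os.path.isfile(path):
            return path
    return None

print(resolve_config_file(os.path.dirname(os.path.realpath(__file__))))
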
diff --git a/inventory/openshift-ansible-inventory.spec b/inventory/openshift-ansible-inventory.spec
new file mode 100644
index 000000000..8267e16f6
--- /dev/null
+++ b/inventory/openshift-ansible-inventory.spec
@@ -0,0 +1,50 @@
+Summary: OpenShift Ansible Inventories
+Name: openshift-ansible-inventory
+Version: 0.0.2
+Release: 1%{?dist}
+License: ASL 2.0
+URL: https://github.com/openshift/openshift-ansible
+Source0: %{name}-%{version}.tar.gz
+Requires: python2
+BuildRequires: python2-devel
+BuildArch: noarch
+
+%description
+Ansible Inventories used with the openshift-ansible scripts and playbooks.
+
+%prep
+%setup -q
+
+%build
+
+%install
+mkdir -p %{buildroot}/etc/ansible
+mkdir -p %{buildroot}/usr/share/ansible/inventory
+mkdir -p %{buildroot}/usr/share/ansible/inventory/aws
+mkdir -p %{buildroot}/usr/share/ansible/inventory/gce
+
+cp -p multi_ec2.py %{buildroot}/usr/share/ansible/inventory
+cp -p multi_ec2.yaml.example %{buildroot}/etc/ansible/multi_ec2.yaml
+cp -p aws/ec2.py aws/ec2.ini %{buildroot}/usr/share/ansible/inventory/aws
+cp -p gce/gce.py %{buildroot}/usr/share/ansible/inventory/gce
+
+%files
+%config(noreplace) /etc/ansible/*
+%dir /usr/share/ansible/inventory
+/usr/share/ansible/inventory/multi_ec2.py*
+/usr/share/ansible/inventory/aws/ec2.py*
+%config(noreplace) /usr/share/ansible/inventory/aws/ec2.ini
+/usr/share/ansible/inventory/gce/gce.py*
+
+%changelog
+* Thu Mar 26 2015 Thomas Wiest <twiest@redhat.com> 0.0.2-1
+- added the ability to have a config file in /etc/openshift_ansible to
+ multi_ec2.py. (twiest@redhat.com)
+- Merge pull request #97 from jwhonce/wip/cluster (jhonce@redhat.com)
+- gce inventory/playbook updates for node registration changes
+ (jdetiber@redhat.com)
+- Various fixes (jdetiber@redhat.com)
+
+* Tue Mar 24 2015 Thomas Wiest <twiest@redhat.com> 0.0.1-1
+- new package built with tito
+
diff --git a/playbooks/adhoc/noc/filter_plugins b/playbooks/adhoc/noc/filter_plugins
new file mode 120000
index 000000000..99a95e4ca
--- /dev/null
+++ b/playbooks/adhoc/noc/filter_plugins
@@ -0,0 +1 @@
+../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/adhoc/noc/get_zabbix_problems.yml b/playbooks/adhoc/noc/get_zabbix_problems.yml
new file mode 100644
index 000000000..02bffc1d2
--- /dev/null
+++ b/playbooks/adhoc/noc/get_zabbix_problems.yml
@@ -0,0 +1,41 @@
+---
+- name: 'Get current hosts whose triggers are alerting, selected by trigger description'
+ hosts: localhost
+ gather_facts: no
+ roles:
+ - os_zabbix
+ post_tasks:
+ - assert:
+ that: oo_desc is defined
+
+ - zbxapi:
+ server: https://noc2.ops.rhcloud.com/zabbix/api_jsonrpc.php
+ zbx_class: Trigger
+ action: get
+ params:
+ only_true: true
+ output: extend
+ selectHosts: extend
+ searchWildCardsEnabled: 1
+ search:
+ description: "{{ oo_desc }}"
+ register: problems
+
+ - debug: var=problems
+
+ - set_fact:
+ problem_hosts: "{{ problems.results | oo_collect(attribute='hosts') | oo_flatten | oo_collect(attribute='host') | difference(['aggregates']) }}"
+
+ - debug: var=problem_hosts
+
+ - add_host:
+ name: "{{ item }}"
+ groups: problem_hosts_group
+ with_items: problem_hosts
+
+- name: "Run on problem hosts"
+ hosts: problem_hosts_group
+ gather_facts: no
+ tasks:
+ - command: "{{ oo_cmd }}"
+ when: oo_cmd is defined
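
The set_fact in this playbook chains three of the new filters. A plain-Python sketch of that pipeline on a fabricated Zabbix trigger payload (the exact shape of problems.results is an assumption inferred from the filters used):

# Hypothetical shape of problems.results from the zbxapi task above.
results = [
    {'description': 'disk full', 'hosts': [{'host': 'node1'}, {'host': 'aggregates'}]},
    {'description': 'high load', 'hosts': [{'host': 'node2'}]},
]

hosts_lists = [r['hosts'] for r in results]               # oo_collect(attribute='hosts')
flat = [h for sub in hosts_lists for h in sub]            # oo_flatten
names = [h['host'] for h in flat]                         # oo_collect(attribute='host')
problem_hosts = [n for n in names if n != 'aggregates']   # difference(['aggregates'])
print(problem_hosts)  # ['node1', 'node2']
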
diff --git a/playbooks/adhoc/noc/roles b/playbooks/adhoc/noc/roles
new file mode 120000
index 000000000..20c4c58cf
--- /dev/null
+++ b/playbooks/adhoc/noc/roles
@@ -0,0 +1 @@
+../../../roles \ No newline at end of file
diff --git a/playbooks/aws/ansible-tower/launch.yml b/playbooks/aws/ansible-tower/launch.yml
index 4c29fa833..56235bc8a 100644
--- a/playbooks/aws/ansible-tower/launch.yml
+++ b/playbooks/aws/ansible-tower/launch.yml
@@ -6,7 +6,7 @@
vars:
inst_region: us-east-1
- rhel7_ami: ami-a24e30ca
+ rhel7_ami: ami-906240f8
user_data_file: user_data.txt
vars_files:
diff --git a/playbooks/aws/openshift-cluster/filter_plugins b/playbooks/aws/openshift-cluster/filter_plugins
new file mode 120000
index 000000000..99a95e4ca
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/filter_plugins
@@ -0,0 +1 @@
+../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/aws/openshift-cluster/launch.yml b/playbooks/aws/openshift-cluster/launch.yml
new file mode 100644
index 000000000..3561c1803
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/launch.yml
@@ -0,0 +1,62 @@
+---
+- name: Launch instance(s)
+ hosts: localhost
+ connection: local
+ gather_facts: no
+ vars_files:
+ - vars.yml
+ tasks:
+ - set_fact: k8s_type="master"
+
+ - name: Generate master instance name(s)
+ set_fact: scratch={{ cluster_id }}-{{ k8s_type }}-{{ '%05x' |format( 1048576 |random) }}
+ register: master_names_output
+ with_sequence: start=1 end={{ num_masters }}
+
+ # These set_fact tasks cannot be combined
+ - set_fact:
+ master_names_string: "{% for item in master_names_output.results %}{{ item.ansible_facts.scratch }} {% endfor %}"
+
+ - set_fact:
+ master_names: "{{ master_names_string.strip().split(' ') }}"
+
+ - include: launch_instances.yml
+ vars:
+ instances: "{{ master_names }}"
+ cluster: "{{ cluster_id }}"
+ type: "{{ k8s_type }}"
+
+ - set_fact: k8s_type="node"
+
+ - name: Generate node instance name(s)
+ set_fact: scratch={{ cluster_id }}-{{ k8s_type }}-{{ '%05x' |format( 1048576 |random) }}
+ register: node_names_output
+ with_sequence: start=1 end={{ num_nodes }}
+
+ # These set_fact tasks cannot be combined
+ - set_fact:
+ node_names_string: "{% for item in node_names_output.results %}{{ item.ansible_facts.scratch }} {% endfor %}"
+
+ - set_fact:
+ node_names: "{{ node_names_string.strip().split(' ') }}"
+
+ - include: launch_instances.yml
+ vars:
+ instances: "{{ node_names }}"
+ cluster: "{{ cluster_id }}"
+ type: "{{ k8s_type }}"
+
+- hosts: "tag_env_{{ cluster_id }}"
+ roles:
+ - openshift_repos
+ - os_update_latest
+
+- include: ../openshift-master/config.yml
+ vars:
+ oo_host_group_exp: "groups[\"tag_env-host-type_{{ cluster_id }}-openshift-master\"]"
+
+- include: ../openshift-node/config.yml
+ vars:
+ oo_host_group_exp: "groups[\"tag_env-host-type_{{ cluster_id }}-openshift-node\"]"
+
+- include: list.yml
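
The name-generation dance above (with_sequence into a scratch fact, join into a string, then split) exists because the two set_fact tasks cannot be combined in one step. A rough Python equivalent of the naming scheme itself; the cluster id and counts are placeholders:

import random

def generate_names(cluster_id, k8s_type, count):
    # Mirrors "{{ cluster_id }}-{{ k8s_type }}-{{ '%05x' | format(1048576 | random) }}"
    return ['%s-%s-%05x' % (cluster_id, k8s_type, random.randrange(1048576))
            for _ in range(count)]

print(generate_names('mycluster', 'master', 1))  # e.g. ['mycluster-master-0f3a1']
print(generate_names('mycluster', 'node', 2))
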
diff --git a/playbooks/aws/openshift-cluster/launch_instances.yml b/playbooks/aws/openshift-cluster/launch_instances.yml
new file mode 100644
index 000000000..9d645fbe5
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/launch_instances.yml
@@ -0,0 +1,63 @@
+---
+- set_fact:
+ machine_type: "{{ lookup('env', 'ec2_instance_type')|default('m3.large', true) }}"
+ machine_image: "{{ lookup('env', 'ec2_ami')|default('ami-307b3658', true) }}"
+ machine_region: "{{ lookup('env', 'ec2_region')|default('us-east-1', true) }}"
+ machine_keypair: "{{ lookup('env', 'ec2_keypair')|default('libra', true) }}"
+ created_by: "{{ lookup('env', 'LOGNAME')|default(cluster, true) }}"
+ security_group: "{{ lookup('env', 'ec2_security_group')|default('public', true) }}"
+ env: "{{ cluster }}"
+ host_type: "{{ type }}"
+ env_host_type: "{{ cluster }}-openshift-{{ type }}"
+
+- name: Launch instance(s)
+ ec2:
+ state: present
+ region: "{{ machine_region }}"
+ keypair: "{{ machine_keypair }}"
+ group: "{{ security_group }}"
+ instance_type: "{{ machine_type }}"
+ image: "{{ machine_image }}"
+ count: "{{ instances | oo_len }}"
+ wait: yes
+ instance_tags:
+ created-by: "{{ created_by }}"
+ env: "{{ env }}"
+ host-type: "{{ host_type }}"
+ env-host-type: "{{ env_host_type }}"
+ register: ec2
+
+- name: Add Name tag to instances
+ ec2_tag: resource={{ item.1.id }} region={{ machine_region }} state=present
+ with_together:
+ - instances
+ - ec2.instances
+ args:
+ tags:
+ Name: "{{ item.0 }}"
+
+- set_fact:
+ instance_groups: tag_created-by_{{ created_by }}, tag_env_{{ env }}, tag_host-type_{{ host_type }}, tag_env-host-type_{{ env_host_type }}
+
+- name: Add new instances to groups and set variables
+ add_host:
+ hostname: "{{ item.0 }}"
+ ansible_ssh_host: "{{ item.1.dns_name }}"
+ groups: "{{ instance_groups }}"
+ ec2_private_ip_address: "{{ item.1.private_ip }}"
+ ec2_ip_address: "{{ item.1.public_ip }}"
+ with_together:
+ - instances
+ - ec2.instances
+
+- name: Wait for ssh
+ wait_for: "port=22 host={{ item.dns_name }}"
+ with_items: ec2.instances
+
+- name: Wait for root user setup
+ command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null root@{{ item.dns_name }} echo root user is setup"
+ register: result
+ until: result.rc == 0
+ retries: 20
+ delay: 10
+ with_items: ec2.instances
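
The instance_tags applied at launch are what the EC2 dynamic inventory later turns into host groups, and the instance_groups fact above mirrors that convention: each tag key/value pair becomes a tag_<key>_<value> group. A small sketch of the mapping, with illustrative values:

def instance_groups(created_by, env, host_type):
    # env-host-type matches "{{ cluster }}-openshift-{{ type }}" above.
    tags = [('created-by', created_by),
            ('env', env),
            ('host-type', host_type),
            ('env-host-type', '%s-openshift-%s' % (env, host_type))]
    return ['tag_%s_%s' % (k, v) for k, v in tags]

print(instance_groups('jdoe', 'mycluster', 'node'))
# ['tag_created-by_jdoe', 'tag_env_mycluster', 'tag_host-type_node',
#  'tag_env-host-type_mycluster-openshift-node']
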
diff --git a/playbooks/aws/openshift-cluster/list.yml b/playbooks/aws/openshift-cluster/list.yml
new file mode 100644
index 000000000..08e9e2df4
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/list.yml
@@ -0,0 +1,17 @@
+---
+- name: Generate oo_list_hosts group
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - set_fact: scratch_group=tag_env_{{ cluster_id }}
+ when: cluster_id != ''
+ - set_fact: scratch_group=all
+ when: scratch_group is not defined
+ - add_host: name={{ item }} groups=oo_list_hosts
+ with_items: groups[scratch_group] | difference(['localhost'])
+
+- name: List Hosts
+ hosts: oo_list_hosts
+ gather_facts: no
+ tasks:
+ - debug: msg="public:{{hostvars[inventory_hostname].ec2_ip_address}} private:{{hostvars[inventory_hostname].ec2_private_ip_address}}"
diff --git a/playbooks/aws/openshift-cluster/roles b/playbooks/aws/openshift-cluster/roles
new file mode 120000
index 000000000..20c4c58cf
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/roles
@@ -0,0 +1 @@
+../../../roles \ No newline at end of file
diff --git a/playbooks/aws/openshift-cluster/terminate.yml b/playbooks/aws/openshift-cluster/terminate.yml
new file mode 100644
index 000000000..39607633a
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/terminate.yml
@@ -0,0 +1,14 @@
+---
+- name: Terminate instance(s)
+ hosts: localhost
+
+ vars_files:
+ - vars.yml
+
+- include: ../openshift-node/terminate.yml
+ vars:
+ oo_host_group_exp: 'groups["tag_env-host-type_{{ cluster_id }}-openshift-node"]'
+
+- include: ../openshift-master/terminate.yml
+ vars:
+ oo_host_group_exp: 'groups["tag_env-host-type_{{ cluster_id }}-openshift-master"]'
diff --git a/playbooks/aws/openshift-cluster/update.yml b/playbooks/aws/openshift-cluster/update.yml
new file mode 100644
index 000000000..90ecdc6ab
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/update.yml
@@ -0,0 +1,13 @@
+---
+- hosts: "tag_env_{{ cluster_id }}"
+ roles:
+ - openshift_repos
+ - os_update_latest
+
+- include: ../openshift-master/config.yml
+ vars:
+ oo_host_group_exp: "groups[\"tag_env-host-type_{{ cluster_id }}-openshift-master\"]"
+
+- include: ../openshift-node/config.yml
+ vars:
+ oo_host_group_exp: "groups[\"tag_env-host-type_{{ cluster_id }}-openshift-node\"]"
diff --git a/playbooks/aws/openshift-cluster/vars.yml b/playbooks/aws/openshift-cluster/vars.yml
new file mode 100644
index 000000000..ed97d539c
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/vars.yml
@@ -0,0 +1 @@
+---
diff --git a/playbooks/aws/openshift-master/config.yml b/playbooks/aws/openshift-master/config.yml
index 454cd6f24..1c4060eee 100644
--- a/playbooks/aws/openshift-master/config.yml
+++ b/playbooks/aws/openshift-master/config.yml
@@ -1,42 +1,24 @@
---
-- name: "populate oo_hosts_to_config host group if needed"
+- name: Populate oo_masters_to_config host group if needed
hosts: localhost
gather_facts: no
tasks:
- name: "Evaluate oo_host_group_exp if it's set"
- add_host: "name={{ item }} groups=oo_hosts_to_config"
+ add_host: "name={{ item }} groups=oo_masters_to_config"
with_items: "{{ oo_host_group_exp | default('') }}"
when: oo_host_group_exp is defined
-- name: "Gather facts for nodes in {{ oo_env }}"
- hosts: "tag_env-host-type_{{ oo_env }}-openshift-node"
- connection: ssh
- user: root
-
-- name: "Set Origin specific facts on localhost (for later use)"
- hosts: localhost
- gather_facts: no
- tasks:
- - name: Setting openshift_node_ips fact on localhost
- set_fact:
- openshift_node_ips: "{{ hostvars
- | oo_select_keys(groups['tag_env-host-type_' + oo_env + '-openshift-node'])
- | oo_collect(attribute='ansible_default_ipv4.address') }}"
- when: groups['tag_env-host-type_' + oo_env + '-openshift-node'] is defined
-
-- name: "Configure instances"
- hosts: oo_hosts_to_config
- connection: ssh
- user: root
+- name: Configure instances
+ hosts: oo_masters_to_config
+ vars:
+ openshift_hostname: "{{ ec2_private_ip_address }}"
+ openshift_public_hostname: "{{ ec2_ip_address }}"
+ # TODO: this should be removed once openshift-sdn packages are available
+ openshift_use_openshift_sdn: False
vars_files:
- - vars.yml
+ - vars.yml
roles:
- - repos
- - {
- role: openshift_master,
- openshift_node_ips: "{{ hostvars['localhost'].openshift_node_ips | default(['']) }}",
- openshift_env: "{{ oo_env }}"
- openshift_public_ip: "{{ ec2_ip_address }}"
- }
+ - openshift_master
+ #- openshift_sdn_master
- pods
- os_env_extras
diff --git a/playbooks/aws/openshift-master/launch.yml b/playbooks/aws/openshift-master/launch.yml
index a889b93be..3d87879a0 100644
--- a/playbooks/aws/openshift-master/launch.yml
+++ b/playbooks/aws/openshift-master/launch.yml
@@ -45,14 +45,17 @@
args:
tags: "{{ oo_new_inst_tags }}"
- - name: Add new instances public IPs to oo_hosts_to_config
- add_host: "hostname={{ item.0 }} ansible_ssh_host={{ item.1.dns_name }} groupname=oo_hosts_to_config"
+ - name: Add new instances public IPs to oo_masters_to_config
+ add_host:
+ hostname: "{{ item.0 }}"
+ ansible_ssh_host: "{{ item.1.dns_name }}"
+ groupname: oo_masters_to_config
+ ec2_private_ip_address: "{{ item.1.private_ip }}"
+ ec2_ip_address: "{{ item.1.public_ip }}"
with_together:
- oo_new_inst_names
- ec2.instances
- - debug: var=ec2
-
- name: Wait for ssh
wait_for: "port=22 host={{ item.dns_name }}"
with_items: ec2.instances
diff --git a/playbooks/aws/openshift-master/terminate.yml b/playbooks/aws/openshift-master/terminate.yml
new file mode 100644
index 000000000..fd15cf00f
--- /dev/null
+++ b/playbooks/aws/openshift-master/terminate.yml
@@ -0,0 +1,52 @@
+---
+- name: Populate oo_masters_to_terminate host group if needed
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - name: Evaluate oo_host_group_exp if it's set
+ add_host: "name={{ item }} groups=oo_masters_to_terminate"
+ with_items: "{{ oo_host_group_exp | default('') }}"
+ when: oo_host_group_exp is defined
+
+- name: Gather facts for instances to terminate
+ hosts: oo_masters_to_terminate
+
+- name: Terminate instances
+ hosts: localhost
+ connection: local
+ gather_facts: no
+ vars:
+ host_vars: "{{ hostvars
+ | oo_select_keys(groups['oo_masters_to_terminate']) }}"
+ tasks:
+ - name: Terminate instances
+ ec2:
+ state: absent
+ instance_ids: ["{{ item.ec2_id }}"]
+ region: "{{ item.ec2_region }}"
+ ignore_errors: yes
+ register: ec2_term
+ with_items: host_vars
+
+ # Fail if any of the instances failed to terminate with an error other
+ # than 403 Forbidden
+ - fail: msg=Terminating instance {{ item.item.ec2_id }} failed with message {{ item.msg }}
+ when: "item.failed and not item.msg | search(\"error: EC2ResponseError: 403 Forbidden\")"
+ with_items: ec2_term.results
+
+ - name: Stop instance if termination failed
+ ec2:
+ state: stopped
+ instance_ids: ["{{ item.item.ec2_id }}"]
+ region: "{{ item.item.ec2_region }}"
+ register: ec2_stop
+ when: item.failed
+ with_items: ec2_term.results
+
+ - name: Rename stopped instances
+ ec2_tag: resource={{ item.item.item.ec2_id }} region={{ item.item.item.ec2_region }} state=present
+ args:
+ tags:
+ Name: "{{ item.item.item.ec2_tag_Name }}-terminate"
+ with_items: ec2_stop.results
+
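The three tasks above implement a terminate-or-degrade policy: a 403 from EC2 is tolerated, and anything that could not be terminated is stopped and renamed with a -terminate suffix instead. A schematic of that control flow in plain Python; the terminate/stop/rename callables are stand-ins for the ec2 and ec2_tag tasks, not real module APIs:

def terminate_with_fallback(instances, terminate, stop, rename):
    # terminate(inst) returns (ok, message), mirroring ec2_term.results.
    for inst in instances:
        ok, msg = terminate(inst)
        if ok:
            continue
        if '403 Forbidden' not in msg:
            raise RuntimeError('Terminating instance %s failed: %s' % (inst, msg))
        stop(inst)                          # state: stopped
        rename(inst, inst + '-terminate')   # ec2_tag Name

log = []
terminate_with_fallback(
    ['i-1', 'i-2'],
    terminate=lambda i: (i == 'i-1', 'error: EC2ResponseError: 403 Forbidden'),
    stop=lambda i: log.append(('stop', i)),
    rename=lambda i, name: log.append(('rename', i, name)))
print(log)  # [('stop', 'i-2'), ('rename', 'i-2', 'i-2-terminate')]
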
diff --git a/playbooks/aws/openshift-master/vars.yml b/playbooks/aws/openshift-master/vars.yml
index fb5f4ea42..c196b2fca 100644
--- a/playbooks/aws/openshift-master/vars.yml
+++ b/playbooks/aws/openshift-master/vars.yml
@@ -1,2 +1,3 @@
---
openshift_debug_level: 4
+openshift_cluster_id: "{{ cluster_id }}"
diff --git a/playbooks/aws/openshift-node/config.yml b/playbooks/aws/openshift-node/config.yml
index 9662168c4..b08ed7571 100644
--- a/playbooks/aws/openshift-node/config.yml
+++ b/playbooks/aws/openshift-node/config.yml
@@ -1,49 +1,107 @@
---
-- name: "populate oo_hosts_to_config host group if needed"
+- name: Populate oo_nodes_to_config host group if needed
hosts: localhost
gather_facts: no
tasks:
- name: Evaluate oo_host_group_exp
- add_host: "name={{ item }} groups=oo_hosts_to_config"
+ add_host: "name={{ item }} groups=oo_nodes_to_config"
with_items: "{{ oo_host_group_exp | default('') }}"
when: oo_host_group_exp is defined
+ - add_host:
+ name: "{{ groups['tag_env-host-type_' ~ cluster_id ~ '-openshift-master'][0] }}"
+ groups: oo_first_master
+ when: oo_host_group_exp is defined
-- name: "Gather facts for masters in {{ oo_env }}"
- hosts: "tag_env-host-type_{{ oo_env }}-openshift-master"
- connection: ssh
- user: root
-- name: "Set OO sepcific facts on localhost (for later use)"
- hosts: localhost
- gather_facts: no
+- name: Gather and set facts for hosts to configure
+ hosts: oo_nodes_to_config
+ roles:
+ - openshift_facts
+ tasks:
+ # Since the master is registering the nodes before they are configured, we
+ # need to make sure to set the node properties beforehand if we do not want
+ # the defaults
+ - openshift_facts:
+ role: "{{ item.role }}"
+ local_facts: "{{ item.local_facts }}"
+ with_items:
+ - role: common
+ local_facts:
+ hostname: "{{ ec2_private_ip_address }}"
+ public_hostname: "{{ ec2_ip_address }}"
+ # TODO: this should be removed once openshift-sdn packages are available
+ use_openshift_sdn: False
+ - role: node
+ local_facts:
+ external_id: "{{ openshift_node_external_id | default(None) }}"
+ resources_cpu: "{{ openshfit_node_resources_cpu | default(None) }}"
+ resources_memory: "{{ openshfit_node_resources_memory | default(None) }}"
+ pod_cidr: "{{ openshfit_node_pod_cidr | default(None) }}"
+ labels: "{{ openshfit_node_labels | default(None) }}"
+ annotations: "{{ openshfit_node_annotations | default(None) }}"
+
+
+- name: Register nodes
+ hosts: oo_first_master
+ vars:
+ openshift_nodes: "{{ hostvars
+ | oo_select_keys(groups['oo_nodes_to_config']) }}"
+ roles:
+ - openshift_register_nodes
tasks:
- - name: Setting openshift_master_ips fact on localhost
- set_fact:
- openshift_master_ips: "{{ hostvars
- | oo_select_keys(groups['tag_env-host-type_' + oo_env + '-openshift-master'])
- | oo_collect(attribute='ansible_default_ipv4.address') }}"
- when: groups['tag_env-host-type_' + oo_env + '-openshift-master'] is defined
- - name: Setting openshift_master_public_ips fact on localhost
- set_fact:
- openshift_master_public_ips: "{{ hostvars
- | oo_select_keys(groups['tag_env-host-type-' + oo_env + '-openshift-master'])
- | oo_collect(attribute='ec2_ip_address') }}"
- when: groups['tag_env-host-type-' + oo_env + '-openshift-master'] is defined
-
-- name: "Configure instances"
- hosts: oo_hosts_to_config
- connection: ssh
- user: root
+ - name: Create local temp directory for syncing certs
+ local_action: command /usr/bin/mktemp -d /tmp/openshift-ansible-XXXXXXX
+ register: mktemp
+
+ - name: Sync master certs to localhost
+ synchronize:
+ mode: pull
+ checksum: yes
+ src: /var/lib/openshift/openshift.local.certificates
+ dest: "{{ mktemp.stdout }}"
+
+
+- name: Configure instances
+ hosts: oo_nodes_to_config
vars_files:
- - vars.yml
+ - vars.yml
+ vars:
+ openshift_hostname: "{{ ec2_private_ip_address }}"
+ openshift_public_hostname: "{{ ec2_ip_address }}"
+ sync_tmpdir: "{{ hostvars[groups['oo_first_master'][0]].mktemp.stdout }}"
+ cert_parent_rel_path: openshift.local.certificates
+ cert_rel_path: "{{ cert_parent_rel_path }}/node-{{ openshift.common.hostname }}"
+ cert_base_path: /var/lib/openshift
+ cert_parent_path: "{{ cert_base_path }}/{{ cert_parent_rel_path }}"
+ cert_path: "{{ cert_base_path }}/{{ cert_rel_path }}"
+ pre_tasks:
+ - name: Ensure certificate directories exist
+ file:
+ path: "{{ item }}"
+ state: directory
+ with_items:
+ - "{{ cert_path }}"
+ - "{{ cert_parent_path }}/ca"
+
+ # TODO: notify restart openshift-node and/or restart openshift-sdn-node,
+ # possibly test service started time against certificate/config file
+ # timestamps in openshift-node or openshift-sdn-node to trigger notify
+ - name: Sync certs to nodes
+ synchronize:
+ checksum: yes
+ src: "{{ item.src }}"
+ dest: "{{ item.dest }}"
+ owner: no
+ group: no
+ with_items:
+ - src: "{{ sync_tmpdir }}/{{ cert_rel_path }}"
+ dest: "{{ cert_parent_path }}"
+ - src: "{{ sync_tmpdir }}/{{ cert_parent_rel_path }}/ca/cert.crt"
+ dest: "{{ cert_parent_path }}/ca/cert.crt"
+ - local_action: file name={{ sync_tmpdir }} state=absent
+ run_once: true
roles:
- - repos
- - docker
- - {
- role: openshift_node,
- openshift_master_ips: "{{ hostvars['localhost'].openshift_master_ips | default(['']) }}",
- openshift_master_public_ips: "{{ hostvars['localhost'].openshift_master_public_ips | default(['']) }}",
- openshift_env: "{{ oo_env }}"
- openshift_public_ip: "{{ ec2_ip_address }}"
- }
+ - openshift_node
+ #- openshift_sdn_node
- os_env_extras
+ - os_env_extras_node
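
The vars block in the node config play composes the certificate sync paths from a few relative pieces. A tiny sketch of the resulting layout; the hostname is a placeholder standing in for openshift.common.hostname:

import os

cert_base_path = '/var/lib/openshift'
cert_parent_rel_path = 'openshift.local.certificates'
hostname = 'node1.example.com'  # placeholder

cert_rel_path = os.path.join(cert_parent_rel_path, 'node-' + hostname)
cert_parent_path = os.path.join(cert_base_path, cert_parent_rel_path)
cert_path = os.path.join(cert_base_path, cert_rel_path)

print(cert_parent_path)  # /var/lib/openshift/openshift.local.certificates
print(cert_path)         # /var/lib/openshift/openshift.local.certificates/node-node1.example.com
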
diff --git a/playbooks/aws/openshift-node/launch.yml b/playbooks/aws/openshift-node/launch.yml
index a889b93be..b7ef593e7 100644
--- a/playbooks/aws/openshift-node/launch.yml
+++ b/playbooks/aws/openshift-node/launch.yml
@@ -27,7 +27,9 @@
register: ec2
- name: Add new instances public IPs to the atomic proxy host group
- add_host: "hostname={{ item.public_ip }} groupname=new_ec2_instances"
+ add_host:
+ hostname: "{{ item.public_ip }}"
+ groupname: new_ec2_instances"
with_items: ec2.instances
- name: Add Name and environment tags to instances
@@ -45,14 +47,17 @@
args:
tags: "{{ oo_new_inst_tags }}"
- - name: Add new instances public IPs to oo_hosts_to_config
- add_host: "hostname={{ item.0 }} ansible_ssh_host={{ item.1.dns_name }} groupname=oo_hosts_to_config"
+ - name: Add new instances public IPs to oo_nodes_to_config
+ add_host:
+ hostname: "{{ item.0 }}"
+ ansible_ssh_host: "{{ item.1.dns_name }}"
+ groupname: oo_nodes_to_config
+ ec2_private_ip_address: "{{ item.1.private_ip }}"
+ ec2_ip_address: "{{ item.1.public_ip }}"
with_together:
- oo_new_inst_names
- ec2.instances
- - debug: var=ec2
-
- name: Wait for ssh
wait_for: "port=22 host={{ item.dns_name }}"
with_items: ec2.instances
diff --git a/playbooks/aws/openshift-node/terminate.yml b/playbooks/aws/openshift-node/terminate.yml
new file mode 100644
index 000000000..1c0c77eb7
--- /dev/null
+++ b/playbooks/aws/openshift-node/terminate.yml
@@ -0,0 +1,52 @@
+---
+- name: Populate oo_nodes_to_terminate host group if needed
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - name: Evaluate oo_host_group_exp if it's set
+ add_host: "name={{ item }} groups=oo_nodes_to_terminate"
+ with_items: "{{ oo_host_group_exp | default('') }}"
+ when: oo_host_group_exp is defined
+
+- name: Gather facts for instances to terminate
+ hosts: oo_nodes_to_terminate
+
+- name: Terminate instances
+ hosts: localhost
+ connection: local
+ gather_facts: no
+ vars:
+ host_vars: "{{ hostvars
+ | oo_select_keys(groups['oo_nodes_to_terminate']) }}"
+ tasks:
+ - name: Terminate instances
+ ec2:
+ state: absent
+ instance_ids: ["{{ item.ec2_id }}"]
+ region: "{{ item.ec2_region }}"
+ ignore_errors: yes
+ register: ec2_term
+ with_items: host_vars
+
+ # Fail if any of the instances failed to terminate with an error other
+ # than 403 Forbidden
+ - fail: msg=Terminating instance {{ item.item.ec2_id }} failed with message {{ item.msg }}
+ when: "item.failed and not item.msg | search(\"error: EC2ResponseError: 403 Forbidden\")"
+ with_items: ec2_term.results
+
+ - name: Stop instance if termination failed
+ ec2:
+ state: stopped
+ instance_ids: ["{{ item.item.ec2_id }}"]
+ region: "{{ item.item.ec2_region }}"
+ register: ec2_stop
+ when: item.failed
+ with_items: ec2_term.results
+
+ - name: Rename stopped instances
+ ec2_tag: resource={{ item.item.item.ec2_id }} region={{ item.item.item.ec2_region }} state=present
+ args:
+ tags:
+ Name: "{{ item.item.item.ec2_tag_Name }}-terminate"
+ with_items: ec2_stop.results
+
diff --git a/playbooks/aws/openshift-node/vars.yml b/playbooks/aws/openshift-node/vars.yml
index fb5f4ea42..c196b2fca 100644
--- a/playbooks/aws/openshift-node/vars.yml
+++ b/playbooks/aws/openshift-node/vars.yml
@@ -1,2 +1,3 @@
---
openshift_debug_level: 4
+openshift_cluster_id: "{{ cluster_id }}"
diff --git a/playbooks/byo/config.yml b/playbooks/byo/config.yml
new file mode 100644
index 000000000..dce49d32f
--- /dev/null
+++ b/playbooks/byo/config.yml
@@ -0,0 +1,6 @@
+---
+- name: Run the openshift-master config playbook
+ include: openshift-master/config.yml
+
+- name: Run the openshift-node config playbook
+ include: openshift-node/config.yml
diff --git a/playbooks/byo/filter_plugins b/playbooks/byo/filter_plugins
new file mode 120000
index 000000000..a4f518f07
--- /dev/null
+++ b/playbooks/byo/filter_plugins
@@ -0,0 +1 @@
+../../filter_plugins \ No newline at end of file
diff --git a/playbooks/byo/openshift-master/config.yml b/playbooks/byo/openshift-master/config.yml
new file mode 100644
index 000000000..706f9285c
--- /dev/null
+++ b/playbooks/byo/openshift-master/config.yml
@@ -0,0 +1,9 @@
+---
+- name: Gather facts for node hosts
+ hosts: nodes
+
+- name: Configure master instances
+ hosts: masters
+ roles:
+ - openshift_master
+ - openshift_sdn_master
diff --git a/playbooks/byo/openshift-master/filter_plugins b/playbooks/byo/openshift-master/filter_plugins
new file mode 120000
index 000000000..99a95e4ca
--- /dev/null
+++ b/playbooks/byo/openshift-master/filter_plugins
@@ -0,0 +1 @@
+../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/byo/openshift-master/roles b/playbooks/byo/openshift-master/roles
new file mode 120000
index 000000000..20c4c58cf
--- /dev/null
+++ b/playbooks/byo/openshift-master/roles
@@ -0,0 +1 @@
+../../../roles \ No newline at end of file
diff --git a/playbooks/byo/openshift-node/config.yml b/playbooks/byo/openshift-node/config.yml
new file mode 100644
index 000000000..69ad7a840
--- /dev/null
+++ b/playbooks/byo/openshift-node/config.yml
@@ -0,0 +1,79 @@
+---
+- name: Gather facts for node hosts
+ hosts: nodes
+ roles:
+ - openshift_facts
+ tasks:
+ # Since the master is registering the nodes before they are configured, we
+ # need to make sure to set the node properties beforehand if we do not want
+ # the defaults
+ - openshift_facts:
+ role: 'node'
+ local_facts:
+ hostname: "{{ openshift_hostname | default(None) }}"
+ external_id: "{{ openshift_node_external_id | default(None) }}"
+ resources_cpu: "{{ openshfit_node_resources_cpu | default(None) }}"
+ resources_memory: "{{ openshfit_node_resources_memory | default(None) }}"
+ pod_cidr: "{{ openshfit_node_pod_cidr | default(None) }}"
+ labels: "{{ openshfit_node_labels | default(None) }}"
+ annotations: "{{ openshfit_node_annotations | default(None) }}"
+
+
+- name: Register nodes
+ hosts: masters[0]
+ vars:
+ openshift_nodes: "{{ hostvars | oo_select_keys(groups['nodes']) }}"
+ roles:
+ - openshift_register_nodes
+ tasks:
+ - name: Create local temp directory for syncing certs
+ local_action: command /usr/bin/mktemp -d /tmp/openshift-ansible-XXXXXXX
+ register: mktemp
+
+ - name: Sync master certs to localhost
+ synchronize:
+ mode: pull
+ checksum: yes
+ src: /var/lib/openshift/openshift.local.certificates
+ dest: "{{ mktemp.stdout }}"
+
+
+- name: Configure node instances
+ hosts: nodes
+ vars:
+ sync_tmpdir: "{{ hostvars[groups['masters'][0]].mktemp.stdout }}"
+ cert_parent_rel_path: openshift.local.certificates
+ cert_rel_path: "{{ cert_parent_rel_path }}/node-{{ openshift.common.hostname }}"
+ cert_base_path: /var/lib/openshift
+ cert_parent_path: "{{ cert_base_path }}/{{ cert_parent_rel_path }}"
+ cert_path: "{{ cert_base_path }}/{{ cert_rel_path }}"
+ openshift_sdn_master_url: http://{{ hostvars[groups['masters'][0]].openshift.common.hostname }}:4001
+ pre_tasks:
+ - name: Ensure certificate directories exist
+ file:
+ path: "{{ item }}"
+ state: directory
+ with_items:
+ - "{{ cert_path }}"
+ - "{{ cert_parent_path }}/ca"
+
+ # TODO: notify restart openshift-node and/or restart openshift-sdn-node,
+ # possibly test service started time against certificate/config file
+ # timestamps in openshift-node or openshift-sdn-node to trigger notify
+ - name: Sync certs to nodes
+ synchronize:
+ checksum: yes
+ src: "{{ item.src }}"
+ dest: "{{ item.dest }}"
+ owner: no
+ group: no
+ with_items:
+ - src: "{{ sync_tmpdir }}/{{ cert_rel_path }}"
+ dest: "{{ cert_parent_path }}"
+ - src: "{{ sync_tmpdir }}/{{ cert_parent_rel_path }}/ca/cert.crt"
+ dest: "{{ cert_parent_path }}/ca/cert.crt"
+ - local_action: file name={{ sync_tmpdir }} state=absent
+ run_once: true
+ roles:
+ - openshift_node
+ - openshift_sdn_node
diff --git a/playbooks/byo/openshift-node/filter_plugins b/playbooks/byo/openshift-node/filter_plugins
new file mode 120000
index 000000000..99a95e4ca
--- /dev/null
+++ b/playbooks/byo/openshift-node/filter_plugins
@@ -0,0 +1 @@
+../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/byo/openshift-node/roles b/playbooks/byo/openshift-node/roles
new file mode 120000
index 000000000..20c4c58cf
--- /dev/null
+++ b/playbooks/byo/openshift-node/roles
@@ -0,0 +1 @@
+../../../roles \ No newline at end of file
diff --git a/playbooks/byo/roles b/playbooks/byo/roles
new file mode 120000
index 000000000..b741aa3db
--- /dev/null
+++ b/playbooks/byo/roles
@@ -0,0 +1 @@
+../../roles \ No newline at end of file
diff --git a/playbooks/gce/openshift-cluster/filter_plugins b/playbooks/gce/openshift-cluster/filter_plugins
new file mode 120000
index 000000000..99a95e4ca
--- /dev/null
+++ b/playbooks/gce/openshift-cluster/filter_plugins
@@ -0,0 +1 @@
+../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/gce/openshift-cluster/launch.yml b/playbooks/gce/openshift-cluster/launch.yml
new file mode 100644
index 000000000..14cdd2537
--- /dev/null
+++ b/playbooks/gce/openshift-cluster/launch.yml
@@ -0,0 +1,62 @@
+---
+- name: Launch instance(s)
+ hosts: localhost
+ connection: local
+ gather_facts: no
+ vars_files:
+ - vars.yml
+ tasks:
+ - set_fact: k8s_type="master"
+
+ - name: Generate master instance name(s)
+ set_fact: scratch={{ cluster_id }}-{{ k8s_type }}-{{ '%05x' |format( 1048576 |random) }}
+ register: master_names_output
+ with_sequence: start=1 end={{ num_masters }}
+
+ # These set_fact tasks cannot be combined
+ - set_fact:
+ master_names_string: "{% for item in master_names_output.results %}{{ item.ansible_facts.scratch }} {% endfor %}"
+
+ - set_fact:
+ master_names: "{{ master_names_string.strip().split(' ') }}"
+
+ - include: launch_instances.yml
+ vars:
+ instances: "{{ master_names }}"
+ cluster: "{{ cluster_id }}"
+ type: "{{ k8s_type }}"
+
+ - set_fact: k8s_type="node"
+
+ - name: Generate node instance name(s)
+ set_fact: scratch={{ cluster_id }}-{{ k8s_type }}-{{ '%05x' |format( 1048576 |random) }}
+ register: node_names_output
+ with_sequence: start=1 end={{ num_nodes }}
+
+ # These set_fact tasks cannot be combined
+ - set_fact:
+ node_names_string: "{% for item in node_names_output.results %}{{ item.ansible_facts.scratch }} {% endfor %}"
+
+ - set_fact:
+ node_names: "{{ node_names_string.strip().split(' ') }}"
+
+ - include: launch_instances.yml
+ vars:
+ instances: "{{ node_names }}"
+ cluster: "{{ cluster_id }}"
+ type: "{{ k8s_type }}"
+
+- hosts: "tag_env-{{ cluster_id }}"
+ roles:
+ - openshift_repos
+ - os_update_latest
+
+- include: ../openshift-master/config.yml
+ vars:
+ oo_host_group_exp: "groups[\"tag_env-host-type-{{ cluster_id }}-openshift-master\"]"
+
+- include: ../openshift-node/config.yml
+ vars:
+ oo_host_group_exp: "groups[\"tag_env-host-type-{{ cluster_id }}-openshift-node\"]"
+
+- include: list.yml
diff --git a/playbooks/gce/openshift-cluster/launch_instances.yml b/playbooks/gce/openshift-cluster/launch_instances.yml
new file mode 100644
index 000000000..b4f33bd87
--- /dev/null
+++ b/playbooks/gce/openshift-cluster/launch_instances.yml
@@ -0,0 +1,44 @@
+---
+# TODO: when we are ready to go to ansible 1.9+ support only, we can update to
+# the gce task to use the disk_auto_delete parameter to avoid having to delete
+# the disk as a separate step on termination
+
+- set_fact:
+ machine_type: "{{ lookup('env', 'gce_machine_type') |default('n1-standard-1', true) }}"
+ machine_image: "{{ lookup('env', 'gce_machine_image') |default('libra-rhel7', true) }}"
+
+- name: Launch instance(s)
+ gce:
+ instance_names: "{{ instances }}"
+ machine_type: "{{ machine_type }}"
+ image: "{{ machine_image }}"
+ service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
+ pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
+ project_id: "{{ lookup('env', 'gce_project_id') }}"
+ tags:
+ - "created-by-{{ lookup('env', 'LOGNAME') |default(cluster, true) }}"
+ - "env-{{ cluster }}"
+ - "host-type-{{ type }}"
+ - "env-host-type-{{ cluster }}-openshift-{{ type }}"
+ register: gce
+
+- name: Add new instances to groups and set needed variables
+ add_host:
+ hostname: "{{ item.name }}"
+ ansible_ssh_host: "{{ item.public_ip }}"
+ groups: "{{ item.tags | oo_prepend_strings_in_list('tag_') | join(',') }}"
+ gce_public_ip: "{{ item.public_ip }}"
+ gce_private_ip: "{{ item.private_ip }}"
+ with_items: gce.instance_data
+
+- name: Wait for ssh
+ wait_for: "port=22 host={{ item.public_ip }}"
+ with_items: gce.instance_data
+
+- name: Wait for root user setup
+ command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null root@{{ item.public_ip }} echo root user is setup"
+ register: result
+ until: result.rc == 0
+ retries: 20
+ delay: 10
+ with_items: gce.instance_data
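
Unlike EC2, GCE returns a flat list of tags, so the add_host task above builds inventory group names by prefixing each tag with tag_ via oo_prepend_strings_in_list and joining on commas. A one-liner sketch of what that expression produces, with a fabricated tag list:

tags = ['created-by-jdoe', 'env-mycluster', 'host-type-node',
        'env-host-type-mycluster-openshift-node']

# Equivalent to: item.tags | oo_prepend_strings_in_list('tag_') | join(',')
groups = ','.join('tag_' + t for t in tags)
print(groups)
# tag_created-by-jdoe,tag_env-mycluster,tag_host-type-node,tag_env-host-type-mycluster-openshift-node
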
diff --git a/playbooks/gce/openshift-cluster/list.yml b/playbooks/gce/openshift-cluster/list.yml
new file mode 100644
index 000000000..1124b0ea3
--- /dev/null
+++ b/playbooks/gce/openshift-cluster/list.yml
@@ -0,0 +1,17 @@
+---
+- name: Generate oo_list_hosts group
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - set_fact: scratch_group=tag_env-{{ cluster_id }}
+ when: cluster_id != ''
+ - set_fact: scratch_group=all
+ when: scratch_group is not defined
+ - add_host: name={{ item }} groups=oo_list_hosts
+ with_items: groups[scratch_group] | difference(['localhost']) | difference(groups.status_terminated)
+
+- name: List Hosts
+ hosts: oo_list_hosts
+ gather_facts: no
+ tasks:
+ - debug: msg="public:{{hostvars[inventory_hostname].gce_public_ip}} private:{{hostvars[inventory_hostname].gce_private_ip}}"
diff --git a/playbooks/gce/openshift-cluster/roles b/playbooks/gce/openshift-cluster/roles
new file mode 120000
index 000000000..20c4c58cf
--- /dev/null
+++ b/playbooks/gce/openshift-cluster/roles
@@ -0,0 +1 @@
+../../../roles \ No newline at end of file
diff --git a/playbooks/gce/openshift-cluster/terminate.yml b/playbooks/gce/openshift-cluster/terminate.yml
new file mode 100644
index 000000000..0281ae953
--- /dev/null
+++ b/playbooks/gce/openshift-cluster/terminate.yml
@@ -0,0 +1,20 @@
+---
+- name: Terminate instance(s)
+ hosts: localhost
+
+ vars_files:
+ - vars.yml
+
+- include: ../openshift-node/terminate.yml
+ vars:
+ oo_host_group_exp: 'groups["tag_env-host-type-{{ cluster_id }}-openshift-node"]'
+ gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
+ gce_pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
+ gce_project_id: "{{ lookup('env', 'gce_project_id') }}"
+
+- include: ../openshift-master/terminate.yml
+ vars:
+ oo_host_group_exp: 'groups["tag_env-host-type-{{ cluster_id }}-openshift-master"]'
+ gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
+ gce_pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
+ gce_project_id: "{{ lookup('env', 'gce_project_id') }}"
diff --git a/playbooks/gce/openshift-cluster/update.yml b/playbooks/gce/openshift-cluster/update.yml
new file mode 100644
index 000000000..973e4c3ef
--- /dev/null
+++ b/playbooks/gce/openshift-cluster/update.yml
@@ -0,0 +1,13 @@
+---
+- hosts: "tag_env-{{ cluster_id }}"
+ roles:
+ - openshift_repos
+ - os_update_latest
+
+- include: ../openshift-master/config.yml
+ vars:
+ oo_host_group_exp: "groups[\"tag_env-host-type-{{ cluster_id }}-openshift-master\"]"
+
+- include: ../openshift-node/config.yml
+ vars:
+ oo_host_group_exp: "groups[\"tag_env-host-type-{{ cluster_id }}-openshift-node\"]"
diff --git a/playbooks/gce/openshift-cluster/vars.yml b/playbooks/gce/openshift-cluster/vars.yml
new file mode 100644
index 000000000..ed97d539c
--- /dev/null
+++ b/playbooks/gce/openshift-cluster/vars.yml
@@ -0,0 +1 @@
+---
diff --git a/playbooks/gce/openshift-master/config.yml b/playbooks/gce/openshift-master/config.yml
index ae598b622..857da0763 100644
--- a/playbooks/gce/openshift-master/config.yml
+++ b/playbooks/gce/openshift-master/config.yml
@@ -1,42 +1,20 @@
---
-- name: "populate oo_hosts_to_config host group if needed"
+- name: master/config.yml, populate oo_masters_to_config host group if needed
hosts: localhost
gather_facts: no
tasks:
- name: "Evaluate oo_host_group_exp if it's set"
- add_host: "name={{ item }} groups=oo_hosts_to_config"
+ add_host: "name={{ item }} groups=oo_masters_to_config"
with_items: "{{ oo_host_group_exp | default('') }}"
when: oo_host_group_exp is defined
-- name: "Gather facts for nodes in {{ oo_env }}"
- hosts: "tag_env-host-type-{{ oo_env }}-openshift-node"
- connection: ssh
- user: root
-
-- name: "Set Origin specific facts on localhost (for later use)"
- hosts: localhost
- gather_facts: no
- tasks:
- - name: Setting openshift_node_ips fact on localhost
- set_fact:
- openshift_node_ips: "{{ hostvars
- | oo_select_keys(groups['tag_env-host-type-' + oo_env + '-openshift-node'])
- | oo_collect(attribute='ansible_default_ipv4.address') }}"
- when: groups['tag_env-host-type-' + oo_env + '-openshift-node'] is defined
-
- name: "Configure instances"
- hosts: oo_hosts_to_config
- connection: ssh
- user: root
+ hosts: oo_masters_to_config
+ vars:
+ openshift_hostname: "{{ gce_private_ip }}"
vars_files:
- - vars.yml
+ - vars.yml
roles:
- - repos
- - {
- role: openshift_master,
- openshift_node_ips: "{{ hostvars['localhost'].openshift_node_ips | default(['']) }}",
- openshift_public_ip: "{{ gce_public_ip }}",
- openshift_env: "{{ oo_env }}",
- }
+ - openshift_master
- pods
- os_env_extras
diff --git a/playbooks/gce/openshift-master/launch.yml b/playbooks/gce/openshift-master/launch.yml
index f2800b061..287596002 100644
--- a/playbooks/gce/openshift-master/launch.yml
+++ b/playbooks/gce/openshift-master/launch.yml
@@ -1,4 +1,8 @@
---
+# TODO: when we are ready to go to ansible 1.9+ support only, we can update to
+# the gce task to use the disk_auto_delete parameter to avoid having to delete
+# the disk as a separate step on termination
+
- name: Launch instance(s)
hosts: localhost
connection: local
@@ -24,16 +28,18 @@
tags: "{{ oo_new_inst_tags }}"
register: gce
- - name: Add new instances public IPs to oo_hosts_to_config
- add_host: "hostname={{ item.name }} ansible_ssh_host={{ item.public_ip }} groupname=oo_hosts_to_config"
+ - name: Add new instances public IPs to oo_masters_to_config
+ add_host:
+ hostname: "{{ item.name }}"
+ ansible_ssh_host: "{{ item.public_ip }}"
+ groupname: oo_masters_to_config
+ gce_private_ip: "{{ item.private_ip }}"
with_items: gce.instance_data
- name: Wait for ssh
wait_for: "port=22 host={{ item.public_ip }}"
with_items: gce.instance_data
- - debug: var=gce
-
- name: Wait for root user setup
command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null root@{{ item.public_ip }} echo root user is setup"
register: result
diff --git a/playbooks/gce/openshift-master/terminate.yml b/playbooks/gce/openshift-master/terminate.yml
index 76e1404b5..8319774f8 100644
--- a/playbooks/gce/openshift-master/terminate.yml
+++ b/playbooks/gce/openshift-master/terminate.yml
@@ -1,20 +1,17 @@
-- name: "populate oo_hosts_to_terminate host group if needed"
+---
+- name: Populate oo_masters_to_terminate host group if needed
hosts: localhost
gather_facts: no
tasks:
- - debug: var=oo_host_group_exp
-
- name: Evaluate oo_host_group_exp if it's set
- add_host: "name={{ item }} groups=oo_hosts_to_terminate"
+ add_host: "name={{ item }} groups=oo_masters_to_terminate"
with_items: "{{ oo_host_group_exp | default('') }}"
when: oo_host_group_exp is defined
- - debug: msg="{{ groups['oo_hosts_to_terminate'] }}"
-
-
-- name: Terminate instances
+- name: Terminate master instances
hosts: localhost
connection: local
+ gather_facts: no
tasks:
- name: Terminate master instances
gce:
@@ -22,12 +19,10 @@
pem_file: "{{ gce_pem_file }}"
project_id: "{{ gce_project_id }}"
state: 'absent'
- instance_names: "{{ groups['oo_hosts_to_terminate'] }}"
- disks: "{{ groups['oo_hosts_to_terminate'] }}"
+ instance_names: "{{ groups['oo_masters_to_terminate'] }}"
+ disks: "{{ groups['oo_masters_to_terminate'] }}"
register: gce
- - debug: var=gce
-
- name: Remove disks of instances
gce_pd:
service_account_email: "{{ gce_service_account_email }}"
diff --git a/playbooks/gce/openshift-master/vars.yml b/playbooks/gce/openshift-master/vars.yml
index fb5f4ea42..c196b2fca 100644
--- a/playbooks/gce/openshift-master/vars.yml
+++ b/playbooks/gce/openshift-master/vars.yml
@@ -1,2 +1,3 @@
---
openshift_debug_level: 4
+openshift_cluster_id: "{{ cluster_id }}"
diff --git a/playbooks/gce/openshift-node/config.yml b/playbooks/gce/openshift-node/config.yml
index 85f34e814..771cc3a94 100644
--- a/playbooks/gce/openshift-node/config.yml
+++ b/playbooks/gce/openshift-node/config.yml
@@ -1,49 +1,100 @@
---
-- name: "populate oo_hosts_to_config host group if needed"
+- name: node/config.yml, populate oo_nodes_to_config host group if needed
hosts: localhost
gather_facts: no
tasks:
- name: Evaluate oo_host_group_exp
- add_host: "name={{ item }} groups=oo_hosts_to_config"
+ add_host: "name={{ item }} groups=oo_nodes_to_config"
with_items: "{{ oo_host_group_exp | default('') }}"
when: oo_host_group_exp is defined
+ - add_host:
+ name: "{{ groups['tag_env-host-type-' ~ cluster_id ~ '-openshift-master'][0] }}"
+ groups: oo_first_master
+ when: oo_host_group_exp is defined
-- name: "Gather facts for masters in {{ oo_env }}"
- hosts: "tag_env-host-type-{{ oo_env }}-openshift-master"
- connection: ssh
- user: root
-- name: "Set OO sepcific facts on localhost (for later use)"
- hosts: localhost
- gather_facts: no
+- name: Gather and set facts for hosts to configure
+ hosts: oo_nodes_to_config
+ roles:
+ - openshift_facts
+ tasks:
+ # Since the master is registering the nodes before they are configured, we
+ # need to make sure to set the node properties beforehand if we do not want
+ # the defaults
+ - openshift_facts:
+ role: "{{ item.role }}"
+ local_facts: "{{ item.local_facts }}"
+ with_items:
+ - role: common
+ local_facts:
+ hostname: "{{ gce_private_ip }}"
+ - role: node
+ local_facts:
+ external_id: "{{ openshift_node_external_id | default(None) }}"
+ resources_cpu: "{{ openshfit_node_resources_cpu | default(None) }}"
+ resources_memory: "{{ openshfit_node_resources_memory | default(None) }}"
+ pod_cidr: "{{ openshfit_node_pod_cidr | default(None) }}"
+ labels: "{{ openshfit_node_labels | default(None) }}"
+ annotations: "{{ openshfit_node_annotations | default(None) }}"
+
+
+- name: Register nodes
+ hosts: oo_first_master
+ vars:
+ openshift_nodes: "{{ hostvars
+ | oo_select_keys(groups['oo_nodes_to_config']) }}"
+ roles:
+ - openshift_register_nodes
tasks:
- - name: Setting openshift_master_ips fact on localhost
- set_fact:
- openshift_master_ips: "{{ hostvars
- | oo_select_keys(groups['tag_env-host-type-' + oo_env + '-openshift-master'])
- | oo_collect(attribute='ansible_default_ipv4.address') }}"
- when: groups['tag_env-host-type-' + oo_env + '-openshift-master'] is defined
- - name: Setting openshift_master_public_ips fact on localhost
- set_fact:
- openshift_master_public_ips: "{{ hostvars
- | oo_select_keys(groups['tag_env-host-type-' + oo_env + '-openshift-master'])
- | oo_collect(attribute='gce_public_ip') }}"
- when: groups['tag_env-host-type-' + oo_env + '-openshift-master'] is defined
+ - name: Create local temp directory for syncing certs
+ local_action: command /usr/bin/mktemp -d /tmp/openshift-ansible-XXXXXXX
+ register: mktemp
-- name: "Configure instances"
- hosts: oo_hosts_to_config
- connection: ssh
- user: root
+ - name: Sync master certs to localhost
+ synchronize:
+ mode: pull
+ checksum: yes
+ src: /var/lib/openshift/openshift.local.certificates
+ dest: "{{ mktemp.stdout }}"
+
+- name: Configure instances
+ hosts: oo_nodes_to_config
vars_files:
- - vars.yml
+ - vars.yml
+ vars:
+ sync_tmpdir: "{{ hostvars[groups['oo_first_master'][0]].mktemp.stdout }}"
+ cert_parent_rel_path: openshift.local.certificates
+ cert_rel_path: "{{ cert_parent_rel_path }}/node-{{ openshift.common.hostname }}"
+ cert_base_path: /var/lib/openshift
+ cert_parent_path: "{{ cert_base_path }}/{{ cert_parent_rel_path }}"
+ cert_path: "{{ cert_base_path }}/{{ cert_rel_path }}"
+ pre_tasks:
+ - name: Ensure certificate directories exist
+ file:
+ path: "{{ item }}"
+ state: directory
+ with_items:
+ - "{{ cert_path }}"
+ - "{{ cert_parent_path }}/ca"
+
+ # TODO: notify restart openshift-node and/or restart openshift-sdn-node,
+ # possibly test service started time against certificate/config file
+ # timestamps in openshift-node or openshift-sdn-node to trigger notify
+ - name: Sync certs to nodes
+ synchronize:
+ checksum: yes
+ src: "{{ item.src }}"
+ dest: "{{ item.dest }}"
+ owner: no
+ group: no
+ with_items:
+ - src: "{{ sync_tmpdir }}/{{ cert_rel_path }}"
+ dest: "{{ cert_parent_path }}"
+ - src: "{{ sync_tmpdir }}/{{ cert_parent_rel_path }}/ca/cert.crt"
+ dest: "{{ cert_parent_path }}/ca/cert.crt"
+ - local_action: file name={{ sync_tmpdir }} state=absent
+ run_once: true
roles:
- - repos
- - docker
- - {
- role: openshift_node,
- openshift_master_ips: "{{ hostvars['localhost'].openshift_master_ips | default(['']) }}",
- openshift_master_public_ips: "{{ hostvars['localhost'].openshift_master_public_ips | default(['']) }}",
- openshift_public_ip: "{{ gce_public_ip }}",
- openshift_env: "{{ oo_env }}",
- }
+ - openshift_node
- os_env_extras
+ - os_env_extras_node
diff --git a/playbooks/gce/openshift-node/launch.yml b/playbooks/gce/openshift-node/launch.yml
index 935599efd..73d0478ab 100644
--- a/playbooks/gce/openshift-node/launch.yml
+++ b/playbooks/gce/openshift-node/launch.yml
@@ -1,4 +1,8 @@
---
+# TODO: when we are ready to go to ansible 1.9+ support only, we can update to
+# the gce task to use the disk_auto_delete parameter to avoid having to delete
+# the disk as a separate step on termination
+
- name: Launch instance(s)
hosts: localhost
connection: local
@@ -24,16 +28,18 @@
tags: "{{ oo_new_inst_tags }}"
register: gce
- - name: Add new instances public IPs to oo_hosts_to_config
- add_host: "hostname={{ item.name }} ansible_ssh_host={{ item.public_ip }} groupname=oo_hosts_to_config"
+ - name: Add new instances public IPs to oo_nodes_to_config
+ add_host:
+ hostname: "{{ item.name }}"
+ ansible_ssh_host: "{{ item.public_ip }}"
+ groupname: oo_nodes_to_config
+ gce_private_ip: "{{ item.private_ip }}"
with_items: gce.instance_data
- name: Wait for ssh
wait_for: "port=22 host={{ item.public_ip }}"
with_items: gce.instance_data
- - debug: var=gce
-
- name: Wait for root user setup
command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null root@{{ item.public_ip }} echo root user is setup"
register: result
@@ -45,13 +51,3 @@
# Apply the configs, separate so that just the configs can be run by themselves
- include: config.yml
-
-# Always bounce service to pick up new credentials
-#- name: "Restart instances"
-# hosts: oo_hosts_to_config
-# connection: ssh
-# user: root
-# tasks:
-# - debug: var=groups.oo_hosts_to_config
-# - name: Restart OpenShift
-# service: name=openshift-node enabled=yes state=restarted
diff --git a/playbooks/gce/openshift-node/terminate.yml b/playbooks/gce/openshift-node/terminate.yml
index 8d60f27b3..7d71dfcab 100644
--- a/playbooks/gce/openshift-node/terminate.yml
+++ b/playbooks/gce/openshift-node/terminate.yml
@@ -1,20 +1,17 @@
-- name: "populate oo_hosts_to_terminate host group if needed"
+---
+- name: Populate oo_nodes_to_terminate host group if needed
hosts: localhost
gather_facts: no
tasks:
- - debug: var=oo_host_group_exp
-
- name: Evaluate oo_host_group_exp if it's set
- add_host: "name={{ item }} groups=oo_hosts_to_terminate"
+ add_host: "name={{ item }} groups=oo_nodes_to_terminate"
with_items: "{{ oo_host_group_exp | default('') }}"
when: oo_host_group_exp is defined
- - debug: msg="{{ groups['oo_hosts_to_terminate'] }}"
-
-
-- name: Terminate instances
+- name: Terminate node instances
hosts: localhost
connection: local
+ gather_facts: no
tasks:
- name: Terminate node instances
gce:
@@ -22,12 +19,10 @@
pem_file: "{{ gce_pem_file }}"
project_id: "{{ gce_project_id }}"
state: 'absent'
- instance_names: "{{ groups['oo_hosts_to_terminate'] }}"
- disks: "{{ groups['oo_hosts_to_terminate'] }}"
+ instance_names: "{{ groups['oo_nodes_to_terminate'] }}"
+ disks: "{{ groups['oo_nodes_to_terminate'] }}"
register: gce
- - debug: var=gce
-
- name: Remove disks of instances
gce_pd:
service_account_email: "{{ gce_service_account_email }}"
diff --git a/playbooks/gce/openshift-node/vars.yml b/playbooks/gce/openshift-node/vars.yml
index fb5f4ea42..c196b2fca 100644
--- a/playbooks/gce/openshift-node/vars.yml
+++ b/playbooks/gce/openshift-node/vars.yml
@@ -1,2 +1,3 @@
---
openshift_debug_level: 4
+openshift_cluster_id: "{{ cluster_id }}"
diff --git a/playbooks/libvirt/openshift-cluster/filter_plugins b/playbooks/libvirt/openshift-cluster/filter_plugins
new file mode 120000
index 000000000..99a95e4ca
--- /dev/null
+++ b/playbooks/libvirt/openshift-cluster/filter_plugins
@@ -0,0 +1 @@
+../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/libvirt/openshift-cluster/launch.yml b/playbooks/libvirt/openshift-cluster/launch.yml
new file mode 100644
index 000000000..6f2df33af
--- /dev/null
+++ b/playbooks/libvirt/openshift-cluster/launch.yml
@@ -0,0 +1,65 @@
+- name: Launch instance(s)
+ hosts: localhost
+ connection: local
+ gather_facts: no
+
+ vars:
+ libvirt_storage_pool_path: "{{ lookup('env','HOME') }}/libvirt-storage-pool-openshift"
+ libvirt_storage_pool: 'openshift'
+ libvirt_uri: 'qemu:///system'
+
+ vars_files:
+ - vars.yml
+
+ tasks:
+ - set_fact:
+ k8s_type: master
+
+ - name: Generate master instance name(s)
+ set_fact:
+ scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ '%05x' | format( 1048576 | random ) }}"
+ register: master_names_output
+ with_sequence: start=1 end='{{ num_masters }}'
+
+ - set_fact:
+ master_names: "{{ master_names_output.results | oo_collect('ansible_facts') | oo_collect('scratch_name') }}"
+
+ - include: launch_instances.yml
+ vars:
+ instances: '{{ master_names }}'
+ cluster: '{{ cluster_id }}'
+ type: '{{ k8s_type }}'
+ group_name: 'tag_env-host-type-{{ cluster_id }}-openshift-master'
+
+ - set_fact:
+ k8s_type: node
+
+ - name: Generate node instance name(s)
+ set_fact:
+ scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ '%05x' | format( 1048576 | random ) }}"
+ register: node_names_output
+ with_sequence: start=1 end='{{ num_nodes }}'
+
+ - set_fact:
+ node_names: "{{ node_names_output.results | oo_collect('ansible_facts') | oo_collect('scratch_name') }}"
+
+ - include: launch_instances.yml
+ vars:
+ instances: '{{ node_names }}'
+ cluster: '{{ cluster_id }}'
+ type: '{{ k8s_type }}'
+
+- hosts: 'tag_env-{{ cluster_id }}'
+ roles:
+ - openshift_repos
+ - os_update_latest
+
+- include: ../openshift-master/config.yml
+ vars:
+ oo_host_group_exp: 'groups["tag_env-host-type-{{ cluster_id }}-openshift-master"]'
+ oo_env: '{{ cluster_id }}'
+
+- include: ../openshift-node/config.yml
+ vars:
+ oo_host_group_exp: 'groups["tag_env-host-type-{{ cluster_id }}-openshift-node"]'
+ oo_env: '{{ cluster_id }}'
diff --git a/playbooks/libvirt/openshift-cluster/launch_instances.yml b/playbooks/libvirt/openshift-cluster/launch_instances.yml
new file mode 100644
index 000000000..3bbcae981
--- /dev/null
+++ b/playbooks/libvirt/openshift-cluster/launch_instances.yml
@@ -0,0 +1,102 @@
+- name: Create the libvirt storage directory for openshift
+ file:
+ dest: '{{ libvirt_storage_pool_path }}'
+ state: directory
+
+- name: Download Base Cloud image
+ get_url:
+ url: '{{ base_image_url }}'
+ sha256sum: '{{ base_image_sha256 }}'
+ dest: '{{ libvirt_storage_pool_path }}/{{ base_image_name }}'
+
+- name: Create the cloud-init config drive path
+ file:
+ dest: '{{ libvirt_storage_pool_path }}/{{ item }}_configdrive/openstack/latest'
+ state: directory
+ with_items: '{{ instances }}'
+
+- name: Create the cloud-init config drive files
+ template:
+ src: '{{ item[1] }}'
+ dest: '{{ libvirt_storage_pool_path }}/{{ item[0] }}_configdrive/openstack/latest/{{ item[1] }}'
+ with_nested:
+ - '{{ instances }}'
+ - [ user-data, meta-data ]
+
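+# genisoimage builds a cloud-init 'cidata' ISO (the NoCloud datasource) from
+# the user-data and meta-data files rendered above.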
+- name: Create the cloud-init config drive
+ command: 'genisoimage -output {{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso -volid cidata -joliet -rock user-data meta-data'
+ args:
+ chdir: '{{ libvirt_storage_pool_path }}/{{ item }}_configdrive/openstack/latest'
+ creates: '{{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso'
+ with_items: '{{ instances }}'
+
+- name: Create the libvirt storage pool for openshift
+ command: 'virsh -c {{ libvirt_uri }} pool-create-as {{ libvirt_storage_pool }} dir --target {{ libvirt_storage_pool_path }}'
+ ignore_errors: yes
+
+- name: Refresh the libvirt storage pool for openshift
+ command: 'virsh -c {{ libvirt_uri }} pool-refresh {{ libvirt_storage_pool }}'
+
+- name: Create VMs drives
+ command: 'virsh -c {{ libvirt_uri }} vol-create-as {{ libvirt_storage_pool }} {{ item }}.qcow2 10G --format qcow2 --backing-vol {{ base_image_name }} --backing-vol-format qcow2'
+ with_items: '{{ instances }}'
+
+- name: Create VMs
+ virt:
+ name: '{{ item }}'
+ command: define
+ xml: "{{ lookup('template', '../templates/domain.xml') }}"
+ uri: '{{ libvirt_uri }}'
+ with_items: '{{ instances }}'
+
+- name: Start VMs
+ virt:
+ name: '{{ item }}'
+ state: running
+ uri: '{{ libvirt_uri }}'
+ with_items: '{{ instances }}'
+
+- name: Collect MAC addresses of the VMs
+ shell: 'virsh -c {{ libvirt_uri }} dumpxml {{ item }} | xmllint --xpath "string(//domain/devices/interface/mac/@address)" -'
+ register: scratch_mac
+ with_items: '{{ instances }}'
+
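+# Once the VMs get a DHCP lease on the libvirt network, their MAC addresses
+# should show up in the host's ARP table; keep counting matching entries
+# until every instance has an IP.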
+- name: Wait for the VMs to get an IP
+ command: "egrep -c '{{ scratch_mac.results | oo_collect('stdout') | join('|') }}' /proc/net/arp"
+ ignore_errors: yes
+ register: nb_allocated_ips
+ until: nb_allocated_ips.stdout == '{{ instances | length }}'
+ retries: 30
+ delay: 1
+
+- name: Collect IP addresses of the VMs
+ shell: "awk '/{{ item.stdout }}/ {print $1}' /proc/net/arp"
+ register: scratch_ip
+ with_items: '{{ scratch_mac.results }}'
+
+- set_fact:
+ ips: "{{ scratch_ip.results | oo_collect('stdout') }}"
+
+- name: Add new instances
+ add_host:
+ hostname: '{{ item.0 }}'
+ ansible_ssh_host: '{{ item.1 }}'
+ ansible_ssh_user: root
+ groups: 'tag_env-{{ cluster }}, tag_host-type-{{ type }}, tag_env-host-type-{{ cluster }}-openshift-{{ type }}'
+ with_together:
+ - instances
+ - ips
+
+- name: Wait for ssh
+ wait_for:
+ host: '{{ item }}'
+ port: 22
+ with_items: ips
+
+- name: Wait for root user setup
+  command: 'ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null root@{{ item }} echo root user is set up'
+ register: result
+ until: result.rc == 0
+ retries: 30
+ delay: 1
+ with_items: ips
diff --git a/playbooks/libvirt/openshift-cluster/list.yml b/playbooks/libvirt/openshift-cluster/list.yml
new file mode 100644
index 000000000..6bf07e3c6
--- /dev/null
+++ b/playbooks/libvirt/openshift-cluster/list.yml
@@ -0,0 +1,43 @@
+- name: Generate oo_list_hosts group
+ hosts: localhost
+ connection: local
+ gather_facts: no
+
+ vars:
+ libvirt_uri: 'qemu:///system'
+
+ tasks:
+ - name: List VMs
+ virt:
+ command: list_vms
+ register: list_vms
+
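+    # 'item|truncate(cluster_id|length+1, True)' returns '<cluster_id>-...'
+    # (truncate appends '...'), so the comparison below only matches VMs
+    # whose names start with '<cluster_id>-'.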
+ - name: Collect MAC addresses of the VMs
+ shell: 'virsh -c {{ libvirt_uri }} dumpxml {{ item }} | xmllint --xpath "string(//domain/devices/interface/mac/@address)" -'
+ register: scratch_mac
+ with_items: '{{ list_vms.list_vms }}'
+ when: item|truncate(cluster_id|length+1, True) == '{{ cluster_id }}-...'
+
+ - name: Collect IP addresses of the VMs
+ shell: "awk '/{{ item.stdout }}/ {print $1}' /proc/net/arp"
+ register: scratch_ip
+ with_items: '{{ scratch_mac.results }}'
+ when: item.skipped is not defined
+
+ - name: Add hosts
+ add_host:
+ hostname: '{{ item[0] }}'
+ ansible_ssh_host: '{{ item[1].stdout }}'
+ ansible_ssh_user: root
+ groups: oo_list_hosts
+ with_together:
+ - '{{ list_vms.list_vms }}'
+ - '{{ scratch_ip.results }}'
+ when: item[1].skipped is not defined
+
+- name: List Hosts
+ hosts: oo_list_hosts
+
+ tasks:
+ - debug:
+ msg: 'public:{{ansible_default_ipv4.address}} private:{{ansible_default_ipv4.address}}'
diff --git a/playbooks/libvirt/openshift-cluster/roles b/playbooks/libvirt/openshift-cluster/roles
new file mode 120000
index 000000000..20c4c58cf
--- /dev/null
+++ b/playbooks/libvirt/openshift-cluster/roles
@@ -0,0 +1 @@
+../../../roles \ No newline at end of file
diff --git a/playbooks/libvirt/openshift-cluster/terminate.yml b/playbooks/libvirt/openshift-cluster/terminate.yml
new file mode 100644
index 000000000..c609169d3
--- /dev/null
+++ b/playbooks/libvirt/openshift-cluster/terminate.yml
@@ -0,0 +1,41 @@
+- name: Terminate instance(s)
+ hosts: localhost
+ connection: local
+ gather_facts: no
+
+ vars:
+ libvirt_storage_pool_path: "{{ lookup('env','HOME') }}/libvirt-storage-pool-openshift"
+ libvirt_storage_pool: 'openshift'
+ libvirt_uri: 'qemu:///system'
+
+ tasks:
+ - name: List VMs
+ virt:
+ command: list_vms
+ register: list_vms
+
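+    # As in list.yml, the truncate comparison restricts each task to VMs
+    # whose names start with '<cluster_id>-'.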
+ - name: Destroy VMs
+ virt:
+ name: '{{ item[0] }}'
+ command: '{{ item[1] }}'
+ uri: '{{ libvirt_uri }}'
+ with_nested:
+ - '{{ list_vms.list_vms }}'
+ - [ destroy, undefine ]
+ when: item[0]|truncate(cluster_id|length+1, True) == '{{ cluster_id }}-...'
+
+ - name: Delete VMs config drive
+ file:
+ path: '{{ libvirt_storage_pool_path }}/{{ item }}_configdrive/openstack'
+ state: absent
+ with_items: '{{ list_vms.list_vms }}'
+ when: item|truncate(cluster_id|length+1, True) == '{{ cluster_id }}-...'
+
+ - name: Delete VMs drives
+ command: 'virsh -c {{ libvirt_uri }} vol-delete --pool {{ libvirt_storage_pool }} {{ item[0] }}{{ item[1] }}'
+ args:
+ removes: '{{ libvirt_storage_pool_path }}/{{ item[0] }}{{ item[1] }}'
+ with_nested:
+ - '{{ list_vms.list_vms }}'
+ - [ '_configdrive', '_cloud-init.iso', '.qcow2' ]
+ when: item[0]|truncate(cluster_id|length+1, True) == '{{ cluster_id }}-...'
diff --git a/playbooks/libvirt/openshift-cluster/vars.yml b/playbooks/libvirt/openshift-cluster/vars.yml
new file mode 100644
index 000000000..4e4eecd46
--- /dev/null
+++ b/playbooks/libvirt/openshift-cluster/vars.yml
@@ -0,0 +1,7 @@
+# base_image_url: http://download.fedoraproject.org/pub/fedora/linux/releases/21/Cloud/Images/x86_64/Fedora-Cloud-Base-20141203-21.x86_64.qcow2
+# base_image_name: Fedora-Cloud-Base-20141203-21.x86_64.qcow2
+# base_image_sha256: 3a99bb89f33e3d4ee826c8160053cdb8a72c80cd23350b776ce73cd244467d86
+
+base_image_url: http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2
+base_image_name: CentOS-7-x86_64-GenericCloud.qcow2
+base_image_sha256: e324e3ab1d24a1bbf035ddb365e7f9058c0b454acf48d7aa15c5519fae5998ab
diff --git a/playbooks/libvirt/openshift-master/config.yml b/playbooks/libvirt/openshift-master/config.yml
new file mode 100644
index 000000000..dd95fd57f
--- /dev/null
+++ b/playbooks/libvirt/openshift-master/config.yml
@@ -0,0 +1,21 @@
+- name: master/config.yml, populate oo_masters_to_config host group if needed
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - name: "Evaluate oo_host_group_exp if it's set"
+ add_host:
+ name: '{{ item }}'
+ groups: oo_masters_to_config
+ with_items: "{{ oo_host_group_exp | default('') }}"
+ when: oo_host_group_exp is defined
+
+- name: Configure instances
+ hosts: oo_masters_to_config
+ vars:
+ openshift_hostname: '{{ ansible_default_ipv4.address }}'
+ vars_files:
+ - vars.yml
+ roles:
+ - openshift_master
+ - pods
+ - os_env_extras
diff --git a/playbooks/libvirt/openshift-master/filter_plugins b/playbooks/libvirt/openshift-master/filter_plugins
new file mode 120000
index 000000000..99a95e4ca
--- /dev/null
+++ b/playbooks/libvirt/openshift-master/filter_plugins
@@ -0,0 +1 @@
+../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/libvirt/openshift-master/roles b/playbooks/libvirt/openshift-master/roles
new file mode 120000
index 000000000..20c4c58cf
--- /dev/null
+++ b/playbooks/libvirt/openshift-master/roles
@@ -0,0 +1 @@
+../../../roles \ No newline at end of file
diff --git a/playbooks/libvirt/openshift-master/vars.yml b/playbooks/libvirt/openshift-master/vars.yml
new file mode 100644
index 000000000..ad0c0fbe2
--- /dev/null
+++ b/playbooks/libvirt/openshift-master/vars.yml
@@ -0,0 +1 @@
+openshift_debug_level: 4
diff --git a/playbooks/libvirt/openshift-node/config.yml b/playbooks/libvirt/openshift-node/config.yml
new file mode 100644
index 000000000..3244a8046
--- /dev/null
+++ b/playbooks/libvirt/openshift-node/config.yml
@@ -0,0 +1,102 @@
+- name: node/config.yml, populate oo_nodes_to_config host group if needed
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - name: "Evaluate oo_host_group_exp if it's set"
+ add_host:
+ name: '{{ item }}'
+ groups: oo_nodes_to_config
+ with_items: "{{ oo_host_group_exp | default('') }}"
+ when: oo_host_group_exp is defined
+
+ - add_host:
+ name: "{{ groups['tag_env-host-type-' ~ cluster_id ~ '-openshift-master'][0] }}"
+ groups: oo_first_master
+ when: oo_host_group_exp is defined
+
+
+- name: Gather and set facts for hosts to configure
+ hosts: oo_nodes_to_config
+ roles:
+ - openshift_facts
+ tasks:
+ # Since the master is registering the nodes before they are configured, we
+ # need to make sure to set the node properties beforehand if we do not want
+ # the defaults
+ - openshift_facts:
+ role: "{{ item.role }}"
+ local_facts: "{{ item.local_facts }}"
+ with_items:
+ - role: common
+ local_facts:
+ hostname: "{{ ansible_default_ipv4.address }}"
+ - role: node
+ local_facts:
+ external_id: "{{ openshift_node_external_id | default(None) }}"
+          resources_cpu: "{{ openshift_node_resources_cpu | default(None) }}"
+          resources_memory: "{{ openshift_node_resources_memory | default(None) }}"
+          pod_cidr: "{{ openshift_node_pod_cidr | default(None) }}"
+          labels: "{{ openshift_node_labels | default(None) }}"
+          annotations: "{{ openshift_node_annotations | default(None) }}"
+
+
+- name: Register nodes
+ hosts: oo_first_master
+ vars:
+ openshift_nodes: "{{ hostvars
+ | oo_select_keys(groups['oo_nodes_to_config']) }}"
+ roles:
+ - openshift_register_nodes
+ tasks:
+ - name: Create local temp directory for syncing certs
+ local_action: command /usr/bin/mktemp -d /tmp/openshift-ansible-XXXXXXX
+ register: mktemp
+
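+    # Certificates are first pulled from the master to a temp dir on the
+    # ansible host, then pushed out to each node in the following play.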
+ - name: Sync master certs to localhost
+ synchronize:
+ mode: pull
+ checksum: yes
+ src: /var/lib/openshift/openshift.local.certificates
+ dest: "{{ mktemp.stdout }}"
+
+- name: Configure instances
+ hosts: oo_nodes_to_config
+ vars_files:
+ - vars.yml
+ vars:
+ sync_tmpdir: "{{ hostvars[groups['oo_first_master'][0]].mktemp.stdout }}"
+ cert_parent_rel_path: openshift.local.certificates
+ cert_rel_path: "{{ cert_parent_rel_path }}/node-{{ openshift.common.hostname }}"
+ cert_base_path: /var/lib/openshift
+ cert_parent_path: "{{ cert_base_path }}/{{ cert_parent_rel_path }}"
+ cert_path: "{{ cert_base_path }}/{{ cert_rel_path }}"
+ pre_tasks:
+    - name: Ensure certificate directories exist
+ file:
+ path: "{{ item }}"
+ state: directory
+ with_items:
+ - "{{ cert_path }}"
+ - "{{ cert_parent_path }}/ca"
+
+ # TODO: notify restart openshift-node and/or restart openshift-sdn-node,
+ # possibly test service started time against certificate/config file
+ # timestamps in openshift-node or openshift-sdn-node to trigger notify
+ - name: Sync certs to nodes
+ synchronize:
+ checksum: yes
+ src: "{{ item.src }}"
+ dest: "{{ item.dest }}"
+ owner: no
+ group: no
+ with_items:
+ - src: "{{ sync_tmpdir }}/{{ cert_rel_path }}"
+ dest: "{{ cert_parent_path }}"
+ - src: "{{ sync_tmpdir }}/{{ cert_parent_rel_path }}/ca/cert.crt"
+ dest: "{{ cert_parent_path }}/ca/cert.crt"
+ - local_action: file name={{ sync_tmpdir }} state=absent
+ run_once: true
+ roles:
+ - openshift_node
+ - os_env_extras
+ - os_env_extras_node
diff --git a/playbooks/libvirt/openshift-node/filter_plugins b/playbooks/libvirt/openshift-node/filter_plugins
new file mode 120000
index 000000000..99a95e4ca
--- /dev/null
+++ b/playbooks/libvirt/openshift-node/filter_plugins
@@ -0,0 +1 @@
+../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/libvirt/openshift-node/roles b/playbooks/libvirt/openshift-node/roles
new file mode 120000
index 000000000..20c4c58cf
--- /dev/null
+++ b/playbooks/libvirt/openshift-node/roles
@@ -0,0 +1 @@
+../../../roles \ No newline at end of file
diff --git a/playbooks/libvirt/openshift-node/vars.yml b/playbooks/libvirt/openshift-node/vars.yml
new file mode 100644
index 000000000..ad0c0fbe2
--- /dev/null
+++ b/playbooks/libvirt/openshift-node/vars.yml
@@ -0,0 +1 @@
+openshift_debug_level: 4
diff --git a/playbooks/libvirt/templates/domain.xml b/playbooks/libvirt/templates/domain.xml
new file mode 100644
index 000000000..da037d138
--- /dev/null
+++ b/playbooks/libvirt/templates/domain.xml
@@ -0,0 +1,62 @@
+<domain type='kvm' id='8'>
+ <name>{{ item }}</name>
+ <memory unit='GiB'>1</memory>
+ <currentMemory unit='GiB'>1</currentMemory>
+ <vcpu placement='static'>2</vcpu>
+ <os>
+ <type arch='x86_64' machine='pc'>hvm</type>
+ <boot dev='hd'/>
+ </os>
+ <features>
+ <acpi/>
+ <apic/>
+ <pae/>
+ </features>
+ <clock offset='utc'>
+ <timer name='rtc' tickpolicy='catchup'/>
+ <timer name='pit' tickpolicy='delay'/>
+ <timer name='hpet' present='no'/>
+ </clock>
+ <on_poweroff>destroy</on_poweroff>
+ <on_reboot>restart</on_reboot>
+ <on_crash>restart</on_crash>
+ <devices>
+ <emulator>/usr/bin/qemu-system-x86_64</emulator>
+ <disk type='file' device='disk'>
+ <driver name='qemu' type='qcow2'/>
+ <source file='{{ libvirt_storage_pool_path }}/{{ item }}.qcow2'/>
+ <target dev='vda' bus='virtio'/>
+ </disk>
+ <disk type='file' device='cdrom'>
+ <driver name='qemu' type='raw'/>
+ <source file='{{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso'/>
+ <target dev='vdb' bus='virtio'/>
+ <readonly/>
+ </disk>
+ <controller type='usb' index='0' />
+ <interface type='network'>
+ <source network='default'/>
+ <model type='virtio'/>
+ </interface>
+ <serial type='pty'>
+ <target port='0'/>
+ </serial>
+ <console type='pty'>
+ <target type='serial' port='0'/>
+ </console>
+ <channel type='spicevmc'>
+ <target type='virtio' name='com.redhat.spice.0'/>
+ </channel>
+ <input type='tablet' bus='usb' />
+ <input type='mouse' bus='ps2'/>
+ <input type='keyboard' bus='ps2'/>
+ <graphics type='spice' autoport='yes' />
+ <video>
+ <model type='qxl' ram='65536' vram='65536' vgamem='16384' heads='1'/>
+ </video>
+ <redirdev bus='usb' type='spicevmc'>
+ </redirdev>
+ <memballoon model='virtio'>
+ </memballoon>
+ </devices>
+</domain>
diff --git a/playbooks/libvirt/templates/meta-data b/playbooks/libvirt/templates/meta-data
new file mode 100644
index 000000000..5d779519f
--- /dev/null
+++ b/playbooks/libvirt/templates/meta-data
@@ -0,0 +1,2 @@
+instance-id: {{ item[0] }}
+local-hostname: {{ item[0] }}
diff --git a/playbooks/libvirt/templates/user-data b/playbooks/libvirt/templates/user-data
new file mode 100644
index 000000000..985badc8e
--- /dev/null
+++ b/playbooks/libvirt/templates/user-data
@@ -0,0 +1,10 @@
+#cloud-config
+
+disable_root: 0
+
+system_info:
+ default_user:
+ name: root
+
+ssh_authorized_keys:
+ - {{ lookup('file', '~/.ssh/id_rsa.pub') }}
diff --git a/rel-eng/packages/.readme b/rel-eng/packages/.readme
new file mode 100644
index 000000000..8999c8dbc
--- /dev/null
+++ b/rel-eng/packages/.readme
@@ -0,0 +1,3 @@
+The rel-eng/packages directory contains metadata files
+named after their packages. Each file has the latest tagged
+version and the project's relative directory.
diff --git a/rel-eng/packages/openshift-ansible-bin b/rel-eng/packages/openshift-ansible-bin
new file mode 100644
index 000000000..500e1f4b1
--- /dev/null
+++ b/rel-eng/packages/openshift-ansible-bin
@@ -0,0 +1 @@
+0.0.8-1 bin/
diff --git a/rel-eng/packages/openshift-ansible-inventory b/rel-eng/packages/openshift-ansible-inventory
new file mode 100644
index 000000000..cf3ac87ed
--- /dev/null
+++ b/rel-eng/packages/openshift-ansible-inventory
@@ -0,0 +1 @@
+0.0.2-1 inventory/
diff --git a/rel-eng/tito.props b/rel-eng/tito.props
new file mode 100644
index 000000000..eab3f190d
--- /dev/null
+++ b/rel-eng/tito.props
@@ -0,0 +1,5 @@
+[buildconfig]
+builder = tito.builder.Builder
+tagger = tito.tagger.VersionTagger
+changelog_do_not_remove_cherrypick = 0
+changelog_format = %s (%ae)
diff --git a/roles/ansible_tower/tasks/main.yaml b/roles/ansible_tower/tasks/main.yaml
index f58a5b1c2..1d75a95e6 100644
--- a/roles/ansible_tower/tasks/main.yaml
+++ b/roles/ansible_tower/tasks/main.yaml
@@ -9,6 +9,7 @@
- ansible
- telnet
- ack
+ - python-ansible-tower-cli
- name: download Tower setup
get_url: url=http://releases.ansible.com/ansible-tower/setup/ansible-tower-setup-2.1.1.tar.gz dest=/opt/ force=no
@@ -25,3 +26,9 @@
- name: Open firewalld port for https
firewalld: port=8080/tcp permanent=true state=enabled
+- name: Set (httpd_can_network_connect) flag on and keep it persistent across reboots
+ seboolean: name=httpd_can_network_connect state=yes persistent=yes
+
+- name: Set (httpd_can_network_connect_db) flag on and keep it persistent across reboots
+ seboolean: name=httpd_can_network_connect_db state=yes persistent=yes
+
diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml
index 2ecefd588..ca700db17 100644
--- a/roles/docker/tasks/main.yml
+++ b/roles/docker/tasks/main.yml
@@ -11,5 +11,5 @@
# From the origin rpm there exists instructions on how to
# setup origin properly. The following steps come from there
- name: Change root to be in the Docker group
- user: name=root groups=docker append=yes
+ user: name=root groups=dockerroot append=yes
diff --git a/roles/openshift_ansible_inventory/README.md b/roles/openshift_ansible_inventory/README.md
new file mode 100644
index 000000000..69a07effd
--- /dev/null
+++ b/roles/openshift_ansible_inventory/README.md
@@ -0,0 +1,41 @@
+OpenShift Ansible Inventory
+=========
+
+Install and configure openshift-ansible-inventory.
+
+Requirements
+------------
+
+None
+
+Role Variables
+--------------
+
+oo_inventory_group
+oo_inventory_user
+oo_inventory_accounts
+oo_inventory_cache_max_age
+
+Dependencies
+------------
+
+None
+
+Example Playbook
+----------------
+
+An example of how to use the role, passing the inventory accounts in as a parameter (the account name and provider below are placeholders):
+
+    - hosts: inventory_servers
+      roles:
+        - role: openshift_ansible_inventory
+          oo_inventory_accounts:
+            - name: aws-account-one
+              provider: aws/hosts/ec2.py
+
+License
+-------
+
+ASL 2.0
+
+Author Information
+------------------
+
+OpenShift Operations, Red Hat, Inc.
diff --git a/roles/openshift_ansible_inventory/defaults/main.yml b/roles/openshift_ansible_inventory/defaults/main.yml
new file mode 100644
index 000000000..f53c00c80
--- /dev/null
+++ b/roles/openshift_ansible_inventory/defaults/main.yml
@@ -0,0 +1,4 @@
+---
+oo_inventory_group: root
+oo_inventory_owner: root
+oo_inventory_cache_max_age: 1800
diff --git a/roles/openshift_ansible_inventory/handlers/main.yml b/roles/openshift_ansible_inventory/handlers/main.yml
new file mode 100644
index 000000000..e2db43477
--- /dev/null
+++ b/roles/openshift_ansible_inventory/handlers/main.yml
@@ -0,0 +1,2 @@
+---
+# handlers file for openshift_ansible_inventory
diff --git a/roles/openshift_ansible_inventory/meta/main.yml b/roles/openshift_ansible_inventory/meta/main.yml
new file mode 100644
index 000000000..ff3df0a7d
--- /dev/null
+++ b/roles/openshift_ansible_inventory/meta/main.yml
@@ -0,0 +1,8 @@
+---
+galaxy_info:
+  author: OpenShift
+ description: Install and configure openshift-ansible-inventory
+ company: Red Hat, Inc
+ license: ASL 2.0
+ min_ansible_version: 1.2
+dependencies: []
diff --git a/roles/openshift_ansible_inventory/tasks/main.yml b/roles/openshift_ansible_inventory/tasks/main.yml
new file mode 100644
index 000000000..3990d5750
--- /dev/null
+++ b/roles/openshift_ansible_inventory/tasks/main.yml
@@ -0,0 +1,11 @@
+---
+- yum:
+ name: openshift-ansible-inventory
+ state: present
+
+- template:
+ src: multi_ec2.yaml.j2
+ dest: /etc/ansible/multi_ec2.yaml
+ group: "{{ oo_inventory_group }}"
+ owner: "{{ oo_inventory_owner }}"
+ mode: "0640"
diff --git a/roles/openshift_ansible_inventory/templates/multi_ec2.yaml.j2 b/roles/openshift_ansible_inventory/templates/multi_ec2.yaml.j2
new file mode 100644
index 000000000..23dfe73b8
--- /dev/null
+++ b/roles/openshift_ansible_inventory/templates/multi_ec2.yaml.j2
@@ -0,0 +1,11 @@
+# multi ec2 inventory configs
+cache_max_age: {{ oo_inventory_cache_max_age }}
+accounts:
+{% for account in oo_inventory_accounts %}
+ - name: {{ account.name }}
+ provider: {{ account.provider }}
+ env_vars:
+ AWS_ACCESS_KEY_ID: {{ account.env_vars.AWS_ACCESS_KEY_ID }}
+ AWS_SECRET_ACCESS_KEY: {{ account.env_vars.AWS_SECRET_ACCESS_KEY }}
+
+{% endfor %}
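+# For example, with a single account (placeholder name, provider and
+# credentials) this template renders as:
+#
+#   cache_max_age: 1800
+#   accounts:
+#     - name: aws-account-one
+#       provider: aws/hosts/ec2.py
+#       env_vars:
+#         AWS_ACCESS_KEY_ID: XXXXXXXXXXXXXXXXXXXX
+#         AWS_SECRET_ACCESS_KEY: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX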
diff --git a/roles/openshift_ansible_inventory/vars/main.yml b/roles/openshift_ansible_inventory/vars/main.yml
new file mode 100644
index 000000000..25c049282
--- /dev/null
+++ b/roles/openshift_ansible_inventory/vars/main.yml
@@ -0,0 +1,2 @@
+---
+# vars file for openshift_ansible_inventory
diff --git a/roles/openshift_common/README.md b/roles/openshift_common/README.md
index c2ae609ff..14c2037e4 100644
--- a/roles/openshift_common/README.md
+++ b/roles/openshift_common/README.md
@@ -12,19 +12,21 @@ rhel-7-server-extra-rpms, and rhel-7-server-ose-beta-rpms repos.
Role Variables
--------------
-| Name | Default value | |
-|-------------------------------|------------------------------|----------------------------------------|
-| openshift_bind_ip | ansible_default_ipv4.address | IP to use for local binding |
-| openshift_debug_level | 0 | Global openshift debug log verbosity |
-| openshift_hostname_workaround | True | Workaround needed to set hostname to IP address |
-| openshift_hostname | openshift_public_ip if openshift_hostname_workaround else ansible_fqdn | hostname to use for this instance |
-| openshift_public_ip | UNDEF (Required) | Public IP address to use for this host |
-| openshift_env | default | Envrionment name if multiple OpenShift instances |
+| Name | Default value | |
+|---------------------------|-------------------|---------------------------------------------|
+| openshift_cluster_id | default | Cluster name if multiple OpenShift clusters |
+| openshift_debug_level | 0 | Global openshift debug log verbosity |
+| openshift_hostname | UNDEF | Internal hostname to use for this host (this value will set the hostname on the system) |
+| openshift_ip | UNDEF | Internal IP address to use for this host |
+| openshift_public_hostname | UNDEF | Public hostname to use for this host |
+| openshift_public_ip | UNDEF | Public IP address to use for this host |
Dependencies
------------
os_firewall
+openshift_facts
+openshift_repos
Example Playbook
----------------
@@ -39,4 +41,4 @@ Apache License, Version 2.0
Author Information
------------------
-TODO
+Jason DeTiberus (jdetiber@redhat.com)
diff --git a/roles/openshift_common/defaults/main.yml b/roles/openshift_common/defaults/main.yml
index a541591fb..4d3e0fe9e 100644
--- a/roles/openshift_common/defaults/main.yml
+++ b/roles/openshift_common/defaults/main.yml
@@ -1,8 +1,3 @@
---
-openshift_bind_ip: "{{ ansible_default_ipv4.address }}"
+openshift_cluster_id: 'default'
openshift_debug_level: 0
-
-# TODO: Once openshift stops resolving hostnames for node queries remove
-# this...
-openshift_hostname_workaround: true
-openshift_hostname: "{{ openshift_public_ip if openshift_hostname_workaround else ansible_fqdn }}"
diff --git a/roles/openshift_common/meta/main.yml b/roles/openshift_common/meta/main.yml
index 88b7677d0..81363ec68 100644
--- a/roles/openshift_common/meta/main.yml
+++ b/roles/openshift_common/meta/main.yml
@@ -13,3 +13,5 @@ galaxy_info:
- cloud
dependencies:
- { role: os_firewall }
+- { role: openshift_facts }
+- { role: openshift_repos }
diff --git a/roles/openshift_common/tasks/main.yml b/roles/openshift_common/tasks/main.yml
index 728bba4e4..941190534 100644
--- a/roles/openshift_common/tasks/main.yml
+++ b/roles/openshift_common/tasks/main.yml
@@ -1,21 +1,16 @@
---
-# fixme: Once openshift stops resolving hostnames for node queries remove this...
-- name: Set hostname to IP Addr (WORKAROUND)
- hostname: name={{ openshift_bind_ip }}
- when: openshift_hostname_workaround
+- name: Set common OpenShift facts
+ openshift_facts:
+ role: 'common'
+ local_facts:
+ cluster_id: "{{ openshift_cluster_id | default('default') }}"
+ debug_level: "{{ openshift_debug_level | default(0) }}"
+ hostname: "{{ openshift_hostname | default(None) }}"
+ ip: "{{ openshift_ip | default(None) }}"
+ public_hostname: "{{ openshift_public_hostname | default(None) }}"
+ public_ip: "{{ openshift_public_ip | default(None) }}"
+ use_openshift_sdn: "{{ openshift_use_openshift_sdn | default(None) }}"
-- name: Configure local facts file
- file: path=/etc/ansible/facts.d/ state=directory mode=0750
+- name: Set hostname
+ hostname: name={{ openshift.common.hostname }}
-- name: Set common OpenShift facts
- include: set_facts.yml
- facts:
- - section: common
- option: env
- value: "{{ openshift_env | default('default') }}"
- - section: common
- option: host_type
- value: "{{ openshift_host_type }}"
- - section: common
- option: debug_level
- value: "{{ openshift_debug_level }}"
diff --git a/roles/openshift_common/tasks/set_facts.yml b/roles/openshift_common/tasks/set_facts.yml
deleted file mode 100644
index 349eecd1d..000000000
--- a/roles/openshift_common/tasks/set_facts.yml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-- name: "Setting local_facts"
- ini_file:
- dest: /etc/ansible/facts.d/openshift.fact
- mode: 0640
- section: "{{ item.section }}"
- option: "{{ item.option }}"
- value: "{{ item.value }}"
- with_items: facts
diff --git a/roles/openshift_common/vars/main.yml b/roles/openshift_common/vars/main.yml
index 623aed9bf..50816d319 100644
--- a/roles/openshift_common/vars/main.yml
+++ b/roles/openshift_common/vars/main.yml
@@ -1,6 +1,7 @@
---
-openshift_master_credentials_dir: /var/lib/openshift/openshift.local.certificates/admin/
-
# TODO: Upstream kubernetes only supports iptables currently, if this changes,
# then these variable should be moved to defaults
+# TODO: it might be possible to still use firewalld if we wire up the created
+# chains with the public zone (or the zone associated with the correct
+# interfaces)
os_firewall_use_firewalld: False
diff --git a/roles/openshift_facts/README.md b/roles/openshift_facts/README.md
new file mode 100644
index 000000000..2fd50e236
--- /dev/null
+++ b/roles/openshift_facts/README.md
@@ -0,0 +1,34 @@
+OpenShift Facts
+===============
+
+Provides the openshift_facts module
+
+Requirements
+------------
+
+None
+
+Role Variables
+--------------
+
+None
+
+Dependencies
+------------
+
+None
+
+Example Playbook
+----------------
+
+A minimal playbook that simply gathers the facts:
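+
+    - hosts: all
+      roles:
+      - openshift_facts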
+
+License
+-------
+
+Apache License, Version 2.0
+
+Author Information
+------------------
+
+Jason DeTiberus (jdetiber@redhat.com)
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
new file mode 100755
index 000000000..0dd343443
--- /dev/null
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -0,0 +1,482 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# vim: expandtab:tabstop=4:shiftwidth=4
+
+DOCUMENTATION = '''
+---
+module: openshift_facts
+short_description: OpenShift Facts
+author: Jason DeTiberus
+requirements: [ ]
+'''
+EXAMPLES = '''
+'''
+
+import ConfigParser
+import copy
+import json
+import os
+import re
+import urlparse
+
+class OpenShiftFactsUnsupportedRoleError(Exception):
+ pass
+
+class OpenShiftFactsFileWriteError(Exception):
+ pass
+
+class OpenShiftFacts():
+ known_roles = ['common', 'master', 'node', 'master_sdn', 'node_sdn']
+
+ def __init__(self, role, filename, local_facts):
+ self.changed = False
+ self.filename = filename
+ if role not in self.known_roles:
+ raise OpenShiftFactsUnsupportedRoleError("Role %s is not supported by this module" % role)
+ self.role = role
+ self.facts = self.generate_facts(local_facts)
+
+ def generate_facts(self, local_facts):
+ local_facts = self.init_local_facts(local_facts)
+ roles = local_facts.keys()
+
+ defaults = self.get_defaults(roles)
+ provider_facts = self.init_provider_facts()
+ facts = self.apply_provider_facts(defaults, provider_facts, roles)
+
+ facts = self.merge_facts(facts, local_facts)
+ facts['current_config'] = self.current_config(facts)
+ self.set_url_facts_if_unset(facts)
+ return dict(openshift=facts)
+
+
+ def set_url_facts_if_unset(self, facts):
+ if 'master' in facts:
+ for (url_var, use_ssl, port, default) in [
+ ('api_url',
+ facts['master']['api_use_ssl'],
+ facts['master']['api_port'],
+ facts['common']['hostname']),
+ ('public_api_url',
+ facts['master']['api_use_ssl'],
+ facts['master']['api_port'],
+ facts['common']['public_hostname']),
+ ('console_url',
+ facts['master']['console_use_ssl'],
+ facts['master']['console_port'],
+ facts['common']['hostname']),
+                ('public_console_url',
+ facts['master']['console_use_ssl'],
+ facts['master']['console_port'],
+ facts['common']['public_hostname'])]:
+ if url_var not in facts['master']:
+ scheme = 'https' if use_ssl else 'http'
+ netloc = default
+ if (scheme == 'https' and port != '443') or (scheme == 'http' and port != '80'):
+ netloc = "%s:%s" % (netloc, port)
+ facts['master'][url_var] = urlparse.urlunparse((scheme, netloc, '', '', '', ''))
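+        # Example: with api_use_ssl=True, api_port='8443' and hostname
+        # 'master.example.com', api_url defaults to
+        # 'https://master.example.com:8443'; the port suffix is dropped when
+        # it matches the scheme default (443 for https, 80 for http).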
+
+
+ # Query current OpenShift config and return a dictionary containing
+ # settings that may be valuable for determining actions that need to be
+ # taken in the playbooks/roles
+ def current_config(self, facts):
+ current_config=dict()
+ roles = [ role for role in facts if role not in ['common','provider'] ]
+ for role in roles:
+ if 'roles' in current_config:
+ current_config['roles'].append(role)
+ else:
+ current_config['roles'] = [role]
+
+ # TODO: parse the /etc/sysconfig/openshift-{master,node} config to
+ # determine the location of files.
+
+ # Query kubeconfig settings
+ kubeconfig_dir = '/var/lib/openshift/openshift.local.certificates'
+ if role == 'node':
+ kubeconfig_dir = os.path.join(kubeconfig_dir, "node-%s" % facts['common']['hostname'])
+
+ kubeconfig_path = os.path.join(kubeconfig_dir, '.kubeconfig')
+ if os.path.isfile('/usr/bin/openshift') and os.path.isfile(kubeconfig_path):
+ try:
+ _, output, error = module.run_command(["/usr/bin/openshift", "ex",
+ "config", "view", "-o",
+ "json",
+ "--kubeconfig=%s" % kubeconfig_path],
+ check_rc=False)
+ config = json.loads(output)
+
+ try:
+ for cluster in config['clusters']:
+ config['clusters'][cluster]['certificate-authority-data'] = 'masked'
+ except KeyError:
+ pass
+ try:
+ for user in config['users']:
+ config['users'][user]['client-certificate-data'] = 'masked'
+ config['users'][user]['client-key-data'] = 'masked'
+ except KeyError:
+ pass
+
+ current_config['kubeconfig'] = config
+ except Exception:
+ pass
+
+ return current_config
+
+
+ def apply_provider_facts(self, facts, provider_facts, roles):
+ if not provider_facts:
+ return facts
+
+ use_openshift_sdn = provider_facts.get('use_openshift_sdn')
+ if isinstance(use_openshift_sdn, bool):
+ facts['common']['use_openshift_sdn'] = use_openshift_sdn
+
+ common_vars = [('hostname', 'ip'), ('public_hostname', 'public_ip')]
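+        # For each (hostname, ip) pair, prefer the provider-reported
+        # hostname but fall back to the corresponding IP when the hostname
+        # is missing or invalid (see choose_hostname below).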
+ for h_var, ip_var in common_vars:
+ ip_value = provider_facts['network'].get(ip_var)
+ if ip_value:
+ facts['common'][ip_var] = ip_value
+
+ facts['common'][h_var] = self.choose_hostname([provider_facts['network'].get(h_var)], facts['common'][ip_var])
+
+ if 'node' in roles:
+ ext_id = provider_facts.get('external_id')
+ if ext_id:
+ facts['node']['external_id'] = ext_id
+
+ facts['provider'] = provider_facts
+ return facts
+
+ def hostname_valid(self, hostname):
+ if (not hostname or
+ hostname.startswith('localhost') or
+ hostname.endswith('localdomain') or
+ len(hostname.split('.')) < 2):
+ return False
+
+ return True
+
+ def choose_hostname(self, hostnames=[], fallback=''):
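+        # Return the first valid entry, trying non-IP names before IP
+        # addresses, and fall back to `fallback` if nothing is valid; e.g.
+        # choose_hostname(['10.0.0.1', 'node1.example.com']) returns
+        # 'node1.example.com'.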
+ hostname = fallback
+
+ ips = [ i for i in hostnames if i is not None and re.match(r'\A\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\Z', i) ]
+ hosts = [ i for i in hostnames if i is not None and i not in set(ips) ]
+
+ for host_list in (hosts, ips):
+ for h in host_list:
+ if self.hostname_valid(h):
+ return h
+
+ return hostname
+
+ def get_defaults(self, roles):
+ hardware_facts = self.get_hardware_facts()
+ net_facts = self.get_net_facts()
+ base_facts = self.get_base_facts()
+
+ defaults = dict()
+
+ common = dict(use_openshift_sdn=True)
+ ip = net_facts['default_ipv4']['address']
+ common['ip'] = ip
+ common['public_ip'] = ip
+
+ rc, output, error = module.run_command(['hostname', '-f'])
+ hostname_f = output.strip() if rc == 0 else ''
+ hostname_values = [hostname_f, base_facts['nodename'], base_facts['fqdn']]
+ hostname = self.choose_hostname(hostname_values)
+
+ common['hostname'] = hostname
+ common['public_hostname'] = hostname
+ defaults['common'] = common
+
+ if 'master' in roles:
+ # TODO: provide for a better way to override just the port, or just
+ # the urls, instead of forcing both, also to override the hostname
+ # without having to re-generate these urls later
+ master = dict(api_use_ssl=True, api_port='8443',
+ console_use_ssl=True, console_path='/console',
+ console_port='8443', etcd_use_ssl=False,
+ etcd_port='4001')
+ defaults['master'] = master
+
+ if 'node' in roles:
+ node = dict(external_id=common['hostname'], pod_cidr='',
+ labels={}, annotations={})
+ node['resources_cpu'] = hardware_facts['processor_cores']
+ node['resources_memory'] = int(int(hardware_facts['memtotal_mb']) * 1024 * 1024 * 0.75)
+ defaults['node'] = node
+
+ return defaults
+
+ def merge_facts(self, orig, new):
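+        # Recursively overlay `new` facts onto `orig`; e.g. merging
+        # {'common': {'hostname': 'a'}} into {'common': {'ip': '1.2.3.4'}}
+        # yields {'common': {'ip': '1.2.3.4', 'hostname': 'a'}}.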
+ facts = dict()
+ for key, value in orig.iteritems():
+ if key in new:
+ if isinstance(value, dict):
+ facts[key] = self.merge_facts(value, new[key])
+ else:
+ facts[key] = copy.copy(new[key])
+ else:
+ facts[key] = copy.deepcopy(value)
+ new_keys = set(new.keys()) - set(orig.keys())
+ for key in new_keys:
+ facts[key] = copy.deepcopy(new[key])
+ return facts
+
+ def query_metadata(self, metadata_url, headers=None, expect_json=False):
+ r, info = fetch_url(module, metadata_url, headers=headers)
+ if info['status'] != 200:
+ module.fail_json(msg='Failed to query metadata', result=r,
+ info=info)
+ if expect_json:
+ return module.from_json(r.read())
+ else:
+ return [line.strip() for line in r.readlines()]
+
+ def walk_metadata(self, metadata_url, headers=None, expect_json=False):
+ metadata = dict()
+
+ for line in self.query_metadata(metadata_url, headers, expect_json):
+ if line.endswith('/') and not line == 'public-keys/':
+ key = line[:-1]
+ metadata[key]=self.walk_metadata(metadata_url + line, headers,
+ expect_json)
+ else:
+ results = self.query_metadata(metadata_url + line, headers,
+ expect_json)
+ if len(results) == 1:
+ metadata[line] = results.pop()
+ else:
+ metadata[line] = results
+ return metadata
+
+ def get_provider_metadata(self, metadata_url, supports_recursive=False,
+ headers=None, expect_json=False):
+ if supports_recursive:
+ metadata = self.query_metadata(metadata_url, headers, expect_json)
+ else:
+ metadata = self.walk_metadata(metadata_url, headers, expect_json)
+ return metadata
+
+ def get_hardware_facts(self):
+ if not hasattr(self, 'hardware_facts'):
+ self.hardware_facts = Hardware().populate()
+ return self.hardware_facts
+
+ def get_base_facts(self):
+ if not hasattr(self, 'base_facts'):
+ self.base_facts = Facts().populate()
+ return self.base_facts
+
+ def get_virt_facts(self):
+ if not hasattr(self, 'virt_facts'):
+ self.virt_facts = Virtual().populate()
+ return self.virt_facts
+
+ def get_net_facts(self):
+ if not hasattr(self, 'net_facts'):
+ self.net_facts = Network(module).populate()
+ return self.net_facts
+
+ def guess_host_provider(self):
+ # TODO: cloud provider facts should probably be submitted upstream
+ virt_facts = self.get_virt_facts()
+ hardware_facts = self.get_hardware_facts()
+ product_name = hardware_facts['product_name']
+ product_version = hardware_facts['product_version']
+ virt_type = virt_facts['virtualization_type']
+ virt_role = virt_facts['virtualization_role']
+ provider = None
+ metadata = None
+
+        # TODO: this is not exposed through module_utils/facts.py in ansible;
+        # we need to create a PR for ansible to expose it
+ bios_vendor = get_file_content('/sys/devices/virtual/dmi/id/bios_vendor')
+ if bios_vendor == 'Google':
+ provider = 'gce'
+ metadata_url = 'http://metadata.google.internal/computeMetadata/v1/?recursive=true'
+ headers = {'Metadata-Flavor': 'Google'}
+ metadata = self.get_provider_metadata(metadata_url, True, headers,
+ True)
+
+ # Filter sshKeys and serviceAccounts from gce metadata
+ metadata['project']['attributes'].pop('sshKeys', None)
+ metadata['instance'].pop('serviceAccounts', None)
+ elif virt_type == 'xen' and virt_role == 'guest' and re.match(r'.*\.amazon$', product_version):
+ provider = 'ec2'
+ metadata_url = 'http://169.254.169.254/latest/meta-data/'
+ metadata = self.get_provider_metadata(metadata_url)
+ elif re.search(r'OpenStack', product_name):
+ provider = 'openstack'
+ metadata_url = 'http://169.254.169.254/openstack/latest/meta_data.json'
+ metadata = self.get_provider_metadata(metadata_url, True, None, True)
+ ec2_compat_url = 'http://169.254.169.254/latest/meta-data/'
+ metadata['ec2_compat'] = self.get_provider_metadata(ec2_compat_url)
+
+ # Filter public_keys and random_seed from openstack metadata
+ metadata.pop('public_keys', None)
+ metadata.pop('random_seed', None)
+ return dict(name=provider, metadata=metadata)
+
+ def normalize_provider_facts(self, provider, metadata):
+ if provider is None or metadata is None:
+ return {}
+
+ # TODO: test for ipv6_enabled where possible (gce, aws do not support)
+ # and configure ipv6 facts if available
+
+ # TODO: add support for setting user_data if available
+
+ facts = dict(name=provider, metadata=metadata)
+ network = dict(interfaces=[], ipv6_enabled=False)
+ if provider == 'gce':
+ for interface in metadata['instance']['networkInterfaces']:
+ int_info = dict(ips=[interface['ip']], network_type=provider)
+ int_info['public_ips'] = [ ac['externalIp'] for ac in interface['accessConfigs'] ]
+ int_info['public_ips'].extend(interface['forwardedIps'])
+ _, _, network_id = interface['network'].rpartition('/')
+ int_info['network_id'] = network_id
+ network['interfaces'].append(int_info)
+ _, _, zone = metadata['instance']['zone'].rpartition('/')
+ facts['zone'] = zone
+ facts['external_id'] = metadata['instance']['id']
+
+ # Default to no sdn for GCE deployments
+ facts['use_openshift_sdn'] = False
+
+ # GCE currently only supports a single interface
+ network['ip'] = network['interfaces'][0]['ips'][0]
+ network['public_ip'] = network['interfaces'][0]['public_ips'][0]
+ network['hostname'] = metadata['instance']['hostname']
+
+ # TODO: attempt to resolve public_hostname
+ network['public_hostname'] = network['public_ip']
+ elif provider == 'ec2':
+ for interface in sorted(metadata['network']['interfaces']['macs'].values(),
+ key=lambda x: x['device-number']):
+ int_info = dict()
+ var_map = {'ips': 'local-ipv4s', 'public_ips': 'public-ipv4s'}
+ for ips_var, int_var in var_map.iteritems():
+ ips = interface[int_var]
+ int_info[ips_var] = [ips] if isinstance(ips, basestring) else ips
+ int_info['network_type'] = 'vpc' if 'vpc-id' in interface else 'classic'
+ int_info['network_id'] = interface['subnet-id'] if int_info['network_type'] == 'vpc' else None
+ network['interfaces'].append(int_info)
+ facts['zone'] = metadata['placement']['availability-zone']
+ facts['external_id'] = metadata['instance-id']
+
+ # TODO: actually attempt to determine default local and public ips
+ # by using the ansible default ip fact and the ipv4-associations
+            # from the ec2 metadata
+ network['ip'] = metadata['local-ipv4']
+ network['public_ip'] = metadata['public-ipv4']
+
+ # TODO: verify that local hostname makes sense and is resolvable
+ network['hostname'] = metadata['local-hostname']
+
+ # TODO: verify that public hostname makes sense and is resolvable
+ network['public_hostname'] = metadata['public-hostname']
+ elif provider == 'openstack':
+ # openstack ec2 compat api does not support network interfaces and
+ # the version tested on did not include the info in the openstack
+ # metadata api, should be updated if neutron exposes this.
+
+ facts['zone'] = metadata['availability_zone']
+ facts['external_id'] = metadata['uuid']
+ network['ip'] = metadata['ec2_compat']['local-ipv4']
+ network['public_ip'] = metadata['ec2_compat']['public-ipv4']
+
+ # TODO: verify local hostname makes sense and is resolvable
+ network['hostname'] = metadata['hostname']
+
+ # TODO: verify that public hostname makes sense and is resolvable
+ network['public_hostname'] = metadata['ec2_compat']['public-hostname']
+
+ facts['network'] = network
+ return facts
+
+ def init_provider_facts(self):
+ provider_info = self.guess_host_provider()
+ provider_facts = self.normalize_provider_facts(
+ provider_info.get('name'),
+ provider_info.get('metadata')
+ )
+ return provider_facts
+
+ def get_facts(self):
+ # TODO: transform facts into cleaner format (openshift_<blah> instead
+        # of openshift.<blah>)
+ return self.facts
+
+ def init_local_facts(self, facts={}):
+ changed = False
+
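+        # Local facts are persisted in an ini file (self.filename, which
+        # main() sets to /etc/ansible/facts.d/openshift.fact) so that values
+        # survive between playbook runs.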
+ local_facts = ConfigParser.SafeConfigParser()
+ local_facts.read(self.filename)
+
+ section = self.role
+ if not local_facts.has_section(section):
+ local_facts.add_section(section)
+ changed = True
+
+ for key, value in facts.iteritems():
+ if isinstance(value, bool):
+ value = str(value)
+ if not value:
+ continue
+ if not local_facts.has_option(section, key) or local_facts.get(section, key) != value:
+ local_facts.set(section, key, value)
+ changed = True
+
+ if changed and not module.check_mode:
+ try:
+ fact_dir = os.path.dirname(self.filename)
+ if not os.path.exists(fact_dir):
+ os.makedirs(fact_dir)
+ with open(self.filename, 'w') as fact_file:
+ local_facts.write(fact_file)
+ except (IOError, OSError) as e:
+ raise OpenShiftFactsFileWriteError("Could not create fact file: %s, error: %s" % (self.filename, e))
+ self.changed = changed
+
+ role_facts = dict()
+ for section in local_facts.sections():
+ role_facts[section] = dict()
+ for opt, val in local_facts.items(section):
+ role_facts[section][opt] = val
+ return role_facts
+
+
+def main():
+ global module
+ module = AnsibleModule(
+ argument_spec = dict(
+ role=dict(default='common',
+ choices=OpenShiftFacts.known_roles,
+ required=False),
+ local_facts=dict(default={}, type='dict', required=False),
+ ),
+ supports_check_mode=True,
+ add_file_common_args=True,
+ )
+
+ role = module.params['role']
+ local_facts = module.params['local_facts']
+ fact_file = '/etc/ansible/facts.d/openshift.fact'
+
+ openshift_facts = OpenShiftFacts(role, fact_file, local_facts)
+
+ file_params = module.params.copy()
+ file_params['path'] = fact_file
+ file_args = module.load_file_common_arguments(file_params)
+ changed = module.set_fs_attributes_if_different(file_args,
+ openshift_facts.changed)
+
+ return module.exit_json(changed=changed,
+ ansible_facts=openshift_facts.get_facts())
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.facts import *
+from ansible.module_utils.urls import *
+main()
diff --git a/roles/openshift_facts/meta/main.yml b/roles/openshift_facts/meta/main.yml
new file mode 100644
index 000000000..0be3afd24
--- /dev/null
+++ b/roles/openshift_facts/meta/main.yml
@@ -0,0 +1,15 @@
+---
+galaxy_info:
+ author: Jason DeTiberus
+ description:
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 1.8
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+ - system
+dependencies: []
diff --git a/roles/openshift_facts/tasks/main.yml b/roles/openshift_facts/tasks/main.yml
new file mode 100644
index 000000000..5a7d10d25
--- /dev/null
+++ b/roles/openshift_facts/tasks/main.yml
@@ -0,0 +1,3 @@
+---
+- name: Gather OpenShift facts
+ openshift_facts:
diff --git a/roles/openshift_master/README.md b/roles/openshift_master/README.md
index 5a1b889b2..9f9d0a613 100644
--- a/roles/openshift_master/README.md
+++ b/roles/openshift_master/README.md
@@ -13,21 +13,24 @@ Role Variables
--------------
From this role:
-| Name | Default value |
-|
-|------------------------------------------|-----------------------|----------------------------------------|
-| openshift_master_manage_service_externally | False | Should the openshift-master role manage the openshift-master service? |
-| openshift_master_debug_level | openshift_debug_level | Verbosity of the debug logs for openshift-master |
-| openshift_node_ips | [] | List of the openshift node ip addresses, that we want to pre-register to the system when openshift-master starts up |
-| openshift_registry_url | UNDEF (Optional) | Default docker registry to use |
+| Name | Default value | |
+|-------------------------------------|-----------------------|--------------------------------------------------|
+| openshift_master_debug_level | openshift_debug_level | Verbosity of the debug logs for openshift-master |
+| openshift_node_ips | [] | List of the openshift node ip addresses to pre-register when openshift-master starts up |
+| openshift_registry_url | UNDEF | Default docker registry to use |
+| openshift_master_api_port           | UNDEF                 | OpenShift API port |
+| openshift_master_console_port       | UNDEF                 | Web console port |
+| openshift_master_api_url            | UNDEF                 | Internal OpenShift API URL |
+| openshift_master_console_url        | UNDEF                 | Internal web console URL |
+| openshift_master_public_api_url     | UNDEF                 | Public OpenShift API URL |
+| openshift_master_public_console_url | UNDEF                 | Public web console URL |
From openshift_common:
-| Name | Default Value | |
-|-------------------------------|---------------------|---------------------|
-| openshift_debug_level | 0 | Global openshift debug log verbosity |
-| openshift_hostname_workaround | True | |
-| openshift_public_ip | UNDEF (Required) | Public IP address to use for this host |
-| openshift_hostname | openshift_public_ip if openshift_hostname_workaround else ansible_fqdn | hostname to use for this instance |
+| Name | Default Value | |
+|-------------------------------|----------------|----------------------------------------|
+| openshift_debug_level | 0 | Global openshift debug log verbosity |
+| openshift_public_ip | UNDEF | Public IP address to use for this host |
+| openshift_hostname | UNDEF | hostname to use for this instance |
Dependencies
------------
diff --git a/roles/openshift_master/defaults/main.yml b/roles/openshift_master/defaults/main.yml
index 0159afbb5..87fb347a8 100644
--- a/roles/openshift_master/defaults/main.yml
+++ b/roles/openshift_master/defaults/main.yml
@@ -1,16 +1,17 @@
---
-openshift_master_manage_service_externally: false
-openshift_master_debug_level: "{{ openshift_debug_level | default(0) }}"
openshift_node_ips: []
+
+# TODO: update setting these values based on the facts
+# TODO: update for console port change
os_firewall_allow:
- service: etcd embedded
port: 4001/tcp
-- service: etcd peer
- port: 7001/tcp
- service: OpenShift api https
port: 8443/tcp
-- service: OpenShift web console https
- port: 8444/tcp
os_firewall_deny:
- service: OpenShift api http
port: 8080/tcp
+- service: former OpenShift web console port
+ port: 8444/tcp
+- service: former etcd peer port
+ port: 7001/tcp
diff --git a/roles/openshift_master/handlers/main.yml b/roles/openshift_master/handlers/main.yml
index 503d08d41..6fd4dfb51 100644
--- a/roles/openshift_master/handlers/main.yml
+++ b/roles/openshift_master/handlers/main.yml
@@ -1,4 +1,3 @@
---
- name: restart openshift-master
service: name=openshift-master state=restarted
- when: not openshift_master_manage_service_externally
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index d5f4776dc..aa615df39 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -1,17 +1,37 @@
---
+# TODO: actually have api_port, api_use_ssl, console_port, console_use_ssl,
+# and etcd_use_ssl change the master config.
+
+- name: Set master OpenShift facts
+ openshift_facts:
+ role: 'master'
+ local_facts:
+ debug_level: "{{ openshift_master_debug_level | default(openshift.common.debug_level) }}"
+ api_port: "{{ openshift_master_api_port | default(None) }}"
+ api_url: "{{ openshift_master_api_url | default(None) }}"
+ api_use_ssl: "{{ openshift_master_api_use_ssl | default(None) }}"
+ public_api_url: "{{ openshift_master_public_api_url | default(None) }}"
+ console_port: "{{ openshift_master_console_port | default(None) }}"
+ console_url: "{{ openshift_master_console_url | default(None) }}"
+ console_use_ssl: "{{ openshift_master_console_use_ssl | default(None) }}"
+ public_console_url: "{{ openshift_master_public_console_url | default(None) }}"
+ etcd_use_ssl: "{{ openshift_master_etcd_use_ssl | default(None) }}"
+
- name: Install OpenShift Master package
yum: pkg=openshift-master state=installed
+# TODO: We should pre-generate the master config and point to the generated
+# config rather than setting command line flags here
- name: Configure OpenShift settings
lineinfile:
dest: /etc/sysconfig/openshift-master
regexp: '^OPTIONS='
- line: "OPTIONS=\"--public-master={{ openshift_hostname }} {% if
- openshift_node_ips %} --nodes={{ openshift_node_ips
- | join(',') }} {% endif %} --loglevel={{ openshift_master_debug_level }}\""
+ line: "OPTIONS=\"--master={{ openshift.common.hostname }} --public-master={{ openshift.common.public_hostname }} {% if openshift_node_ips %} --nodes={{ openshift_node_ips | join(',') }} {% endif %} --loglevel={{ openshift.master.debug_level }}\""
notify:
- restart openshift-master
+# TODO: should this be populated by a fact based on the deployment type
+# (origin, online, enterprise)?
- name: Set default registry url
lineinfile:
dest: /etc/sysconfig/openshift-master
@@ -21,61 +41,18 @@
notify:
- restart openshift-master
-- name: Set master OpenShift facts
- include: "{{ role_path | dirname }}/openshift_common/tasks/set_facts.yml"
- facts:
- - section: master
- option: debug_level
- value: "{{ openshift_master_debug_level }}"
- - section: master
- option: public_ip
- value: "{{ openshift_public_ip }}"
- - section: master
- option: externally_managed
- value: "{{ openshift_master_manage_service_externally }}"
-
-# TODO: remove this when origin PR #1298 has landed in OSE
-- name: Workaround for openshift-master taking longer than 90 seconds to issue sdNotify signal
- command: cp /usr/lib/systemd/system/openshift-master.service /etc/systemd/system/
- args:
- creates: /etc/systemd/system/openshift-master.service
-- ini_file:
- dest: /etc/systemd/system/openshift-master.service
- option: TimeoutStartSec
- section: Service
- value: 300
- state: present
- register: result
-- command: systemctl daemon-reload
- when: result | changed
-# End of workaround pending PR #1298
-
- name: Start and enable openshift-master
service: name=openshift-master enabled=yes state=started
- when: not openshift_master_manage_service_externally
- register: result
-
-#TODO: remove this when origin PR #1204 has landed in OSE
-- name: need to pause here, otherwise we attempt to copy certificates generated by the master before they are generated
- pause: seconds=30
- when: result | changed
-# End of workaround pending PR #1204
-- name: Disable openshift-master if openshift-master is managed externally
- service: name=openshift-master enabled=false
- when: openshift_master_manage_service_externally
-
-# TODO: create an os_vars role that has generic env related config and move
-# the root kubeconfig setting there, cannot use dependencies to force ordering
-# with openshift_node and openshift_master because the way conditional
-# dependencies work with current ansible would also exclude the
-# openshift_common dependency.
- name: Create .kube directory
file:
path: /root/.kube
state: directory
mode: 0700
+
+# TODO: Update this file if the contents of the source file are not present in
+# the dest file; we will need to make sure to ignore things that could be added
- name: Configure root user kubeconfig
- command: cp /var/lib/openshift/openshift.local.certificates/admin/.kubeconfig /root/.kube/.kubeconfig
+ command: cp /var/lib/openshift/openshift.local.certificates/openshift-client/.kubeconfig /root/.kube/.kubeconfig
args:
creates: /root/.kube/.kubeconfig
diff --git a/roles/openshift_master/vars/main.yml b/roles/openshift_master/vars/main.yml
deleted file mode 100644
index 9a8c4bba2..000000000
--- a/roles/openshift_master/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-openshift_host_type: master
diff --git a/roles/openshift_node/README.md b/roles/openshift_node/README.md
index 9210bab16..83359f164 100644
--- a/roles/openshift_node/README.md
+++ b/roles/openshift_node/README.md
@@ -16,20 +16,15 @@ Role Variables
From this role:
| Name | Default value | |
|------------------------------------------|-----------------------|----------------------------------------|
-| openshift_node_manage_service_externally | False | Should the openshift-node role manage the openshift-node service? |
| openshift_node_debug_level | openshift_debug_level | Verbosity of the debug logs for openshift-node |
-| openshift_master_public_ips | UNDEF (Required) | List of the public IPs for the openhift-master hosts |
-| openshift_master_ips | UNDEF (Required) | List of IP addresses for the openshift-master hosts to be used for node -> master communication |
| openshift_registry_url | UNDEF (Optional) | Default docker registry to use |
-| openshift_node_resources | { capacity: { cpu: , memory: } } | Resource specification for this node, cpu is the number of CPUs to advertise and memory is the amount of memory in bytes to advertise. Default values chosen when not set are the number of logical CPUs for the host and 75% of total system memory |
From openshift_common:
| Name | Default Value | |
|-------------------------------|---------------------|---------------------|
| openshift_debug_level | 0 | Global openshift debug log verbosity |
-| openshift_hostname_workaround | True | |
| openshift_public_ip | UNDEF (Required) | Public IP address to use for this host |
-| openshift_hostname | openshift_public_ip if openshift_hostname_workaround else ansible_fqdn | hostname to use for this instance |
+| openshift_hostname | UNDEF (Required) | hostname to use for this instance |
Dependencies
------------
diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml
index c45524f16..df7ec41b6 100644
--- a/roles/openshift_node/defaults/main.yml
+++ b/roles/openshift_node/defaults/main.yml
@@ -1,10 +1,4 @@
---
-openshift_node_manage_service_externally: false
-openshift_node_debug_level: "{{ openshift_debug_level | default(0) }}"
os_firewall_allow:
- service: OpenShift kubelet
port: 10250/tcp
-openshift_node_resources:
- capacity:
- cpu:
- memory:
diff --git a/roles/openshift_node/handlers/main.yml b/roles/openshift_node/handlers/main.yml
index f7aa36d88..ca2992637 100644
--- a/roles/openshift_node/handlers/main.yml
+++ b/roles/openshift_node/handlers/main.yml
@@ -1,4 +1,4 @@
---
- name: restart openshift-node
service: name=openshift-node state=restarted
- when: not openshift_node_manage_service_externally
+ when: not openshift.common.use_openshift_sdn|bool
diff --git a/roles/openshift_node/library/openshift_register_node.py b/roles/openshift_node/library/openshift_register_node.py
deleted file mode 100644
index 63079e59b..000000000
--- a/roles/openshift_node/library/openshift_register_node.py
+++ /dev/null
@@ -1,211 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# vim: expandtab:tabstop=4:shiftwidth=4
-
-import os
-import multiprocessing
-import socket
-from subprocess import check_output, Popen
-
-DOCUMENTATION = '''
----
-module: openshift_register_node
-short_description: This module registers an openshift-node with an openshift-master
-author: Jason DeTiberus
-requirements: [ openshift-node ]
-notes: Node resources can be specified using either the resources option or the following options: cpu, memory
-options:
- name:
- description:
- - id for this node (usually the node fqdn)
- required: true
- hostIP:
- description:
- - ip address for this node
- required: false
- cpu:
- description:
- - number of CPUs for this node
- required: false
- default: number of logical CPUs detected
- memory:
- description:
- - Memory available for this node in bytes
- required: false
- default: 80% MemTotal
- resources:
- description:
- - A json string representing Node resources
- required: false
-'''
-EXAMPLES = '''
-# Minimal node registration
-- openshift_register_node: name=ose3.node.example.com
-
-# Node registration with all options (using cpu and memory options)
-- openshift_register_node:
- name: ose3.node.example.com
- hostIP: 192.168.1.1
- apiVersion: v1beta1
- cpu: 1
- memory: 1073741824
-
-# Node registration with all options (using resources option)
-- openshift_register_node:
- name: ose3.node.example.com
- hostIP: 192.168.1.1
- apiVersion: v1beta1
- resources:
- capacity:
- cpu: 1
- memory: 1073741824
-'''
-
-def main():
- module = AnsibleModule(
- argument_spec = dict(
- name = dict(required = True),
- hostIP = dict(),
- apiVersion = dict(),
- cpu = dict(),
- memory = dict(),
- resources = dict(),
- client_config = dict(),
- client_cluster = dict(default = 'master'),
- client_context = dict(default = 'master'),
- client_user = dict(default = 'admin')
- ),
- mutually_exclusive = [
- ['resources', 'cpu'],
- ['resources', 'memory']
- ],
- supports_check_mode=True
- )
-
- user_has_client_config = os.path.exists(os.path.expanduser('~/.kube/.kubeconfig'))
- if not (user_has_client_config or module.params['client_config']):
- module.fail_json(msg="Could not locate client configuration, "
- "client_config must be specified if "
- "~/.kube/.kubeconfig is not present")
-
- client_opts = []
- if module.params['client_config']:
- client_opts.append("--kubeconfig=%s" % module.params['client_config'])
-
- try:
- output = check_output(["/usr/bin/openshift", "ex", "config", "view",
- "-o", "json"] + client_opts,
- stderr=subprocess.STDOUT)
- except subprocess.CalledProcessError as e:
- module.fail_json(msg="Failed to get client configuration",
- command=e.cmd, returncode=e.returncode, output=e.output)
-
- config = json.loads(output)
- if not (bool(config['clusters']) or bool(config['contexts']) or
- bool(config['current-context']) or bool(config['users'])):
- module.fail_json(msg="Client config missing required values",
- output=output)
-
- client_context = module.params['client_context']
- if client_context:
- config_context = next((context for context in config['contexts']
- if context['name'] == client_context), None)
- if not config_context:
- module.fail_json(msg="Context %s not found in client config" %
- client_context)
- if not config['current-context'] or config['current-context'] != client_context:
- client_opts.append("--context=%s" % client_context)
-
- client_user = module.params['client_user']
- if client_user:
- config_user = next((user for user in config['users']
- if user['name'] == client_user), None)
- if not config_user:
- module.fail_json(msg="User %s not found in client config" %
- client_user)
- if client_user != config_context['context']['user']:
- client_opts.append("--user=%s" % client_user)
-
- client_cluster = module.params['client_cluster']
- if client_cluster:
- config_cluster = next((cluster for cluster in config['clusters']
- if cluster['name'] == client_cluster), None)
- if not client_cluster:
- module.fail_json(msg="Cluster %s not found in client config" %
- client_cluster)
- if client_cluster != config_context['context']['cluster']:
- client_opts.append("--cluster=%s" % client_cluster)
-
- node_def = dict(
- id = module.params['name'],
- kind = 'Node',
- apiVersion = 'v1beta1',
- resources = dict(
- capacity = dict()
- )
- )
-
- for key, value in module.params.iteritems():
- if key in ['cpu', 'memory']:
- node_def['resources']['capacity'][key] = value
- elif key == 'name':
- node_def['id'] = value
- elif key != 'client_config':
- if value:
- node_def[key] = value
-
- if not node_def['resources']['capacity']['cpu']:
- node_def['resources']['capacity']['cpu'] = multiprocessing.cpu_count()
-
- if not node_def['resources']['capacity']['memory']:
- with open('/proc/meminfo', 'r') as mem:
- for line in mem:
- entries = line.split()
- if str(entries.pop(0)) == 'MemTotal:':
- mem_total_kb = int(entries.pop(0))
- mem_capacity = int(mem_total_kb * 1024 * .75)
- node_def['resources']['capacity']['memory'] = mem_capacity
- break
-
- try:
- output = check_output(["/usr/bin/osc", "get", "nodes"] + client_opts,
- stderr=subprocess.STDOUT)
- except subprocess.CalledProcessError as e:
- module.fail_json(msg="Failed to get node list", command=e.cmd,
- returncode=e.returncode, output=e.output)
-
- if re.search(module.params['name'], output, re.MULTILINE):
- module.exit_json(changed=False, node_def=node_def)
- elif module.check_mode:
- module.exit_json(changed=True, node_def=node_def)
-
- config_def = dict(
- metadata = dict(
- name = "add-node-%s" % module.params['name']
- ),
- kind = 'Config',
- apiVersion = 'v1beta1',
- items = [node_def]
- )
-
- p = Popen(["/usr/bin/osc"] + client_opts + ["create", "node"] + ["-f", "-"],
- stdin=subprocess.PIPE, stdout=subprocess.PIPE,
- stderr=subprocess.PIPE, close_fds=True)
- (out, err) = p.communicate(module.jsonify(config_def))
- ret = p.returncode
-
- if ret != 0:
- if re.search("minion \"%s\" already exists" % module.params['name'],
- err):
- module.exit_json(changed=False,
- msg="node definition already exists", config_def=config_def)
- else:
- module.fail_json(msg="Node creation failed.", ret=ret, out=out,
- err=err, config_def=config_def)
-
- module.exit_json(changed=True, out=out, err=err, ret=ret,
- node_def=config_def)
-
-# import module snippets
-from ansible.module_utils.basic import *
-main()
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index 6721c7401..e3c04585b 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -1,27 +1,38 @@
---
+# TODO: allow for overriding default ports where possible
+# TODO: trigger the external service when restart is needed
+
+- name: Set node OpenShift facts
+ openshift_facts:
+ role: 'node'
+ local_facts:
+ debug_level: "{{ openshift_node_debug_level | default(openshift.common.debug_level) }}"
+
+- name: Test if node certs and config exist
+ stat: path={{ item }}
+ failed_when: not result.stat.exists
+ register: result
+ with_items:
+ - "{{ cert_path }}"
+ - "{{ cert_path }}/cert.crt"
+ - "{{ cert_path }}/key.key"
+ - "{{ cert_path }}/.kubeconfig"
+ - "{{ cert_path }}/server.crt"
+ - "{{ cert_path }}/server.key"
+ - "{{ cert_parent_path }}/ca/cert.crt"
+ #- "{{ cert_path }}/node.yaml"
+
- name: Install OpenShift Node package
yum: pkg=openshift-node state=installed
-- local_action: command /usr/bin/mktemp -d /tmp/openshift-ansible-XXXXXXX
- register: mktemp
-
-- name: Retrieve OpenShift Master credentials
- local_action: command /usr/bin/rsync --compress --archive --rsh 'ssh -S none -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null' root@{{ openshift_master_public_ips[0] }}:/var/lib/openshift/openshift.local.certificates/admin/ {{ mktemp.stdout }}
- ignore_errors: yes
-
-- file: path=/var/lib/openshift/openshift.local.certificates/admin state=directory
-
-- name: Store OpenShift Master credentials
- local_action: command /usr/bin/rsync --compress --archive --rsh 'ssh -S none -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null' {{ mktemp.stdout }}/ root@{{ openshift_public_ip }}:/var/lib/openshift/openshift.local.certificates/admin
- ignore_errors: yes
-
-- local_action: file name={{ mktemp.stdout }} state=absent
-
+# --create-certs=false is a temporary workaround until
+# https://github.com/openshift/origin/pull/1361 is merged upstream and it is
+# the default for nodes
- name: Configure OpenShift Node settings
lineinfile:
dest: /etc/sysconfig/openshift-node
regexp: '^OPTIONS='
- line: "OPTIONS=\"--master=https://{{ openshift_master_ips[0] }}:8443 --loglevel={{ openshift_node_debug_level }}\""
+ line: "OPTIONS=\"--hostname={{ openshift.common.hostname }} --loglevel={{ openshift.node.debug_level }} --create-certs=false\""
notify:
- restart openshift-node
@@ -34,45 +45,10 @@
notify:
- restart openshift-node
-- name: Set OpenShift node facts
- include: "{{ role_path | dirname }}/openshift_common/tasks/set_facts.yml"
- facts:
- - section: node
- option: debug_level
- value: "{{ openshift_node_debug_level }}"
- - section: node
- option: public_ip
- value: "{{ openshift_public_ip }}"
- - section: node
- option: externally_managed
- value: "{{ openshift_node_manage_service_externally }}"
-
-# fixme: Once the openshift_cluster playbook is published state should be started
-# Always bounce service to pick up new credentials
- name: Start and enable openshift-node
- service: name=openshift-node enabled=yes state=restarted
- when: not openshift_node_manage_service_externally
+ service: name=openshift-node enabled=yes state=started
+ when: not openshift.common.use_openshift_sdn|bool
- name: Disable openshift-node if openshift-node is managed externally
service: name=openshift-node enabled=false
- when: openshift_node_manage_service_externally
-
-# TODO: create an os_vars role that has generic env related config and move
-# the root kubeconfig setting there, cannot use dependencies to force ordering
-# with openshift_node and openshift_master because the way conditional
-# dependencies work with current ansible would also exclude the
-# openshift_common dependency.
-- name: Create .kube directory
- file:
- path: /root/.kube
- state: directory
- mode: 0700
-- name: Configure root user kubeconfig
- command: cp /var/lib/openshift/openshift.local.certificates/admin/.kubeconfig /root/.kube/.kubeconfig
- args:
- creates: /root/.kube/.kubeconfig
-
-- name: Register node (if not already registered)
- openshift_register_node:
- name: "{{ openshift_hostname }}"
- resources: "{{ openshift_node_resources }}"
+ when: openshift.common.use_openshift_sdn|bool
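(Aside: the `|bool` cast on these conditions matters because facts that round-trip through the openshift_facts module can come back as the strings 'true'/'false' rather than booleans, and `|bool` normalizes both. A rough Python rendering of that coercion, illustrative only and not Ansible's actual implementation:)

```python
# Illustrative string-to-bool coercion, similar in spirit to Jinja2's |bool.
def to_bool(value):
    if isinstance(value, bool):
        return value
    return str(value).strip().lower() in ('yes', 'on', '1', 'true')

for raw in (True, 'true', 'False', 0):
    print(raw, '->', to_bool(raw))
# True -> True, 'true' -> True, 'False' -> False, 0 -> False
```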
diff --git a/roles/openshift_node/vars/main.yml b/roles/openshift_node/vars/main.yml
deleted file mode 100644
index 9841d52f9..000000000
--- a/roles/openshift_node/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-openshift_host_type: node
diff --git a/roles/openshift_register_nodes/README.md b/roles/openshift_register_nodes/README.md
new file mode 100644
index 000000000..b96faa044
--- /dev/null
+++ b/roles/openshift_register_nodes/README.md
@@ -0,0 +1,34 @@
+OpenShift Register Nodes
+========================
+
+TODO
+
+Requirements
+------------
+
+TODO
+
+Role Variables
+--------------
+
+TODO
+
+Dependencies
+------------
+
+TODO
+
+Example Playbook
+----------------
+
+TODO
+
+License
+-------
+
+Apache License Version 2.0
+
+Author Information
+------------------
+
+Jason DeTiberus (jdetiber@redhat.com)
diff --git a/roles/openshift_register_nodes/defaults/main.yml b/roles/openshift_register_nodes/defaults/main.yml
new file mode 100644
index 000000000..3501e8922
--- /dev/null
+++ b/roles/openshift_register_nodes/defaults/main.yml
@@ -0,0 +1,5 @@
+---
+openshift_kube_api_version: v1beta1
+openshift_cert_dir: openshift.local.certificates
+openshift_cert_dir_parent: /var/lib/openshift
+openshift_cert_dir_abs: "{{ openshift_cert_dir_parent ~ '/' ~ openshift_cert_dir }}"
diff --git a/roles/openshift_register_nodes/library/kubernetes_register_node.py b/roles/openshift_register_nodes/library/kubernetes_register_node.py
new file mode 100755
index 000000000..8ebeb087a
--- /dev/null
+++ b/roles/openshift_register_nodes/library/kubernetes_register_node.py
@@ -0,0 +1,371 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# vim: expandtab:tabstop=4:shiftwidth=4
+
+import os
+import json
+import re
+
+DOCUMENTATION = '''
+---
+module: kubernetes_register_node
+short_description: Registers a kubernetes node with a master
+description:
+ - Registers a kubernetes node with a master
+options:
+ name:
+ default: null
+ description:
+ - Identifier for this node (usually the node fqdn).
+ required: true
+    api_version:
+ choices: ['v1beta1', 'v1beta3']
+ default: 'v1beta1'
+ description:
+ - Kubernetes API version to use
+        required: false
+ host_ip:
+ default: null
+ description:
+ - IP Address to associate with the node when registering.
+ Available in the following API versions: v1beta1.
+ required: false
+ hostnames:
+ default: []
+ description:
+ - Valid hostnames for this node. Available in the following API
+ versions: v1beta3.
+ required: false
+ external_ips:
+ default: []
+ description:
+ - External IP Addresses for this node. Available in the following API
+ versions: v1beta3.
+ required: false
+ internal_ips:
+ default: []
+ description:
+ - Internal IP Addresses for this node. Available in the following API
+ versions: v1beta3.
+ required: false
+ cpu:
+ default: null
+ description:
+ - Number of CPUs to allocate for this node. When using the v1beta1
+ API, you must specify the CPU count as a floating point number
+ with no more than 3 decimal places. API version v1beta3 and newer
+ accepts arbitrary float values.
+ required: false
+ memory:
+ default: null
+ description:
+ - Memory available for this node. When using the v1beta1 API, you
+ must specify the memory size in bytes. API version v1beta3 and
+ newer accepts binary SI and decimal SI values.
+ required: false
+'''
+EXAMPLES = '''
+# Minimal node registration
+- kubernetes_register_node: name=ose3.node.example.com
+
+# Node registration using the v1beta1 API, assigning 1 CPU core and 500 MB of
+# memory
+- kubernetes_register_node:
+    name: ose3.node.example.com
+    api_version: v1beta1
+    host_ip: 192.168.1.1
+ cpu: 1
+ memory: 500000000
+
+# Node registration using the v1beta3 API, setting an alternate hostname,
+# internalIP, externalIP and assigning 3.5 CPU cores and 1 TiB of Memory
+- kubernetes_register_node:
+ name: ose3.node.example.com
+ api_version: v1beta3
+ external_ips: ['192.168.1.5']
+ internal_ips: ['10.0.0.5']
+ hostnames: ['ose2.node.internal.local']
+ cpu: 3.5
+ memory: 1Ti
+'''
+
+
+class ClientConfigException(Exception):
+ pass
+
+class ClientConfig:
+ def __init__(self, client_opts, module):
+ _, output, error = module.run_command(["/usr/bin/openshift", "ex",
+ "config", "view", "-o",
+ "json"] + client_opts,
+ check_rc = True)
+ self.config = json.loads(output)
+
+ if not (bool(self.config['clusters']) or
+ bool(self.config['contexts']) or
+ bool(self.config['current-context']) or
+ bool(self.config['users'])):
+            raise ClientConfigException("Client config missing required "
+                                        "values: %s" % output)
+
+ def current_context(self):
+ return self.config['current-context']
+
+ def section_has_value(self, section_name, value):
+ section = self.config[section_name]
+ if isinstance(section, dict):
+ return value in section
+ else:
+ val = next((item for item in section
+ if item['name'] == value), None)
+ return val is not None
+
+ def has_context(self, context):
+ return self.section_has_value('contexts', context)
+
+ def has_user(self, user):
+ return self.section_has_value('users', user)
+
+ def has_cluster(self, cluster):
+ return self.section_has_value('clusters', cluster)
+
+ def get_value_for_context(self, context, attribute):
+ contexts = self.config['contexts']
+ if isinstance(contexts, dict):
+ return contexts[context][attribute]
+ else:
+ return next((c['context'][attribute] for c in contexts
+ if c['name'] == context), None)
+
+ def get_user_for_context(self, context):
+ return self.get_value_for_context(context, 'user')
+
+ def get_cluster_for_context(self, context):
+ return self.get_value_for_context(context, 'cluster')
+
+class Util:
+ @staticmethod
+ def remove_empty_elements(mapping):
+ if isinstance(mapping, dict):
+ m = mapping.copy()
+ for key, val in mapping.iteritems():
+ if not val:
+ del m[key]
+ return m
+ else:
+ return mapping
+
+class NodeResources:
+ def __init__(self, version, cpu=None, memory=None):
+ if version == 'v1beta1':
+ self.resources = dict(capacity=dict())
+ self.resources['capacity']['cpu'] = cpu
+ self.resources['capacity']['memory'] = memory
+
+ def get_resources(self):
+ return Util.remove_empty_elements(self.resources)
+
+class NodeSpec:
+ def __init__(self, version, cpu=None, memory=None, cidr=None, externalID=None):
+ if version == 'v1beta3':
+ self.spec = dict(podCIDR=cidr, externalID=externalID,
+ capacity=dict())
+ self.spec['capacity']['cpu'] = cpu
+ self.spec['capacity']['memory'] = memory
+
+ def get_spec(self):
+ return Util.remove_empty_elements(self.spec)
+
+class NodeStatus:
+ def addAddresses(self, addressType, addresses):
+ addressList = []
+ for address in addresses:
+ addressList.append(dict(type=addressType, address=address))
+ return addressList
+
+    def __init__(self, version, externalIPs=[], internalIPs=[],
+                 hostnames=[]):
+        if version == 'v1beta3':
+            self.status = dict(addresses = self.addAddresses('ExternalIP',
+                                                             externalIPs) +
+                                           self.addAddresses('InternalIP',
+                                                             internalIPs) +
+                                           self.addAddresses('Hostname',
+                                                             hostnames))
+
+ def get_status(self):
+ return Util.remove_empty_elements(self.status)
+
+class Node:
+ def __init__(self, module, client_opts, version='v1beta1', name=None,
+ hostIP = None, hostnames=[], externalIPs=[], internalIPs=[],
+ cpu=None, memory=None, labels=dict(), annotations=dict(),
+ podCIDR=None, externalID=None):
+ self.module = module
+ self.client_opts = client_opts
+ if version == 'v1beta1':
+ self.node = dict(id = name,
+ kind = 'Node',
+ apiVersion = version,
+ hostIP = hostIP,
+ resources = NodeResources(version, cpu, memory),
+ cidr = podCIDR,
+ labels = labels,
+ annotations = annotations,
+ externalID = externalID
+ )
+ elif version == 'v1beta3':
+ metadata = dict(name = name,
+ labels = labels,
+ annotations = annotations
+ )
+ self.node = dict(kind = 'Node',
+ apiVersion = version,
+ metadata = metadata,
+ spec = NodeSpec(version, cpu, memory, podCIDR,
+ externalID),
+ status = NodeStatus(version, externalIPs,
+ internalIPs, hostnames),
+ )
+
+ def get_name(self):
+ if self.node['apiVersion'] == 'v1beta1':
+ return self.node['id']
+ elif self.node['apiVersion'] == 'v1beta3':
+            return self.node['metadata']['name']
+
+ def get_node(self):
+ node = self.node.copy()
+ if self.node['apiVersion'] == 'v1beta1':
+ node['resources'] = self.node['resources'].get_resources()
+ elif self.node['apiVersion'] == 'v1beta3':
+ node['spec'] = self.node['spec'].get_spec()
+ node['status'] = self.node['status'].get_status()
+ return Util.remove_empty_elements(node)
+
+ def exists(self):
+ _, output, error = self.module.run_command(["/usr/bin/osc", "get",
+ "nodes"] + self.client_opts,
+ check_rc = True)
+ if re.search(self.module.params['name'], output, re.MULTILINE):
+ return True
+ return False
+
+ def create(self):
+ cmd = ['/usr/bin/osc'] + self.client_opts + ['create', 'node', '-f', '-']
+ rc, output, error = self.module.run_command(cmd,
+ data=self.module.jsonify(self.get_node()))
+ if rc != 0:
+ if re.search("minion \"%s\" already exists" % self.get_name(),
+ error):
+ self.module.exit_json(changed=False,
+ msg="node definition already exists",
+ node=self.get_node())
+ else:
+ self.module.fail_json(msg="Node creation failed.", rc=rc,
+ output=output, error=error,
+ node=self.get_node())
+ else:
+ return True
+
+def main():
+ module = AnsibleModule(
+ argument_spec = dict(
+ name = dict(required = True, type = 'str'),
+ host_ip = dict(type = 'str'),
+ hostnames = dict(type = 'list', default = []),
+ external_ips = dict(type = 'list', default = []),
+ internal_ips = dict(type = 'list', default = []),
+ api_version = dict(type = 'str', default = 'v1beta1', # TODO: after kube rebase, we can default to v1beta3
+ choices = ['v1beta1', 'v1beta3']),
+ cpu = dict(type = 'str'),
+ memory = dict(type = 'str'),
+            labels = dict(type = 'dict', default = {}), # TODO: needs to be documented
+            annotations = dict(type = 'dict', default = {}), # TODO: needs to be documented
+            pod_cidr = dict(type = 'str'), # TODO: needs to be documented
+            external_id = dict(type = 'str'), # TODO: needs to be documented
+            client_config = dict(type = 'str'), # TODO: needs to be documented
+            client_cluster = dict(type = 'str', default = 'master'), # TODO: needs to be documented
+            client_context = dict(type = 'str', default = 'master'), # TODO: needs to be documented
+            client_user = dict(type = 'str', default = 'admin') # TODO: needs to be documented
+ ),
+ mutually_exclusive = [
+ ['host_ip', 'external_ips'],
+ ['host_ip', 'internal_ips'],
+ ['host_ip', 'hostnames'],
+ ],
+ supports_check_mode=True
+ )
+
+ user_has_client_config = os.path.exists(os.path.expanduser('~/.kube/.kubeconfig'))
+ if not (user_has_client_config or module.params['client_config']):
+ module.fail_json(msg="Could not locate client configuration, "
+ "client_config must be specified if "
+ "~/.kube/.kubeconfig is not present")
+
+ client_opts = []
+ if module.params['client_config']:
+ client_opts.append("--kubeconfig=%s" % module.params['client_config'])
+
+ try:
+ config = ClientConfig(client_opts, module)
+ except ClientConfigException as e:
+ module.fail_json(msg="Failed to get client configuration", exception=e)
+
+ client_context = module.params['client_context']
+ if config.has_context(client_context):
+ if client_context != config.current_context():
+ client_opts.append("--context=%s" % client_context)
+ else:
+ module.fail_json(msg="Context %s not found in client config" %
+ client_context)
+
+ client_user = module.params['client_user']
+ if config.has_user(client_user):
+ if client_user != config.get_user_for_context(client_context):
+ client_opts.append("--user=%s" % client_user)
+ else:
+ module.fail_json(msg="User %s not found in client config" %
+ client_user)
+
+ client_cluster = module.params['client_cluster']
+ if config.has_cluster(client_cluster):
+        if client_cluster != config.get_cluster_for_context(client_context):
+ client_opts.append("--cluster=%s" % client_cluster)
+ else:
+ module.fail_json(msg="Cluster %s not found in client config" %
+ client_cluster)
+
+ # TODO: provide sane defaults for some (like hostname, externalIP,
+ # internalIP, etc)
+ node = Node(module, client_opts, module.params['api_version'],
+ module.params['name'], module.params['host_ip'],
+ module.params['hostnames'], module.params['external_ips'],
+ module.params['internal_ips'], module.params['cpu'],
+ module.params['memory'], module.params['labels'],
+ module.params['annotations'], module.params['pod_cidr'],
+ module.params['external_id'])
+
+ # TODO: attempt to support changing node settings where possible and/or
+ # modifying node resources
+ if node.exists():
+ module.exit_json(changed=False, node=node.get_node())
+ elif module.check_mode:
+ module.exit_json(changed=True, node=node.get_node())
+ else:
+ if node.create():
+ module.exit_json(changed=True,
+ msg="Node created successfully",
+ node=node.get_node())
+ else:
+ module.fail_json(msg="Unknown error creating node",
+ node=node.get_node())
+
+
+# import module snippets
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
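A note on the module above: Util.remove_empty_elements is what keeps unset cpu/memory fields out of the node JSON that gets submitted. A minimal standalone sketch of that pruning (illustrative, not part of the patch):

```python
# Standalone sketch of the empty-field pruning used by the module; mirrors
# Util.remove_empty_elements and NodeResources for the v1beta1 API.
def remove_empty_elements(mapping):
    if isinstance(mapping, dict):
        return dict((k, v) for k, v in mapping.items() if v)
    return mapping

resources = {'capacity': {'cpu': '1', 'memory': None}}
resources['capacity'] = remove_empty_elements(resources['capacity'])
print(resources)  # {'capacity': {'cpu': '1'}} -- the unset memory is omitted
```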
diff --git a/roles/openshift_register_nodes/meta/main.yml b/roles/openshift_register_nodes/meta/main.yml
new file mode 100644
index 000000000..e40a152c1
--- /dev/null
+++ b/roles/openshift_register_nodes/meta/main.yml
@@ -0,0 +1,17 @@
+---
+galaxy_info:
+ author: Jason DeTiberus
+ description:
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 1.8
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+ - system
+dependencies:
+- { role: openshift_facts }
+
diff --git a/roles/openshift_register_nodes/tasks/main.yml b/roles/openshift_register_nodes/tasks/main.yml
new file mode 100644
index 000000000..7319b88b1
--- /dev/null
+++ b/roles/openshift_register_nodes/tasks/main.yml
@@ -0,0 +1,67 @@
+---
+# TODO: support new create-config command to generate node certs and config
+# TODO: recreate master/node configs if settings that affect the configs
+# change (hostname, public_hostname, ip, public_ip, etc)
+
+# TODO: create a failed_when condition
+- name: Create node server certificates
+ command: >
+ /usr/bin/openshift admin create-server-cert
+ --overwrite=false
+ --cert={{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}/server.crt
+ --key={{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}/server.key
+ --hostnames={{ [item.openshift.common.hostname,
+ item.openshift.common.public_hostname]|unique|join(",") }}
+ args:
+ chdir: "{{ openshift_cert_dir_parent }}"
+ creates: "{{ openshift_cert_dir_abs }}/node-{{ item.openshift.common.hostname }}/server.crt"
+ with_items: openshift_nodes
+ register: server_cert_result
+
+# TODO: create a failed_when condition
+- name: Create node client certificates
+ command: >
+ /usr/bin/openshift admin create-node-cert
+ --overwrite=false
+ --cert={{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}/cert.crt
+ --key={{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}/key.key
+ --node-name={{ item.openshift.common.hostname }}
+ args:
+ chdir: "{{ openshift_cert_dir_parent }}"
+ creates: "{{ openshift_cert_dir_abs }}/node-{{ item.openshift.common.hostname }}/cert.crt"
+ with_items: openshift_nodes
+ register: node_cert_result
+
+# TODO: create a failed_when condition
+- name: Create kubeconfigs for nodes
+ command: >
+ /usr/bin/openshift admin create-kubeconfig
+ --client-certificate={{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}/cert.crt
+ --client-key={{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}/key.key
+ --kubeconfig={{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}/.kubeconfig
+ --master={{ openshift.master.api_url }}
+ --public-master={{ openshift.master.public_api_url }}
+ args:
+ chdir: "{{ openshift_cert_dir_parent }}"
+ creates: "{{ openshift_cert_dir_abs }}/node-{{ item.openshift.common.hostname }}/.kubeconfig"
+ with_items: openshift_nodes
+ register: kubeconfig_result
+
+- name: Register unregistered nodes
+ kubernetes_register_node:
+ client_user: openshift-client
+ name: "{{ item.openshift.common.hostname }}"
+ api_version: "{{ openshift_kube_api_version }}"
+ cpu: "{{ item.openshift.node.resources_cpu | default(None) }}"
+ memory: "{{ item.openshift.node.resources_memory | default(None) }}"
+ pod_cidr: "{{ item.openshift.node.pod_cidr | default(None) }}"
+ host_ip: "{{ item.openshift.common.ip }}"
+ labels: "{{ item.openshift.node.labels | default({}) }}"
+ annotations: "{{ item.openshift.node.annotations | default({}) }}"
+    external_id: "{{ item.openshift.node.external_id | default(None) }}"
+ # TODO: support customizing other attributes such as: client_config,
+ # client_cluster, client_context, client_user
+ # TODO: update for v1beta3 changes after rebase: hostnames, external_ips,
+ # internal_ips, external_id
+ with_items: openshift_nodes
+ register: register_result
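The `--hostnames` flag in the server-cert task above is built with Jinja2's `unique|join(",")` filters, so a node whose public hostname matches its internal hostname yields a single SAN entry. A rough Python equivalent of that filter chain (illustrative only):

```python
# Illustrative equivalent of {{ [hostname, public_hostname]|unique|join(",") }}
def cert_hostnames(hostname, public_hostname):
    seen, result = set(), []
    for name in (hostname, public_hostname):
        if name not in seen:        # 'unique' keeps the first occurrence
            seen.add(name)
            result.append(name)
    return ",".join(result)         # 'join(",")'

print(cert_hostnames("node1.example.com", "node1.example.com"))
# node1.example.com
print(cert_hostnames("node1.internal", "node1.example.com"))
# node1.internal,node1.example.com
```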
diff --git a/roles/openshift_repos/README.md b/roles/openshift_repos/README.md
new file mode 100644
index 000000000..6713e11fc
--- /dev/null
+++ b/roles/openshift_repos/README.md
@@ -0,0 +1,38 @@
+OpenShift Repos
+================
+
+Configures repositories for an OpenShift installation
+
+Requirements
+------------
+
+A RHEL 7.1 host pre-configured with access to the rhel-7-server-rpms,
+rhel-7-server-extras-rpms, and rhel-7-server-ose-beta-rpms repos.
+
+Role Variables
+--------------
+
+| Name | Default value | |
+|-------------------------------|---------------|----------------------------------------------|
+| openshift_deployment_type    | online        | Possible values: enterprise, origin, online |
+| openshift_additional_repos | {} | TODO |
+
+Dependencies
+------------
+
+None.
+
+Example Playbook
+----------------
+
+TODO
+
+License
+-------
+
+Apache License, Version 2.0
+
+Author Information
+------------------
+
+TODO
diff --git a/roles/repos/defaults/main.yaml b/roles/openshift_repos/defaults/main.yaml
index 6fe2bf621..1730207f4 100644
--- a/roles/repos/defaults/main.yaml
+++ b/roles/openshift_repos/defaults/main.yaml
@@ -1,5 +1,7 @@
---
# TODO: once we are able to configure/deploy origin using the openshift roles,
# then we should default to origin
+
+# TODO: push the defaulting of these values to the openshift_facts module
openshift_deployment_type: online
openshift_additional_repos: {}
diff --git a/roles/repos/files/online/RPM-GPG-KEY-redhat-beta b/roles/openshift_repos/files/online/RPM-GPG-KEY-redhat-beta
index 7b40671a4..7b40671a4 100644
--- a/roles/repos/files/online/RPM-GPG-KEY-redhat-beta
+++ b/roles/openshift_repos/files/online/RPM-GPG-KEY-redhat-beta
diff --git a/roles/repos/files/online/RPM-GPG-KEY-redhat-release b/roles/openshift_repos/files/online/RPM-GPG-KEY-redhat-release
index 0f83b622d..0f83b622d 100644
--- a/roles/repos/files/online/RPM-GPG-KEY-redhat-release
+++ b/roles/openshift_repos/files/online/RPM-GPG-KEY-redhat-release
diff --git a/roles/repos/files/online/epel7-kubernetes.repo b/roles/openshift_repos/files/online/epel7-kubernetes.repo
index 1deae2939..1deae2939 100644
--- a/roles/repos/files/online/epel7-kubernetes.repo
+++ b/roles/openshift_repos/files/online/epel7-kubernetes.repo
diff --git a/roles/repos/files/online/epel7-openshift.repo b/roles/openshift_repos/files/online/epel7-openshift.repo
index c7629872d..c7629872d 100644
--- a/roles/repos/files/online/epel7-openshift.repo
+++ b/roles/openshift_repos/files/online/epel7-openshift.repo
diff --git a/roles/repos/files/online/oso-rhui-rhel-7-extras.repo b/roles/openshift_repos/files/online/oso-rhui-rhel-7-extras.repo
index cfe41f691..cfe41f691 100644
--- a/roles/repos/files/online/oso-rhui-rhel-7-extras.repo
+++ b/roles/openshift_repos/files/online/oso-rhui-rhel-7-extras.repo
diff --git a/roles/repos/files/online/oso-rhui-rhel-7-server.repo b/roles/openshift_repos/files/online/oso-rhui-rhel-7-server.repo
index ddc93193d..ddc93193d 100644
--- a/roles/repos/files/online/oso-rhui-rhel-7-server.repo
+++ b/roles/openshift_repos/files/online/oso-rhui-rhel-7-server.repo
diff --git a/roles/repos/files/online/rhel-7-libra-candidate.repo b/roles/openshift_repos/files/online/rhel-7-libra-candidate.repo
index b4215679f..b4215679f 100644
--- a/roles/repos/files/online/rhel-7-libra-candidate.repo
+++ b/roles/openshift_repos/files/online/rhel-7-libra-candidate.repo
diff --git a/roles/openshift_repos/meta/main.yml b/roles/openshift_repos/meta/main.yml
new file mode 100644
index 000000000..0558b822c
--- /dev/null
+++ b/roles/openshift_repos/meta/main.yml
@@ -0,0 +1,15 @@
+---
+galaxy_info:
+ author: TODO
+ description: OpenShift Repositories
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 1.7
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+dependencies:
+- { role: openshift_facts }
diff --git a/roles/repos/tasks/main.yaml b/roles/openshift_repos/tasks/main.yaml
index 43786da41..bb1551d37 100644
--- a/roles/repos/tasks/main.yaml
+++ b/roles/openshift_repos/tasks/main.yaml
@@ -1,6 +1,12 @@
---
# TODO: Add flag for enabling EPEL repo, default to false
+# TODO: Add subscription-management config, with parameters
+# for username, password, poolid(name), and official repos to
+# enable/disable. Might need to make a module that extends the
+# subscription management module to take a poolid and enable/disable the
+# proper repos correctly.
+
- assert:
that: openshift_deployment_type in known_openshift_deployment_types
@@ -8,6 +14,11 @@
- fail: msg="OpenShift Origin support is not currently enabled"
when: openshift_deployment_type == 'origin'
+- name: Ensure libselinux-python is installed
+ yum:
+ pkg: libselinux-python
+ state: present
+
- name: Create any additional repos that are defined
template:
src: yum_repo.j2
diff --git a/roles/repos/templates/yum_repo.j2 b/roles/openshift_repos/templates/yum_repo.j2
index 7ea2c7460..7ea2c7460 100644
--- a/roles/repos/templates/yum_repo.j2
+++ b/roles/openshift_repos/templates/yum_repo.j2
diff --git a/roles/repos/vars/main.yml b/roles/openshift_repos/vars/main.yml
index bbb4c77e7..bbb4c77e7 100644
--- a/roles/repos/vars/main.yml
+++ b/roles/openshift_repos/vars/main.yml
diff --git a/roles/openshift_sdn_master/defaults/main.yml b/roles/openshift_sdn_master/defaults/main.yml
deleted file mode 100644
index da7655546..000000000
--- a/roles/openshift_sdn_master/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-openshift_sdn_master_debug_level: "{{ openshift_debug_level | default(0) }}"
diff --git a/roles/openshift_sdn_master/meta/main.yml b/roles/openshift_sdn_master/meta/main.yml
index e6e5514d1..5de32cc13 100644
--- a/roles/openshift_sdn_master/meta/main.yml
+++ b/roles/openshift_sdn_master/meta/main.yml
@@ -11,4 +11,5 @@ galaxy_info:
- 7
categories:
- cloud
-dependencies: []
+dependencies:
+- { role: openshift_common }
diff --git a/roles/openshift_sdn_master/tasks/main.yml b/roles/openshift_sdn_master/tasks/main.yml
index e1761afdc..f2d61043b 100644
--- a/roles/openshift_sdn_master/tasks/main.yml
+++ b/roles/openshift_sdn_master/tasks/main.yml
@@ -1,4 +1,13 @@
---
+# TODO: add task to set the sdn subnet if openshift-sdn-master hasn't been
+# started yet
+
+- name: Set master sdn OpenShift facts
+ openshift_facts:
+ role: 'master_sdn'
+ local_facts:
+ debug_level: "{{ openshift_master_sdn_debug_level | default(openshift.common.debug_level) }}"
+
- name: Install openshift-sdn-master
yum:
pkg: openshift-sdn-master
@@ -8,17 +17,10 @@
lineinfile:
dest: /etc/sysconfig/openshift-sdn-master
regexp: '^OPTIONS='
- line: "OPTIONS=\"-v={{ openshift_sdn_master_debug_level }}\""
+ line: "OPTIONS=\"-v={{ openshift.master_sdn.debug_level }}\""
notify:
- restart openshift-sdn-master
-- name: Set openshift-sdn-master facts
- include: "{{ role_path | dirname }}/openshift_common/tasks/set_facts.yml"
- facts:
- - section: sdn-master
- option: debug_level
- value: "{{ openshift_sdn_master_debug_level }}"
-
- name: Enable openshift-sdn-master
service:
name: openshift-sdn-master
diff --git a/roles/openshift_sdn_node/README.md b/roles/openshift_sdn_node/README.md
index 294550219..e6b6a9503 100644
--- a/roles/openshift_sdn_node/README.md
+++ b/roles/openshift_sdn_node/README.md
@@ -17,19 +17,12 @@ From this role:
| openshift_sdn_node_debug_level | openshift_debug_level | Verbosity of the debug logs for openshift-sdn-node |
-From openshift_node:
-| Name | Default value | |
-|-----------------------|------------------|--------------------------------------|
-| openshift_master_ips | UNDEF (Required) | List of IP addresses for the openshift-master hosts to be used for node -> master communication |
-
-
From openshift_common:
| Name | Default value | |
|-------------------------------|---------------------|----------------------------------------|
| openshift_debug_level | 0 | Global openshift debug log verbosity |
-| openshift_hostname_workaround | True | |
| openshift_public_ip | UNDEF (Required) | Public IP address to use for this host |
-| openshift_hostname | openshift_public_ip if openshift_hostname_workaround else ansible_fqdn | hostname to use for this instance |
+| openshift_hostname | UNDEF (Required) | Hostname to use for this instance |
Dependencies
------------
diff --git a/roles/openshift_sdn_node/defaults/main.yml b/roles/openshift_sdn_node/defaults/main.yml
deleted file mode 100644
index 9612d9d91..000000000
--- a/roles/openshift_sdn_node/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-openshift_sdn_node_debug_level: "{{ openshift_debug_level | default(0) }}"
diff --git a/roles/openshift_sdn_node/meta/main.yml b/roles/openshift_sdn_node/meta/main.yml
index ab45ff51e..ffe10f836 100644
--- a/roles/openshift_sdn_node/meta/main.yml
+++ b/roles/openshift_sdn_node/meta/main.yml
@@ -11,4 +11,5 @@ galaxy_info:
- 7
categories:
- cloud
-dependencies: []
+dependencies:
+- { role: openshift_common }
diff --git a/roles/openshift_sdn_node/tasks/main.yml b/roles/openshift_sdn_node/tasks/main.yml
index ff05a6972..729c28879 100644
--- a/roles/openshift_sdn_node/tasks/main.yml
+++ b/roles/openshift_sdn_node/tasks/main.yml
@@ -1,4 +1,10 @@
---
+- name: Set node sdn OpenShift facts
+ openshift_facts:
+ role: 'node_sdn'
+ local_facts:
+ debug_level: "{{ openshift_node_sdn_debug_level | default(openshift.common.debug_level) }}"
+
- name: Install openshift-sdn-node
yum:
pkg: openshift-sdn-node
@@ -14,28 +20,19 @@
backrefs: yes
with_items:
- regex: '^(OPTIONS=)'
- line: '\1"-v={{ openshift_sdn_node_debug_level }} -hostname={{ openshift_hostname }}"'
+ line: '\1"-v={{ openshift.node_sdn.debug_level }} -hostname={{ openshift.common.hostname }}"'
- regex: '^(MASTER_URL=)'
- line: '\1"http://{{ openshift_master_ips | first }}:4001"'
+ line: '\1"{{ openshift_sdn_master_url }}"'
- regex: '^(MINION_IP=)'
- line: '\1"{{ openshift_public_ip }}"'
+ line: '\1"{{ openshift.common.ip }}"'
# TODO lock down the insecure-registry config to a more sane value than
# 0.0.0.0/0
- regex: '^(DOCKER_OPTIONS=)'
line: '\1"--insecure-registry=0.0.0.0/0 -b=lbr0 --mtu=1450 --selinux-enabled"'
notify: restart openshift-sdn-node
-- name: Set openshift-sdn-node facts
- include: "{{ role_path | dirname }}/openshift_common/tasks/set_facts.yml"
- facts:
- - section: sdn-node
- option: debug_level
- value: "{{ openshift_sdn_node_debug_level }}"
-
-# fixme: Once the openshift_cluster playbook is published state should be started
-# Always bounce service to pick up new credentials
- name: Start and enable openshift-sdn-node
service:
name: openshift-sdn-node
enabled: yes
- state: restarted
+ state: started
diff --git a/roles/os_env_extras_node/tasks/main.yml b/roles/os_env_extras_node/tasks/main.yml
new file mode 100644
index 000000000..208065df2
--- /dev/null
+++ b/roles/os_env_extras_node/tasks/main.yml
@@ -0,0 +1,5 @@
+---
+# The origin RPM includes instructions on how to set up origin
+# properly. The following steps come from there.
+- name: Change root to be in the Docker group
+ user: name=root groups=dockerroot append=yes
diff --git a/roles/os_firewall/library/os_firewall_manage_iptables.py b/roles/os_firewall/library/os_firewall_manage_iptables.py
index fef710055..90588d2ae 100644..100755
--- a/roles/os_firewall/library/os_firewall_manage_iptables.py
+++ b/roles/os_firewall/library/os_firewall_manage_iptables.py
@@ -1,5 +1,6 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
+# vim: expandtab:tabstop=4:shiftwidth=4
from subprocess import call, check_output
@@ -51,11 +52,13 @@ class IpTablesCreateJumpRuleError(IpTablesError):
# exception was thrown later. for example, when the chain is created
# successfully, but the add/remove rule fails.
class IpTablesManager:
- def __init__(self, module, ip_version, check_mode, chain):
+ def __init__(self, module):
self.module = module
- self.ip_version = ip_version
- self.check_mode = check_mode
- self.chain = chain
+ self.ip_version = module.params['ip_version']
+ self.check_mode = module.check_mode
+ self.chain = module.params['chain']
+ self.create_jump_rule = module.params['create_jump_rule']
+ self.jump_rule_chain = module.params['jump_rule_chain']
self.cmd = self.gen_cmd()
self.save_cmd = self.gen_save_cmd()
self.output = []
@@ -70,13 +73,16 @@ class IpTablesManager:
msg="Failed to save iptables rules",
cmd=e.cmd, exit_code=e.returncode, output=e.output)
+ def verify_chain(self):
+ if not self.chain_exists():
+ self.create_chain()
+ if self.create_jump_rule and not self.jump_rule_exists():
+ self.create_jump()
+
def add_rule(self, port, proto):
rule = self.gen_rule(port, proto)
if not self.rule_exists(rule):
- if not self.chain_exists():
- self.create_chain()
- if not self.jump_rule_exists():
- self.create_jump_rule()
+ self.verify_chain()
if self.check_mode:
self.changed = True
@@ -121,13 +127,13 @@ class IpTablesManager:
return [self.chain, '-p', proto, '-m', 'state', '--state', 'NEW',
'-m', proto, '--dport', str(port), '-j', 'ACCEPT']
- def create_jump_rule(self):
+ def create_jump(self):
if self.check_mode:
self.changed = True
self.output.append("Create jump rule for chain %s" % self.chain)
else:
try:
- cmd = self.cmd + ['-L', 'INPUT', '--line-numbers']
+ cmd = self.cmd + ['-L', self.jump_rule_chain, '--line-numbers']
output = check_output(cmd, stderr=subprocess.STDOUT)
# break the input rules into rows and columns
@@ -144,11 +150,11 @@ class IpTablesManager:
continue
last_rule_target = rule[1]
- # Raise an exception if we do not find a valid INPUT rule
+ # Raise an exception if we do not find a valid rule
if not last_rule_num or not last_rule_target:
raise IpTablesCreateJumpRuleError(
chain=self.chain,
- msg="Failed to find existing INPUT rules",
+ msg="Failed to find existing %s rules" % self.jump_rule_chain,
cmd=None, exit_code=None, output=None)
# Naively assume that if the last row is a REJECT rule, then
@@ -156,19 +162,20 @@ class IpTablesManager:
# assume that we can just append the rule.
if last_rule_target == 'REJECT':
# insert rule
- cmd = self.cmd + ['-I', 'INPUT', str(last_rule_num)]
+ cmd = self.cmd + ['-I', self.jump_rule_chain, str(last_rule_num)]
else:
# append rule
- cmd = self.cmd + ['-A', 'INPUT']
+ cmd = self.cmd + ['-A', self.jump_rule_chain]
cmd += ['-j', self.chain]
output = check_output(cmd, stderr=subprocess.STDOUT)
changed = True
self.output.append(output)
+ self.save()
except subprocess.CalledProcessError as e:
if '--line-numbers' in e.cmd:
raise IpTablesCreateJumpRuleError(
chain=self.chain,
- msg="Failed to query existing INPUT rules to "
+ msg="Failed to query existing %s rules to " % self.jump_rule_chain +
"determine jump rule location",
cmd=e.cmd, exit_code=e.returncode,
output=e.output)
@@ -192,6 +199,7 @@ class IpTablesManager:
self.changed = True
self.output.append("Successfully created chain %s" %
self.chain)
+ self.save()
except subprocess.CalledProcessError as e:
raise IpTablesCreateChainError(
chain=self.chain,
@@ -200,7 +208,7 @@ class IpTablesManager:
)
def jump_rule_exists(self):
- cmd = self.cmd + ['-C', 'INPUT', '-j', self.chain]
+ cmd = self.cmd + ['-C', self.jump_rule_chain, '-j', self.chain]
return True if subprocess.call(cmd) == 0 else False
def chain_exists(self):
@@ -220,9 +228,12 @@ def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True),
- action=dict(required=True, choices=['add', 'remove']),
- protocol=dict(required=True, choices=['tcp', 'udp']),
- port=dict(required=True, type='int'),
+ action=dict(required=True, choices=['add', 'remove', 'verify_chain']),
+ chain=dict(required=False, default='OS_FIREWALL_ALLOW'),
+ create_jump_rule=dict(required=False, type='bool', default=True),
+ jump_rule_chain=dict(required=False, default='INPUT'),
+ protocol=dict(required=False, choices=['tcp', 'udp']),
+ port=dict(required=False, type='int'),
ip_version=dict(required=False, default='ipv4',
choices=['ipv4', 'ipv6']),
),
@@ -232,16 +243,24 @@ def main():
action = module.params['action']
protocol = module.params['protocol']
port = module.params['port']
- ip_version = module.params['ip_version']
- chain = 'OS_FIREWALL_ALLOW'
- iptables_manager = IpTablesManager(module, ip_version, module.check_mode, chain)
+ if action in ['add', 'remove']:
+ if not protocol:
+ error = "protocol is required when action is %s" % action
+ module.fail_json(msg=error)
+ if not port:
+ error = "port is required when action is %s" % action
+ module.fail_json(msg=error)
+
+ iptables_manager = IpTablesManager(module)
try:
if action == 'add':
iptables_manager.add_rule(port, protocol)
elif action == 'remove':
iptables_manager.remove_rule(port, protocol)
+ elif action == 'verify_chain':
+ iptables_manager.verify_chain()
except IpTablesError as e:
module.fail_json(msg=e.msg)
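For context on the create_jump change above: the module lists the configured jump_rule_chain with --line-numbers, finds the last numbered rule, and inserts the jump before a trailing REJECT (otherwise it appends). A self-contained sketch of that placement decision, run against canned iptables output rather than a live system:

```python
# Sketch of the jump-rule placement decision in create_jump(), using canned
# `iptables -L INPUT --line-numbers` output instead of a live system.
SAMPLE = """Chain INPUT (policy ACCEPT)
num  target     prot opt source       destination
1    ACCEPT     all  --  anywhere     anywhere
2    REJECT     all  --  anywhere     anywhere     reject-with icmp-host-prohibited
"""

def jump_rule_args(listing, jump_rule_chain, chain):
    last_num, last_target = None, None
    for line in listing.splitlines():
        cols = line.split()
        if cols and cols[0].isdigit():      # numbered rule rows only
            last_num, last_target = cols[0], cols[1]
    if last_target == 'REJECT':
        # insert the jump just before the catch-all REJECT rule
        return ['-I', jump_rule_chain, last_num, '-j', chain]
    return ['-A', jump_rule_chain, '-j', chain]  # otherwise append

print(jump_rule_args(SAMPLE, 'INPUT', 'OS_FIREWALL_ALLOW'))
# ['-I', 'INPUT', '2', '-j', 'OS_FIREWALL_ALLOW']
```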
diff --git a/roles/os_firewall/meta/main.yml b/roles/os_firewall/meta/main.yml
index 7a8cef6c5..8592371e8 100644
--- a/roles/os_firewall/meta/main.yml
+++ b/roles/os_firewall/meta/main.yml
@@ -1,3 +1,4 @@
+---
galaxy_info:
author: Jason DeTiberus
description: os_firewall
diff --git a/roles/os_firewall/tasks/firewall/firewalld.yml b/roles/os_firewall/tasks/firewall/firewalld.yml
index 469cfab6f..b6bddd5c5 100644
--- a/roles/os_firewall/tasks/firewall/firewalld.yml
+++ b/roles/os_firewall/tasks/firewall/firewalld.yml
@@ -3,6 +3,7 @@
yum:
name: firewalld
state: present
+ register: install_result
- name: Check if iptables-services is installed
command: rpm -q iptables-services
@@ -20,6 +21,10 @@
- ip6tables
when: pkg_check.rc == 0
+- name: Reload systemd units
+ command: systemctl daemon-reload
+ when: install_result | changed
+
- name: Start and enable firewalld service
service:
name: firewalld
diff --git a/roles/os_firewall/tasks/firewall/iptables.yml b/roles/os_firewall/tasks/firewall/iptables.yml
index 87e77c083..7b5c00a9b 100644
--- a/roles/os_firewall/tasks/firewall/iptables.yml
+++ b/roles/os_firewall/tasks/firewall/iptables.yml
@@ -6,6 +6,7 @@
with_items:
- iptables
- iptables-services
+ register: install_result
- name: Check if firewalld is installed
command: rpm -q firewalld
@@ -20,14 +21,15 @@
enabled: no
when: pkg_check.rc == 0
-- name: Start and enable iptables services
+- name: Reload systemd units
+ command: systemctl daemon-reload
+ when: install_result | changed
+
+- name: Start and enable iptables service
service:
- name: "{{ item }}"
+ name: iptables
state: started
enabled: yes
- with_items:
- - iptables
- - ip6tables
register: result
- name: need to pause here, otherwise the iptables service starting can sometimes cause ssh to fail
diff --git a/roles/os_update_latest/tasks/main.yml b/roles/os_update_latest/tasks/main.yml
new file mode 100644
index 000000000..4a2c3d47a
--- /dev/null
+++ b/roles/os_update_latest/tasks/main.yml
@@ -0,0 +1,3 @@
+---
+- name: Update all packages
+ yum: name=* state=latest
diff --git a/roles/os_zabbix/library/zbxapi.py b/roles/os_zabbix/library/zbxapi.py
new file mode 100755
index 000000000..f4f52909b
--- /dev/null
+++ b/roles/os_zabbix/library/zbxapi.py
@@ -0,0 +1,273 @@
+#!/usr/bin/env python
+
+# Copyright 2015 Red Hat Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Purpose: An ansible module to communicate with zabbix.
+#
+
+import json
+import httplib2
+import sys
+import os
+import re
+
+class ZabbixAPI(object):
+ '''
+ ZabbixAPI class
+ '''
+ classes = {
+ 'Action': ['create', 'delete', 'get', 'update'],
+ 'Alert': ['get'],
+ 'Application': ['create', 'delete', 'get', 'massadd', 'update'],
+ 'Configuration': ['export', 'import'],
+ 'Dcheck': ['get'],
+ 'Dhost': ['get'],
+ 'Drule': ['copy', 'create', 'delete', 'get', 'isreadable', 'iswritable', 'update'],
+ 'Dservice': ['get'],
+ 'Event': ['acknowledge', 'get'],
+ 'Graph': ['create', 'delete', 'get', 'update'],
+ 'Graphitem': ['get'],
+ 'Graphprototype': ['create', 'delete', 'get', 'update'],
+ 'History': ['get'],
+ 'Hostgroup': ['create', 'delete', 'get', 'isreadable', 'iswritable', 'massadd', 'massremove', 'massupdate', 'update'],
+ 'Hostinterface': ['create', 'delete', 'get', 'massadd', 'massremove', 'replacehostinterfaces', 'update'],
+ 'Host': ['create', 'delete', 'get', 'isreadable', 'iswritable', 'massadd', 'massremove', 'massupdate', 'update'],
+ 'Hostprototype': ['create', 'delete', 'get', 'isreadable', 'iswritable', 'update'],
+ 'Httptest': ['create', 'delete', 'get', 'isreadable', 'iswritable', 'update'],
+ 'Iconmap': ['create', 'delete', 'get', 'isreadable', 'iswritable', 'update'],
+ 'Image': ['create', 'delete', 'get', 'update'],
+ 'Item': ['create', 'delete', 'get', 'isreadable', 'iswritable', 'update'],
+ 'Itemprototype': ['create', 'delete', 'get', 'isreadable', 'iswritable', 'update'],
+ 'Maintenance': ['create', 'delete', 'get', 'update'],
+ 'Map': ['create', 'delete', 'get', 'isreadable', 'iswritable', 'update'],
+ 'Mediatype': ['create', 'delete', 'get', 'update'],
+ 'Proxy': ['create', 'delete', 'get', 'isreadable', 'iswritable', 'update'],
+ 'Screen': ['create', 'delete', 'get', 'update'],
+ 'Screenitem': ['create', 'delete', 'get', 'isreadable', 'iswritable', 'update', 'updatebyposition'],
+ 'Script': ['create', 'delete', 'execute', 'get', 'getscriptsbyhosts', 'update'],
+ 'Service': ['adddependencies', 'addtimes', 'create', 'delete', 'deletedependencies', 'deletetimes', 'get', 'getsla', 'isreadable', 'iswritable', 'update'],
+ 'Template': ['create', 'delete', 'get', 'isreadable', 'iswritable', 'massadd', 'massremove', 'massupdate', 'update'],
+ 'Templatescreen': ['copy', 'create', 'delete', 'get', 'isreadable', 'iswritable', 'update'],
+ 'Templatescreenitem': ['get'],
+ 'Trigger': ['adddependencies', 'create', 'delete', 'deletedependencies', 'get', 'isreadable', 'iswritable', 'update'],
+ 'Triggerprototype': ['create', 'delete', 'get', 'update'],
+ 'User': ['addmedia', 'create', 'delete', 'deletemedia', 'get', 'isreadable', 'iswritable', 'login', 'logout', 'update', 'updatemedia', 'updateprofile'],
+ 'Usergroup': ['create', 'delete', 'get', 'isreadable', 'iswritable', 'massadd', 'massupdate', 'update'],
+ 'Usermacro': ['create', 'createglobal', 'delete', 'deleteglobal', 'get', 'update', 'updateglobal'],
+ 'Usermedia': ['get'],
+ }
+
+ def __init__(self, data={}):
+ self.server = data['server'] or None
+ self.username = data['user'] or None
+ self.password = data['password'] or None
+ if any(map(lambda value: value == None, [self.server, self.username, self.password])):
+ print 'Please specify zabbix server url, username, and password.'
+ sys.exit(1)
+
+ self.verbose = data.has_key('verbose')
+ self.use_ssl = data.has_key('use_ssl')
+ self.auth = None
+
+ for class_name, method_names in self.classes.items():
+ #obj = getattr(self, class_name)(self)
+ #obj.__dict__
+ setattr(self, class_name.lower(), getattr(self, class_name)(self))
+
+ results = self.user.login(user=self.username, password=self.password)
+
+ if results[0]['status'] == '200':
+ if results[1].has_key('result'):
+ self.auth = results[1]['result']
+ elif results[1].has_key('error'):
+ print "Unable to authenticate with zabbix server. {0} ".format(results[1]['error'])
+ sys.exit(1)
+ else:
+ print "Error in call to zabbix. Http status: {0}.".format(results[0]['status'])
+ sys.exit(1)
+
+ def perform(self, method, params):
+ '''
+ This method calls your zabbix server.
+
+ It requires the following parameters in order for a proper request to be processed:
+
+ jsonrpc - the version of the JSON-RPC protocol used by the API; the Zabbix API implements JSON-RPC version 2.0;
+ method - the API method being called;
+ params - parameters that will be passed to the API method;
+ id - an arbitrary identifier of the request;
+ auth - a user authentication token; since we don't have one yet, it's set to null.
+ '''
+ http_method = "POST"
+ if params.has_key("http_method"):
+ http_method = params['http_method']
+
+ jsonrpc = "2.0"
+ if params.has_key('jsonrpc'):
+ jsonrpc = params['jsonrpc']
+
+ rid = 1
+ if params.has_key('id'):
+ rid = params['id']
+
+ http = None
+ if self.use_ssl:
+ http = httplib2.Http()
+ else:
+ http = httplib2.Http( disable_ssl_certificate_validation=True,)
+
+ headers = params.get('headers', {})
+ headers["Content-type"] = "application/json"
+
+ body = {
+ "jsonrpc": jsonrpc,
+ "method": method,
+ "params": params,
+ "id": rid,
+ 'auth': self.auth,
+ }
+
+ if method in ['user.login','api.version']:
+ del body['auth']
+
+ body = json.dumps(body)
+
+ if self.verbose:
+ print body
+ print method
+ print headers
+ httplib2.debuglevel = 1
+
+ response, results = http.request(self.server, http_method, body, headers)
+
+ if self.verbose:
+ print response
+ print results
+
+ try:
+ results = json.loads(results)
+ except ValueError as e:
+ results = {"error": e.message}
+
+ return response, results
+
+ '''
+ This bit of metaprogramming is where the ZabbixAPI subclasses are created.
+ For each of ZabbixAPI.classes we create a class from the key and methods
+ from the ZabbixAPI.classes values. We pass a reference to ZabbixAPI class
+ to each subclass in order for each to be able to call the perform method.
+ '''
+ @staticmethod
+ def meta(class_name, method_names):
+ # This meta method allows a class to add methods to it.
+ def meta_method(Class, method_name):
+ # This template method is a stub method for each of the subclass
+ # methods.
+ def template_method(self, **params):
+ return self.parent.perform(class_name.lower()+"."+method_name, params)
+ template_method.__doc__ = "https://www.zabbix.com/documentation/2.4/manual/api/reference/%s/%s" % (class_name.lower(), method_name)
+ template_method.__name__ = method_name
+ # this is where the template method is placed inside of the subclass
+ # e.g. setattr(User, "create", stub_method)
+ setattr(Class, template_method.__name__, template_method)
+
+ # This class call instantiates a subclass. e.g. User
+ Class=type(class_name, (object,), { '__doc__': "https://www.zabbix.com/documentation/2.4/manual/api/reference/%s" % class_name.lower() })
+ # This init method gets placed inside of the Class
+ # to allow it to be instantiated. A reference to the parent class(ZabbixAPI)
+ # is passed in to allow each class access to the perform method.
+ def __init__(self, parent):
+ self.parent = parent
+ # This attaches the init to the subclass. e.g. Create
+ setattr(Class, __init__.__name__, __init__)
+ # For each of our ZabbixAPI.classes dict values
+ # Create a method and attach it to our subclass.
+ # e.g. 'User': ['delete', 'get', 'updatemedia', 'updateprofile',
+ # 'update', 'iswritable', 'logout', 'addmedia', 'create',
+ # 'login', 'deletemedia', 'isreadable'],
+ # User.delete
+ # User.get
+ for method_name in method_names:
+ meta_method(Class, method_name)
+ # Return our subclass with all methods attached
+ return Class
+
+# Attach all ZabbixAPI.classes to ZabbixAPI class through metaprogramming
+for class_name, method_names in ZabbixAPI.classes.items():
+ setattr(ZabbixAPI, class_name, ZabbixAPI.meta(class_name, method_names))
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec = dict(
+ server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'),
+ user=dict(default=None, type='str'),
+ password=dict(default=None, type='str'),
+ zbx_class=dict(choices=ZabbixAPI.classes.keys()),
+ action=dict(default=None, type='str'),
+ params=dict(),
+ debug=dict(default=False, type='bool'),
+ ),
+ #supports_check_mode=True
+ )
+
+    user = module.params.get('user', None)
+    if not user:
+        user = os.environ.get('ZABBIX_USER')
+
+    pw = module.params.get('password', None)
+    if not pw:
+        pw = os.environ.get('ZABBIX_PASSWORD')
+
+ server = module.params['server']
+
+    api_data = {
+        'user': user,
+        'password': pw,
+        'server': server,
+    }
+
+    if module.params['debug']:
+        api_data['verbose'] = True
+
+ if not user or not pw or not server:
+        module.fail_json(msg='Please specify the user, password, and the zabbix server.')
+
+ zapi = ZabbixAPI(api_data)
+
+ zbx_class = module.params.get('zbx_class')
+ action = module.params.get('action')
+ params = module.params.get('params', {})
+
+
+ # Get the instance we are trying to call
+ zbx_class_inst = zapi.__getattribute__(zbx_class.lower())
+ # Get the instance's method we are trying to call
+ zbx_action_method = zapi.__getattribute__(zbx_class.capitalize()).__dict__[action]
+ # Make the call with the incoming params
+ results = zbx_action_method(zbx_class_inst, **params)
+
+ # Results Section
+ changed_state = False
+ status = results[0]['status']
+ if status not in ['200', '201']:
+ #changed_state = False
+ module.fail_json(msg="Http response: [%s] - Error: %s" % (str(results[0]), results[1]))
+
+ module.exit_json(**{'results': results[1]['result']})
+
+from ansible.module_utils.basic import *
+
+main()
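The metaprogramming in zbxapi.py is dense; a toy reduction of the same pattern may help. It builds one class at runtime with type() and attaches a stub method per API action via setattr (names below are illustrative, not part of the module):

```python
# Toy reduction of ZabbixAPI.meta: build a class at runtime and attach
# one stub method per API action name.
def make_api_class(class_name, method_names):
    Class = type(class_name, (object,), {})
    def add_method(method_name):
        def stub(self, **params):
            return "%s.%s(%r)" % (class_name.lower(), method_name, params)
        stub.__name__ = method_name
        setattr(Class, method_name, stub)
    for name in method_names:
        add_method(name)
    return Class

User = make_api_class('User', ['get', 'create'])
print(User().get(name='admin'))  # user.get({'name': 'admin'})
```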
diff --git a/roles/yum_repos/README.md b/roles/yum_repos/README.md
new file mode 100644
index 000000000..51ecd5d34
--- /dev/null
+++ b/roles/yum_repos/README.md
@@ -0,0 +1,113 @@
+Yum Repos
+=========
+
+This role allows easy deployment of yum repository config files.
+
+Requirements
+------------
+
+Yum
+
+Role Variables
+--------------
+
+| Name | Default value | |
+|-------------------|---------------|--------------------------------------------|
+| repo_files        | None          | List of repo file definitions (see examples below) |
+| repo_enabled | 1 | Should repos be enabled by default |
+| repo_gpgcheck | 1 | Should repo gpgcheck be enabled by default |
+
+Dependencies
+------------
+
+None.
+
+Example Playbook
+----------------
+
+A single repo file containing a single repo:
+ - hosts: servers
+ roles:
+ - role: yum_repos
+ repo_files:
+ - id: my_repo
+ repos:
+ - id: my_repo
+ name: My Awesome Repo
+ baseurl: https://my.awesome.repo/is/available/here
+ skip_if_unavailable: yes
+ gpgkey: https://my.awesome.repo/pubkey.gpg
+
+A single repo file containing a single repo, disabling gpgcheck:
+
+    - hosts: servers
+      roles:
+      - role: yum_repos
+        repo_files:
+        - id: my_other_repo
+          repos:
+          - id: my_other_repo
+            name: My Other Awesome Repo
+            baseurl: https://my.other.awesome.repo/is/available/here
+            gpgcheck: no
+
+A single repo file containing a single disabled repo:
+
+    - hosts: servers
+      roles:
+      - role: yum_repos
+        repo_files:
+        - id: my_other_repo
+          repos:
+          - id: my_other_repo
+            name: My Other Awesome Repo
+            baseurl: https://my.other.awesome.repo/is/available/here
+            enabled: no
+
+A single repo file containing multiple repos:
+
+    - hosts: servers
+      roles:
+      - role: yum_repos
+        repo_files:
+        - id: my_repos
+          repos:
+          - id: my_repo
+            name: My Awesome Repo
+            baseurl: https://my.awesome.repo/is/available/here
+            gpgkey: https://my.awesome.repo/pubkey.gpg
+          - id: my_other_repo
+            name: My Other Awesome Repo
+            baseurl: https://my.other.awesome.repo/is/available/here
+            gpgkey: https://my.other.awesome.repo/pubkey.gpg
+
+Multiple repo files containing multiple repos:
+
+    - hosts: servers
+      roles:
+      - role: yum_repos
+        repo_files:
+        - id: my_repos
+          repos:
+          - id: my_repo
+            name: My Awesome Repo
+            baseurl: https://my.awesome.repo/is/available/here
+            gpgkey: https://my.awesome.repo/pubkey.gpg
+          - id: my_other_repo
+            name: My Other Awesome Repo
+            baseurl: https://my.other.awesome.repo/is/available/here
+            gpgkey: https://my.other.awesome.repo/pubkey.gpg
+        - id: joes_repos
+          repos:
+          - id: joes_repo
+            name: Joe's Less Awesome Repo
+            baseurl: https://joes.repo/is/here
+            gpgkey: https://joes.repo/pubkey.gpg
+          - id: joes_otherrepo
+            name: Joe's Other Less Awesome Repo
+            baseurl: https://joes.repo/is/there
+            gpgkey: https://joes.repo/pubkey.gpg
+
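+Each entry in repo_files is rendered to /etc/yum.repos.d/<id>.repo, with one
+section per item in its repos list. For illustration (this output is not part
+of the role itself), the first playbook above produces a my_repo.repo file
+roughly like the following; key order may vary, enabled/gpgcheck come from the
+role defaults, and YAML parses `yes` as a boolean, hence `True`:
+
+    [my_repo]
+    name=My Awesome Repo
+    baseurl=https://my.awesome.repo/is/available/here
+    enabled=1
+    gpgcheck=1
+    skip_if_unavailable=True
+    gpgkey=https://my.awesome.repo/pubkey.gpg
+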
+License
+-------
+
+ASL 2.0
+
+Author Information
+------------------
+
+OpenShift Online Operations
diff --git a/roles/yum_repos/defaults/main.yml b/roles/yum_repos/defaults/main.yml
new file mode 100644
index 000000000..515fb7a4a
--- /dev/null
+++ b/roles/yum_repos/defaults/main.yml
@@ -0,0 +1,3 @@
+---
+repo_enabled: 1
+repo_gpgcheck: 1
diff --git a/roles/yum_repos/meta/main.yml b/roles/yum_repos/meta/main.yml
new file mode 100644
index 000000000..6b8374da9
--- /dev/null
+++ b/roles/yum_repos/meta/main.yml
@@ -0,0 +1,8 @@
+---
+galaxy_info:
+ author: openshift operations
+  description: Deploy yum repository config files
+ company: Red Hat, Inc.
+ license: ASL 2.0
+ min_ansible_version: 1.2
+dependencies: []
diff --git a/roles/yum_repos/tasks/main.yml b/roles/yum_repos/tasks/main.yml
new file mode 100644
index 000000000..a9903c6c6
--- /dev/null
+++ b/roles/yum_repos/tasks/main.yml
@@ -0,0 +1,47 @@
+---
+# Convert the legacy flat parameters (repo_tag, repo_name, ...) into the
+# repo_files list structure the rest of this role expects.
+# default('') rather than default(None) for the optional keys: a None result
+# is rendered as the literal string "None", which the template would then
+# write into the repo file.
+- set_fact:
+    repo_files:
+    - id: "{{ repo_tag }}"
+      repos:
+      - id: "{{ repo_tag }}"
+        name: "{{ repo_name }}"
+        baseurl: "{{ repo_baseurl }}"
+        enabled: "{{ repo_enabled }}"
+        gpgcheck: "{{ repo_gpg_check | default(repo_gpgcheck) }}"
+        sslverify: "{{ repo_sslverify | default('') }}"
+        sslclientcert: "{{ repo_sslclientcert | default('') }}"
+        sslclientkey: "{{ repo_sslclientkey | default('') }}"
+        gpgkey: "{{ repo_gpgkey | default('') }}"
+  when: repo_files is not defined
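+
+# For illustration, the conversion above lets legacy callers keep setting flat
+# variables such as (hypothetical values):
+#
+#   repo_tag: my_repo
+#   repo_name: My Awesome Repo
+#   repo_baseurl: https://my.awesome.repo/is/available/here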
+
+- name: Verify repo_files is a list
+ assert:
+ that:
+ - repo_files is iterable and repo_files is not string and repo_files is not mapping
+
+- name: Verify repo_files items have an id and a repos list
+ assert:
+ that:
+ - item is mapping
+ - "'id' in item"
+ - "'repos' in item"
+ - item.repos is iterable and item.repos is not string and item.repos is not mapping
+ with_items: repo_files
+
+- name: Verify that repo_files.repos have the required keys
+ assert:
+ that:
+ - item.1 is mapping
+ - "'id' in item.1"
+ - "'name' in item.1"
+ - "'baseurl' in item.1"
+ with_subelements:
+ - repo_files
+ - repos
+
+- name: Install a yum repo file for each repo_files entry
+  template:
+    src: yumrepo.j2
+    dest: /etc/yum.repos.d/{{ item.id }}.repo
+  with_items: repo_files
diff --git a/roles/yum_repos/templates/yumrepo.j2 b/roles/yum_repos/templates/yumrepo.j2
new file mode 100644
index 000000000..0dfdbfe43
--- /dev/null
+++ b/roles/yum_repos/templates/yumrepo.j2
@@ -0,0 +1,18 @@
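+{# 'item' is the current repo_files entry supplied by the with_items loop in
+   tasks/main.yml; each entry becomes one .repo file containing one section
+   per repo in its repos list. #}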
+{% set repos = item.repos %}
+{% for repo in repos %}
+[{{ repo.id }}]
+name={{ repo.name }}
+baseurl={{ repo.baseurl }}
+{# Normalize truthy values (1, "true", "yes") to 1 and anything else to 0 #}
+{% set repo_enabled_value = repo.enabled | default(repo_enabled) %}
+{% set enable_repo = 1 if (repo_enabled_value | int(0) == 1 or repo_enabled_value | lower in ['true', 'yes']) else 0 %}
+enabled={{ enable_repo }}
+{% set repo_gpgcheck_value = repo.gpgcheck | default(repo_gpgcheck) %}
+{% set enable_gpgcheck = 1 if (repo_gpgcheck_value | int(0) == 1 or repo_gpgcheck_value | lower in ['true', 'yes']) else 0 %}
+gpgcheck={{ enable_gpgcheck }}
+{# Pass any remaining repo options through verbatim; items() instead of the
+   Python-2-only iteritems(), skipping unset and empty values #}
+{% for key, value in repo.items() %}
+{% if key not in ['id', 'name', 'baseurl', 'enabled', 'gpgcheck'] and value is defined and value is not none and value != '' %}
+{{ key }}={{ value }}
+{% endif %}
+{% endfor %}
+
+{% endfor %}