-rw-r--r--   README.md                                                          1
-rw-r--r--   README_libvirt.md                                                 92
-rwxr-xr-x   bin/cluster                                                        6
-rwxr-xr-x   bin/ohi                                                          110
-rw-r--r--   bin/openshift-ansible-bin.spec                                    16
-rw-r--r--   bin/openshift_ansible/awsutil.py                                  40
-rwxr-xr-x   bin/opssh                                                         30
-rw-r--r--   inventory/libvirt/group_vars/all                                   2
-rw-r--r--   inventory/libvirt/hosts                                            2
l---------   playbooks/libvirt/openshift-cluster/filter_plugins                 1
-rw-r--r--   playbooks/libvirt/openshift-cluster/launch.yml                    65
-rw-r--r--   playbooks/libvirt/openshift-cluster/launch_instances.yml         102
-rw-r--r--   playbooks/libvirt/openshift-cluster/list.yml                      43
l---------   playbooks/libvirt/openshift-cluster/roles                          1
-rw-r--r--   playbooks/libvirt/openshift-cluster/terminate.yml                 41
-rw-r--r--   playbooks/libvirt/openshift-cluster/vars.yml                       7
-rw-r--r--   playbooks/libvirt/openshift-master/config.yml                     21
l---------   playbooks/libvirt/openshift-master/filter_plugins                  1
l---------   playbooks/libvirt/openshift-master/roles                           1
-rw-r--r--   playbooks/libvirt/openshift-master/vars.yml                        1
-rw-r--r--   playbooks/libvirt/openshift-node/config.yml                      102
l---------   playbooks/libvirt/openshift-node/filter_plugins                    1
l---------   playbooks/libvirt/openshift-node/roles                             1
-rw-r--r--   playbooks/libvirt/openshift-node/vars.yml                          1
-rw-r--r--   playbooks/libvirt/templates/domain.xml                            62
-rw-r--r--   playbooks/libvirt/templates/meta-data                              2
-rw-r--r--   playbooks/libvirt/templates/user-data                             10
-rw-r--r--   rel-eng/packages/openshift-ansible-bin                             2
-rw-r--r--   roles/ansible_tower/tasks/main.yaml                                6
-rw-r--r--   roles/openshift_ansible_inventory/README.md                       41
-rw-r--r--   roles/openshift_ansible_inventory/defaults/main.yml                4
-rw-r--r--   roles/openshift_ansible_inventory/handlers/main.yml                2
-rw-r--r--   roles/openshift_ansible_inventory/meta/main.yml                    8
-rw-r--r--   roles/openshift_ansible_inventory/tasks/main.yml                  11
-rw-r--r--   roles/openshift_ansible_inventory/templates/multi_ec2.yaml.j2     11
-rw-r--r--   roles/openshift_ansible_inventory/vars/main.yml                    2
-rw-r--r--   roles/yum_repos/README.md                                        113
-rw-r--r--   roles/yum_repos/defaults/main.yml                                  3
-rw-r--r--   roles/yum_repos/meta/main.yml                                      8
-rw-r--r--   roles/yum_repos/tasks/main.yml                                    47
-rw-r--r--   roles/yum_repos/templates/yumrepo.j2                              18
41 files changed, 1015 insertions(+), 23 deletions(-)
diff --git a/README.md b/README.md
index e7fa89930..87dbfc1ea 100644
--- a/README.md
+++ b/README.md
@@ -20,6 +20,7 @@ Setup
- Setup for a specific cloud:
- [AWS](README_AWS.md)
- [GCE](README_GCE.md)
+ - [local VMs](README_libvirt.md)
- Build
- [How to build the openshift-ansible rpms](BUILD.md)
diff --git a/README_libvirt.md b/README_libvirt.md
new file mode 100644
index 000000000..fd2eb57f6
--- /dev/null
+++ b/README_libvirt.md
@@ -0,0 +1,92 @@
+
+LIBVIRT Setup instructions
+==========================
+
+`libvirt` is an `openshift-ansible` provider that uses `libvirt` to create local VMs (CentOS 7 by default, see `playbooks/libvirt/openshift-cluster/vars.yml`), provisioned exactly the same way cloud VMs would be.
+
+This makes `libvirt` useful for developing, testing and debugging OpenShift and openshift-ansible locally on the developer’s workstation before going to the cloud.
+
+Install dependencies
+--------------------
+
+1. Install [dnsmasq](http://www.thekelleys.org.uk/dnsmasq/doc.html)
+2. Install [ebtables](http://ebtables.netfilter.org/)
+3. Install [qemu](http://wiki.qemu.org/Main_Page)
+4. Install [libvirt](http://libvirt.org/)
+5. Enable and start the libvirt daemon, e.g.:
+ * ``systemctl enable libvirtd``
+ * ``systemctl start libvirtd``
+6. [Grant libvirt access to your user¹](https://libvirt.org/aclpolkit.html)
+7. Check that your `$HOME` is accessible to the qemu user²
+
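+On Fedora, for example, steps 1 to 4 can typically be done with the command below (package names may vary on other distributions):
+
+```
+yum install dnsmasq ebtables qemu libvirt
+```
+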
+#### ¹ Depending on your distribution, libvirt access may be denied by default or may require a password at each access.
+
+You can test it with the following command:
+```
+virsh -c qemu:///system pool-list
+```
+
+If you get access-denied error messages, please read https://libvirt.org/acl.html and https://libvirt.org/aclpolkit.html.
+
+In short, if your libvirt has been compiled with Polkit support (e.g. Arch, Fedora 21), you can create `/etc/polkit-1/rules.d/50-org.libvirt.unix.manage.rules` as follows to grant `$USER` full access to libvirt:
+
+```
+sudo /bin/sh -c "cat - > /etc/polkit-1/rules.d/50-org.libvirt.unix.manage.rules" << EOF
+polkit.addRule(function(action, subject) {
+    if (action.id == "org.libvirt.unix.manage" &&
+        subject.user == "$USER") {
+        polkit.log("action=" + action);
+        polkit.log("subject=" + subject);
+        return polkit.Result.YES;
+    }
+});
+EOF
+```
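+
+Once the rule is in place, re-run `virsh -c qemu:///system pool-list`; it should now succeed without prompting for authentication.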
+
+If your libvirt has not been compiled with Polkit (e.g. Ubuntu 14.04.1 LTS), check the permissions on the libvirt UNIX socket:
+
+```
+ls -l /var/run/libvirt/libvirt-sock
+srwxrwx--- 1 root libvirtd 0 Feb 12 16:03 /var/run/libvirt/libvirt-sock
+
+usermod -a -G libvirtd $USER
+# $USER needs to log out and back in for the new group membership to take effect
+```
+
+(Replace `$USER` with your login name)
+
+#### ² QEMU runs as a specific user, which must have access to the VMs' drives
+
+All the disk drive resources needed by the VMs (base disk image, cloud-init files) are put inside `~/libvirt-storage-pool-openshift/`.
+
+As we’re using the `qemu:///system` instance of libvirt, qemu will run with a specific `user:group` distinct from your user. It is configured in `/etc/libvirt/qemu.conf`. That qemu user must have access to that libvirt storage pool.
+
+If your `$HOME` is world-readable, everything is fine. If your `$HOME` is private, `ansible` will fail with an error message like:
+
+```
+error: Cannot access storage file '$HOME/libvirt-storage-pool-openshift/lenaic-master-216d8.qcow2' (as uid:99, gid:78): Permission denied
+```
+
+To fix that issue, you have two options:
+* Set `libvirt_storage_pool_path` inside `playbooks/libvirt/openshift-cluster/launch.yml` and `playbooks/libvirt/openshift-cluster/terminate.yml` (see the example below) to a directory that is:
+  * backed by a filesystem with plenty of free disk space,
+  * writable by your user,
+  * accessible by the qemu user.
+* Grant the qemu user access to the storage pool.
+
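+For the first option, change the variable in the `vars` section of both playbooks; the path below is only an example:
+
+```
+libvirt_storage_pool_path: /srv/libvirt-storage-pool-openshift
+```
+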
+For the second option, on Arch Linux for example:
+
+```
+setfacl -m g:kvm:--x ~
+```
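+
+On Fedora, where qemu typically runs as the `qemu` group (check the `user` and `group` settings in `/etc/libvirt/qemu.conf`), the equivalent would be:
+
+```
+setfacl -m g:qemu:--x ~
+```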
+
+Test the setup
+--------------
+
+```
+cd openshift-ansible
+
+bin/cluster create -m 1 -n 3 libvirt lenaic
+
+bin/cluster terminate libvirt lenaic
+```
diff --git a/bin/cluster b/bin/cluster
index 36ab1da1b..ca227721e 100755
--- a/bin/cluster
+++ b/bin/cluster
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python2
# vim: expandtab:tabstop=4:shiftwidth=4
import argparse
@@ -94,6 +94,8 @@ class Cluster(object):
os.environ[key] = config.get('ec2', key)
inventory = '-i inventory/aws/ec2.py'
+ elif 'libvirt' == provider:
+ inventory = '-i inventory/libvirt/hosts'
else:
# this code should never be reached
raise ValueError("invalid PROVIDER {}".format(provider))
@@ -139,7 +141,7 @@ if __name__ == '__main__':
cluster = Cluster()
- providers = ['gce', 'aws']
+ providers = ['gce', 'aws', 'libvirt']
parser = argparse.ArgumentParser(
description='Python wrapper to ensure proper environment for OpenShift ansible playbooks',
)
diff --git a/bin/ohi b/bin/ohi
new file mode 100755
index 000000000..408961ee4
--- /dev/null
+++ b/bin/ohi
@@ -0,0 +1,110 @@
+#!/usr/bin/env python
+# vim: expandtab:tabstop=4:shiftwidth=4
+
+import argparse
+import traceback
+import sys
+import os
+import re
+import tempfile
+import time
+import subprocess
+import ConfigParser
+
+from openshift_ansible import awsutil
+from openshift_ansible.awsutil import ArgumentError
+
+CONFIG_MAIN_SECTION = 'main'
+CONFIG_HOST_TYPE_ALIAS_SECTION = 'host_type_aliases'
+CONFIG_INVENTORY_OPTION = 'inventory'
+
+class Ohi(object):
+ def __init__(self):
+ self.inventory = None
+ self.host_type_aliases = {}
+ self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
+
+ # Default the config path to /etc
+ self.config_path = os.path.join(os.path.sep, 'etc', \
+ 'openshift_ansible', \
+ 'openshift_ansible.conf')
+
+ self.parse_cli_args()
+ self.parse_config_file()
+
+ self.aws = awsutil.AwsUtil(self.inventory, self.host_type_aliases)
+
+ def run(self):
+ if self.args.list_host_types:
+ self.aws.print_host_types()
+ return 0
+
+ hosts = None
+ if self.args.host_type is not None and \
+ self.args.env is not None:
+ # Both env and host-type specified
+ hosts = self.aws.get_host_list(host_type=self.args.host_type, \
+ env=self.args.env)
+
+ if self.args.host_type is None and \
+ self.args.env is not None:
+ # Only env specified
+ hosts = self.aws.get_host_list(env=self.args.env)
+
+ if self.args.host_type is not None and \
+ self.args.env is None:
+ # Only host-type specified
+ hosts = self.aws.get_host_list(host_type=self.args.host_type)
+
+ if hosts is None:
+ # We weren't able to determine what they wanted to do
+ raise ArgumentError("Invalid combination of arguments")
+
+ for host in hosts:
+ print host
+ return 0
+
+ def parse_config_file(self):
+ if os.path.isfile(self.config_path):
+ config = ConfigParser.ConfigParser()
+ config.read(self.config_path)
+
+ if config.has_section(CONFIG_MAIN_SECTION) and \
+ config.has_option(CONFIG_MAIN_SECTION, CONFIG_INVENTORY_OPTION):
+ self.inventory = config.get(CONFIG_MAIN_SECTION, CONFIG_INVENTORY_OPTION)
+
+ self.host_type_aliases = {}
+ if config.has_section(CONFIG_HOST_TYPE_ALIAS_SECTION):
+ for alias in config.options(CONFIG_HOST_TYPE_ALIAS_SECTION):
+ value = config.get(CONFIG_HOST_TYPE_ALIAS_SECTION, alias).split(',')
+ self.host_type_aliases[alias] = value
+
+ def parse_cli_args(self):
+ """Setup the command line parser with the options we want
+ """
+
+ parser = argparse.ArgumentParser(description='Openshift Host Inventory')
+
+ parser.add_argument('--list-host-types', default=False, action='store_true',
+ help='List all of the host types')
+
+ parser.add_argument('-e', '--env', action="store",
+ help="Which environment to use")
+
+ parser.add_argument('-t', '--host-type', action="store",
+ help="Which host type to use")
+
+ self.args = parser.parse_args()
+
+
+if __name__ == '__main__':
+ if len(sys.argv) == 1:
+ print "\nError: No options given. Use --help to see the available options\n"
+ sys.exit(0)
+
+ try:
+ ohi = Ohi()
+ exitcode = ohi.run()
+ sys.exit(exitcode)
+ except ArgumentError as e:
+ print "\nError: %s\n" % e.message
diff --git a/bin/openshift-ansible-bin.spec b/bin/openshift-ansible-bin.spec
index f509bdd79..c7db6f684 100644
--- a/bin/openshift-ansible-bin.spec
+++ b/bin/openshift-ansible-bin.spec
@@ -1,6 +1,6 @@
Summary: OpenShift Ansible Scripts for working with metadata hosts
Name: openshift-ansible-bin
-Version: 0.0.5
+Version: 0.0.8
Release: 1%{?dist}
License: ASL 2.0
URL: https://github.com/openshift/openshift-ansible
@@ -23,7 +23,7 @@ mkdir -p %{buildroot}%{python_sitelib}/openshift_ansible
mkdir -p %{buildroot}/etc/bash_completion.d
mkdir -p %{buildroot}/etc/openshift_ansible
-cp -p ossh oscp opssh %{buildroot}%{_bindir}
+cp -p ossh oscp opssh ohi %{buildroot}%{_bindir}
cp -p openshift_ansible/* %{buildroot}%{python_sitelib}/openshift_ansible
cp -p ossh_bash_completion %{buildroot}/etc/bash_completion.d
@@ -36,6 +36,18 @@ cp -p openshift_ansible.conf.example %{buildroot}/etc/openshift_ansible/openshif
%config(noreplace) /etc/openshift_ansible/
%changelog
+* Mon Apr 13 2015 Thomas Wiest <twiest@redhat.com> 0.0.8-1
+- fixed bug in opssh where it wouldn't actually run pssh (twiest@redhat.com)
+
+* Mon Apr 13 2015 Thomas Wiest <twiest@redhat.com> 0.0.7-1
+- added the ability to run opssh and ohi on all hosts in an environment, as
+ well as all hosts of the same host-type regardless of environment
+ (twiest@redhat.com)
+- added ohi (twiest@redhat.com)
+
+* Thu Apr 09 2015 Thomas Wiest <twiest@redhat.com> 0.0.6-1
+- fixed bug where opssh would throw an exception if pssh returned a non-zero
+ exit code (twiest@redhat.com)
+
* Wed Apr 08 2015 Thomas Wiest <twiest@redhat.com> 0.0.5-1
- fixed the opssh default output behavior to be consistent with pssh. Also
fixed a bug in how directories are named for --outdir and --errdir.
diff --git a/bin/openshift_ansible/awsutil.py b/bin/openshift_ansible/awsutil.py
index 8fef0a24f..65b269930 100644
--- a/bin/openshift_ansible/awsutil.py
+++ b/bin/openshift_ansible/awsutil.py
@@ -5,6 +5,10 @@ import os
import json
import re
+class ArgumentError(Exception):
+ def __init__(self, message):
+ self.message = message
+
class AwsUtil(object):
def __init__(self, inventory_path=None, host_type_aliases={}):
self.host_type_aliases = host_type_aliases
@@ -128,15 +132,45 @@ class AwsUtil(object):
return self.alias_lookup[host_type]
return host_type
+ def gen_env_tag(self, env):
+ """Generate the environment tag
+ """
+ return "tag_environment_%s" % env
+
+ def gen_host_type_tag(self, host_type):
+ """Generate the host type tag
+ """
+ host_type = self.resolve_host_type(host_type)
+ return "tag_host-type_%s" % host_type
+
def gen_env_host_type_tag(self, host_type, env):
"""Generate the environment host type tag
"""
host_type = self.resolve_host_type(host_type)
return "tag_env-host-type_%s-%s" % (env, host_type)
- def get_host_list(self, host_type, env):
+ def get_host_list(self, host_type=None, env=None):
"""Get the list of hosts from the inventory using host-type and environment
"""
inv = self.get_inventory()
- host_type_tag = self.gen_env_host_type_tag(host_type, env)
- return inv[host_type_tag]
+
+ if host_type is not None and \
+ env is not None:
+ # Both host type and environment were specified
+ env_host_type_tag = self.gen_env_host_type_tag(host_type, env)
+ return inv[env_host_type_tag]
+
+ if host_type is None and \
+ env is not None:
+ # Just environment was specified
+ host_type_tag = self.gen_env_tag(env)
+ return inv[host_type_tag]
+
+ if host_type is not None and \
+ env is None:
+ # Just host-type was specified
+ host_type_tag = self.gen_host_type_tag(host_type)
+ return inv[host_type_tag]
+
+ # We should never reach here!
+ raise ArgumentError("Invalid combination of parameters")
diff --git a/bin/opssh b/bin/opssh
index ad1aadc29..a4fceb6a8 100755
--- a/bin/opssh
+++ b/bin/opssh
@@ -12,6 +12,7 @@ import subprocess
import ConfigParser
from openshift_ansible import awsutil
+from openshift_ansible.awsutil import ArgumentError
DEFAULT_PSSH_PAR = 200
PSSH = '/usr/bin/pssh'
@@ -19,7 +20,6 @@ CONFIG_MAIN_SECTION = 'main'
CONFIG_HOST_TYPE_ALIAS_SECTION = 'host_type_aliases'
CONFIG_INVENTORY_OPTION = 'inventory'
-
class Opssh(object):
def __init__(self):
self.inventory = None
@@ -36,21 +36,17 @@ class Opssh(object):
self.aws = awsutil.AwsUtil(self.inventory, self.host_type_aliases)
+ def run(self):
if self.args.list_host_types:
self.aws.print_host_types()
- return
-
- if self.args.env and \
- self.args.host_type and \
- self.args.command:
- retval = self.run_pssh()
- if retval != 0:
- raise ValueError("pssh run failed")
+ return 0
- return
+ if self.args.host_type is not None or \
+ self.args.env is not None:
+ return self.run_pssh()
- # If it makes it here, we weren't able to determine what they wanted to do
- raise ValueError("Invalid combination of arguments")
+ # We weren't able to determine what they wanted to do
+ raise ArgumentError("Invalid combination of arguments")
def run_pssh(self):
"""Actually run the pssh command based off of the supplied options
@@ -68,7 +64,9 @@ class Opssh(object):
if self.args.errdir:
pssh_args.extend(["--errdir", self.args.errdir])
- hosts = self.aws.get_host_list(self.args.host_type, self.args.env)
+ hosts = self.aws.get_host_list(host_type=self.args.host_type,
+ env=self.args.env)
+
with tempfile.NamedTemporaryFile(prefix='opssh-', delete=True) as f:
for h in hosts:
f.write(h + os.linesep)
@@ -111,7 +109,7 @@ class Opssh(object):
parser.add_argument('-e', '--env', action="store",
help="Which environment to use")
- parser.add_argument('-t', '--host-type', action="store",
+ parser.add_argument('-t', '--host-type', action="store", default=None,
help="Which host type to use")
parser.add_argument('-c', '--command', action='store',
@@ -142,5 +140,7 @@ if __name__ == '__main__':
try:
opssh = Opssh()
- except ValueError as e:
+ exitcode = opssh.run()
+ sys.exit(exitcode)
+ except ArgumentError as e:
print "\nError: %s\n" % e.message
diff --git a/inventory/libvirt/group_vars/all b/inventory/libvirt/group_vars/all
new file mode 100644
index 000000000..b22da00de
--- /dev/null
+++ b/inventory/libvirt/group_vars/all
@@ -0,0 +1,2 @@
+---
+ansible_ssh_user: root
diff --git a/inventory/libvirt/hosts b/inventory/libvirt/hosts
new file mode 100644
index 000000000..6a818f268
--- /dev/null
+++ b/inventory/libvirt/hosts
@@ -0,0 +1,2 @@
+# Eventually we'll add the GCE, AWS, etc dynamic inventories, but for now...
+localhost ansible_python_interpreter=/usr/bin/python2
diff --git a/playbooks/libvirt/openshift-cluster/filter_plugins b/playbooks/libvirt/openshift-cluster/filter_plugins
new file mode 120000
index 000000000..99a95e4ca
--- /dev/null
+++ b/playbooks/libvirt/openshift-cluster/filter_plugins
@@ -0,0 +1 @@
+../../../filter_plugins
\ No newline at end of file
diff --git a/playbooks/libvirt/openshift-cluster/launch.yml b/playbooks/libvirt/openshift-cluster/launch.yml
new file mode 100644
index 000000000..6f2df33af
--- /dev/null
+++ b/playbooks/libvirt/openshift-cluster/launch.yml
@@ -0,0 +1,65 @@
+- name: Launch instance(s)
+ hosts: localhost
+ connection: local
+ gather_facts: no
+
+ vars:
+ libvirt_storage_pool_path: "{{ lookup('env','HOME') }}/libvirt-storage-pool-openshift"
+ libvirt_storage_pool: 'openshift'
+ libvirt_uri: 'qemu:///system'
+
+ vars_files:
+ - vars.yml
+
+ tasks:
+ - set_fact:
+ k8s_type: master
+
+ - name: Generate master instance name(s)
+ set_fact:
+ scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ '%05x' | format( 1048576 | random ) }}"
+ register: master_names_output
+ with_sequence: start=1 end='{{ num_masters }}'
+
+ - set_fact:
+ master_names: "{{ master_names_output.results | oo_collect('ansible_facts') | oo_collect('scratch_name') }}"
+
+ - include: launch_instances.yml
+ vars:
+ instances: '{{ master_names }}'
+ cluster: '{{ cluster_id }}'
+ type: '{{ k8s_type }}'
+ group_name: 'tag_env-host-type-{{ cluster_id }}-openshift-master'
+
+ - set_fact:
+ k8s_type: node
+
+ - name: Generate node instance name(s)
+ set_fact:
+ scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ '%05x' | format( 1048576 | random ) }}"
+ register: node_names_output
+ with_sequence: start=1 end='{{ num_nodes }}'
+
+ - set_fact:
+ node_names: "{{ node_names_output.results | oo_collect('ansible_facts') | oo_collect('scratch_name') }}"
+
+ - include: launch_instances.yml
+ vars:
+ instances: '{{ node_names }}'
+ cluster: '{{ cluster_id }}'
+ type: '{{ k8s_type }}'
+
+- hosts: 'tag_env-{{ cluster_id }}'
+ roles:
+ - openshift_repos
+ - os_update_latest
+
+- include: ../openshift-master/config.yml
+ vars:
+ oo_host_group_exp: 'groups["tag_env-host-type-{{ cluster_id }}-openshift-master"]'
+ oo_env: '{{ cluster_id }}'
+
+- include: ../openshift-node/config.yml
+ vars:
+ oo_host_group_exp: 'groups["tag_env-host-type-{{ cluster_id }}-openshift-node"]'
+ oo_env: '{{ cluster_id }}'
diff --git a/playbooks/libvirt/openshift-cluster/launch_instances.yml b/playbooks/libvirt/openshift-cluster/launch_instances.yml
new file mode 100644
index 000000000..3bbcae981
--- /dev/null
+++ b/playbooks/libvirt/openshift-cluster/launch_instances.yml
@@ -0,0 +1,102 @@
+- name: Create the libvirt storage directory for openshift
+ file:
+ dest: '{{ libvirt_storage_pool_path }}'
+ state: directory
+
+- name: Download Base Cloud image
+ get_url:
+ url: '{{ base_image_url }}'
+ sha256sum: '{{ base_image_sha256 }}'
+ dest: '{{ libvirt_storage_pool_path }}/{{ base_image_name }}'
+
+- name: Create the cloud-init config drive path
+ file:
+ dest: '{{ libvirt_storage_pool_path }}/{{ item }}_configdrive/openstack/latest'
+ state: directory
+ with_items: '{{ instances }}'
+
+- name: Create the cloud-init config drive files
+ template:
+ src: '{{ item[1] }}'
+ dest: '{{ libvirt_storage_pool_path }}/{{ item[0] }}_configdrive/openstack/latest/{{ item[1] }}'
+ with_nested:
+ - '{{ instances }}'
+ - [ user-data, meta-data ]
+
+- name: Create the cloud-init config drive
+ command: 'genisoimage -output {{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso -volid cidata -joliet -rock user-data meta-data'
+ args:
+ chdir: '{{ libvirt_storage_pool_path }}/{{ item }}_configdrive/openstack/latest'
+ creates: '{{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso'
+ with_items: '{{ instances }}'
+
+- name: Create the libvirt storage pool for openshift
+ command: 'virsh -c {{ libvirt_uri }} pool-create-as {{ libvirt_storage_pool }} dir --target {{ libvirt_storage_pool_path }}'
+ ignore_errors: yes
+
+- name: Refresh the libvirt storage pool for openshift
+ command: 'virsh -c {{ libvirt_uri }} pool-refresh {{ libvirt_storage_pool }}'
+
+- name: Create VMs drives
+ command: 'virsh -c {{ libvirt_uri }} vol-create-as {{ libvirt_storage_pool }} {{ item }}.qcow2 10G --format qcow2 --backing-vol {{ base_image_name }} --backing-vol-format qcow2'
+ with_items: '{{ instances }}'
+
+- name: Create VMs
+ virt:
+ name: '{{ item }}'
+ command: define
+ xml: "{{ lookup('template', '../templates/domain.xml') }}"
+ uri: '{{ libvirt_uri }}'
+ with_items: '{{ instances }}'
+
+- name: Start VMs
+ virt:
+ name: '{{ item }}'
+ state: running
+ uri: '{{ libvirt_uri }}'
+ with_items: '{{ instances }}'
+
+- name: Collect MAC addresses of the VMs
+ shell: 'virsh -c {{ libvirt_uri }} dumpxml {{ item }} | xmllint --xpath "string(//domain/devices/interface/mac/@address)" -'
+ register: scratch_mac
+ with_items: '{{ instances }}'
+
+- name: Wait for the VMs to get an IP
+ command: "egrep -c '{{ scratch_mac.results | oo_collect('stdout') | join('|') }}' /proc/net/arp"
+ ignore_errors: yes
+ register: nb_allocated_ips
+ until: nb_allocated_ips.stdout == '{{ instances | length }}'
+ retries: 30
+ delay: 1
+
+- name: Collect IP addresses of the VMs
+ shell: "awk '/{{ item.stdout }}/ {print $1}' /proc/net/arp"
+ register: scratch_ip
+ with_items: '{{ scratch_mac.results }}'
+
+- set_fact:
+ ips: "{{ scratch_ip.results | oo_collect('stdout') }}"
+
+- name: Add new instances
+ add_host:
+ hostname: '{{ item.0 }}'
+ ansible_ssh_host: '{{ item.1 }}'
+ ansible_ssh_user: root
+ groups: 'tag_env-{{ cluster }}, tag_host-type-{{ type }}, tag_env-host-type-{{ cluster }}-openshift-{{ type }}'
+ with_together:
+ - instances
+ - ips
+
+- name: Wait for ssh
+ wait_for:
+ host: '{{ item }}'
+ port: 22
+ with_items: ips
+
+- name: Wait for root user setup
+ command: 'ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null root@{{ item }} echo root user is setup'
+ register: result
+ until: result.rc == 0
+ retries: 30
+ delay: 1
+ with_items: ips
diff --git a/playbooks/libvirt/openshift-cluster/list.yml b/playbooks/libvirt/openshift-cluster/list.yml
new file mode 100644
index 000000000..6bf07e3c6
--- /dev/null
+++ b/playbooks/libvirt/openshift-cluster/list.yml
@@ -0,0 +1,43 @@
+- name: Generate oo_list_hosts group
+ hosts: localhost
+ connection: local
+ gather_facts: no
+
+ vars:
+ libvirt_uri: 'qemu:///system'
+
+ tasks:
+ - name: List VMs
+ virt:
+ command: list_vms
+ register: list_vms
+
+ - name: Collect MAC addresses of the VMs
+ shell: 'virsh -c {{ libvirt_uri }} dumpxml {{ item }} | xmllint --xpath "string(//domain/devices/interface/mac/@address)" -'
+ register: scratch_mac
+ with_items: '{{ list_vms.list_vms }}'
+ when: item|truncate(cluster_id|length+1, True) == '{{ cluster_id }}-...'
+
+ - name: Collect IP addresses of the VMs
+ shell: "awk '/{{ item.stdout }}/ {print $1}' /proc/net/arp"
+ register: scratch_ip
+ with_items: '{{ scratch_mac.results }}'
+ when: item.skipped is not defined
+
+ - name: Add hosts
+ add_host:
+ hostname: '{{ item[0] }}'
+ ansible_ssh_host: '{{ item[1].stdout }}'
+ ansible_ssh_user: root
+ groups: oo_list_hosts
+ with_together:
+ - '{{ list_vms.list_vms }}'
+ - '{{ scratch_ip.results }}'
+ when: item[1].skipped is not defined
+
+- name: List Hosts
+ hosts: oo_list_hosts
+
+ tasks:
+ - debug:
+ msg: 'public:{{ansible_default_ipv4.address}} private:{{ansible_default_ipv4.address}}'
diff --git a/playbooks/libvirt/openshift-cluster/roles b/playbooks/libvirt/openshift-cluster/roles
new file mode 120000
index 000000000..20c4c58cf
--- /dev/null
+++ b/playbooks/libvirt/openshift-cluster/roles
@@ -0,0 +1 @@
+../../../roles
\ No newline at end of file
diff --git a/playbooks/libvirt/openshift-cluster/terminate.yml b/playbooks/libvirt/openshift-cluster/terminate.yml
new file mode 100644
index 000000000..c609169d3
--- /dev/null
+++ b/playbooks/libvirt/openshift-cluster/terminate.yml
@@ -0,0 +1,41 @@
+- name: Terminate instance(s)
+ hosts: localhost
+ connection: local
+ gather_facts: no
+
+ vars:
+ libvirt_storage_pool_path: "{{ lookup('env','HOME') }}/libvirt-storage-pool-openshift"
+ libvirt_storage_pool: 'openshift'
+ libvirt_uri: 'qemu:///system'
+
+ tasks:
+ - name: List VMs
+ virt:
+ command: list_vms
+ register: list_vms
+
+ - name: Destroy VMs
+ virt:
+ name: '{{ item[0] }}'
+ command: '{{ item[1] }}'
+ uri: '{{ libvirt_uri }}'
+ with_nested:
+ - '{{ list_vms.list_vms }}'
+ - [ destroy, undefine ]
+ when: item[0]|truncate(cluster_id|length+1, True) == '{{ cluster_id }}-...'
+
+ - name: Delete VMs config drive
+ file:
+ path: '{{ libvirt_storage_pool_path }}/{{ item }}_configdrive/openstack'
+ state: absent
+ with_items: '{{ list_vms.list_vms }}'
+ when: item|truncate(cluster_id|length+1, True) == '{{ cluster_id }}-...'
+
+ - name: Delete VMs drives
+ command: 'virsh -c {{ libvirt_uri }} vol-delete --pool {{ libvirt_storage_pool }} {{ item[0] }}{{ item[1] }}'
+ args:
+ removes: '{{ libvirt_storage_pool_path }}/{{ item[0] }}{{ item[1] }}'
+ with_nested:
+ - '{{ list_vms.list_vms }}'
+ - [ '_configdrive', '_cloud-init.iso', '.qcow2' ]
+ when: item[0]|truncate(cluster_id|length+1, True) == '{{ cluster_id }}-...'
diff --git a/playbooks/libvirt/openshift-cluster/vars.yml b/playbooks/libvirt/openshift-cluster/vars.yml
new file mode 100644
index 000000000..4e4eecd46
--- /dev/null
+++ b/playbooks/libvirt/openshift-cluster/vars.yml
@@ -0,0 +1,7 @@
+# base_image_url: http://download.fedoraproject.org/pub/fedora/linux/releases/21/Cloud/Images/x86_64/Fedora-Cloud-Base-20141203-21.x86_64.qcow2
+# base_image_name: Fedora-Cloud-Base-20141203-21.x86_64.qcow2
+# base_image_sha256: 3a99bb89f33e3d4ee826c8160053cdb8a72c80cd23350b776ce73cd244467d86
+
+base_image_url: http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2
+base_image_name: CentOS-7-x86_64-GenericCloud.qcow2
+base_image_sha256: e324e3ab1d24a1bbf035ddb365e7f9058c0b454acf48d7aa15c5519fae5998ab
diff --git a/playbooks/libvirt/openshift-master/config.yml b/playbooks/libvirt/openshift-master/config.yml
new file mode 100644
index 000000000..dd95fd57f
--- /dev/null
+++ b/playbooks/libvirt/openshift-master/config.yml
@@ -0,0 +1,21 @@
+- name: master/config.yml, populate oo_masters_to_config host group if needed
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - name: "Evaluate oo_host_group_exp if it's set"
+ add_host:
+ name: '{{ item }}'
+ groups: oo_masters_to_config
+ with_items: "{{ oo_host_group_exp | default('') }}"
+ when: oo_host_group_exp is defined
+
+- name: Configure instances
+ hosts: oo_masters_to_config
+ vars:
+ openshift_hostname: '{{ ansible_default_ipv4.address }}'
+ vars_files:
+ - vars.yml
+ roles:
+ - openshift_master
+ - pods
+ - os_env_extras
diff --git a/playbooks/libvirt/openshift-master/filter_plugins b/playbooks/libvirt/openshift-master/filter_plugins
new file mode 120000
index 000000000..99a95e4ca
--- /dev/null
+++ b/playbooks/libvirt/openshift-master/filter_plugins
@@ -0,0 +1 @@
+../../../filter_plugins
\ No newline at end of file
diff --git a/playbooks/libvirt/openshift-master/roles b/playbooks/libvirt/openshift-master/roles
new file mode 120000
index 000000000..20c4c58cf
--- /dev/null
+++ b/playbooks/libvirt/openshift-master/roles
@@ -0,0 +1 @@
+../../../roles
\ No newline at end of file
diff --git a/playbooks/libvirt/openshift-master/vars.yml b/playbooks/libvirt/openshift-master/vars.yml
new file mode 100644
index 000000000..ad0c0fbe2
--- /dev/null
+++ b/playbooks/libvirt/openshift-master/vars.yml
@@ -0,0 +1 @@
+openshift_debug_level: 4
diff --git a/playbooks/libvirt/openshift-node/config.yml b/playbooks/libvirt/openshift-node/config.yml
new file mode 100644
index 000000000..3244a8046
--- /dev/null
+++ b/playbooks/libvirt/openshift-node/config.yml
@@ -0,0 +1,102 @@
+- name: node/config.yml, populate oo_nodes_to_config host group if needed
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - name: "Evaluate oo_host_group_exp if it's set"
+ add_host:
+ name: '{{ item }}'
+ groups: oo_nodes_to_config
+ with_items: "{{ oo_host_group_exp | default('') }}"
+ when: oo_host_group_exp is defined
+
+ - add_host:
+ name: "{{ groups['tag_env-host-type-' ~ cluster_id ~ '-openshift-master'][0] }}"
+ groups: oo_first_master
+ when: oo_host_group_exp is defined
+
+
+- name: Gather and set facts for hosts to configure
+ hosts: oo_nodes_to_config
+ roles:
+ - openshift_facts
+ tasks:
+ # Since the master is registering the nodes before they are configured, we
+ # need to make sure to set the node properties beforehand if we do not want
+ # the defaults
+ - openshift_facts:
+ role: "{{ item.role }}"
+ local_facts: "{{ item.local_facts }}"
+ with_items:
+ - role: common
+ local_facts:
+ hostname: "{{ ansible_default_ipv4.address }}"
+ - role: node
+ local_facts:
+ external_id: "{{ openshift_node_external_id | default(None) }}"
+ resources_cpu: "{{ openshift_node_resources_cpu | default(None) }}"
+ resources_memory: "{{ openshift_node_resources_memory | default(None) }}"
+ pod_cidr: "{{ openshift_node_pod_cidr | default(None) }}"
+ labels: "{{ openshift_node_labels | default(None) }}"
+ annotations: "{{ openshift_node_annotations | default(None) }}"
+
+
+- name: Register nodes
+ hosts: oo_first_master
+ vars:
+ openshift_nodes: "{{ hostvars
+ | oo_select_keys(groups['oo_nodes_to_config']) }}"
+ roles:
+ - openshift_register_nodes
+ tasks:
+ - name: Create local temp directory for syncing certs
+ local_action: command /usr/bin/mktemp -d /tmp/openshift-ansible-XXXXXXX
+ register: mktemp
+
+ - name: Sync master certs to localhost
+ synchronize:
+ mode: pull
+ checksum: yes
+ src: /var/lib/openshift/openshift.local.certificates
+ dest: "{{ mktemp.stdout }}"
+
+- name: Configure instances
+ hosts: oo_nodes_to_config
+ vars_files:
+ - vars.yml
+ vars:
+ sync_tmpdir: "{{ hostvars[groups['oo_first_master'][0]].mktemp.stdout }}"
+ cert_parent_rel_path: openshift.local.certificates
+ cert_rel_path: "{{ cert_parent_rel_path }}/node-{{ openshift.common.hostname }}"
+ cert_base_path: /var/lib/openshift
+ cert_parent_path: "{{ cert_base_path }}/{{ cert_parent_rel_path }}"
+ cert_path: "{{ cert_base_path }}/{{ cert_rel_path }}"
+ pre_tasks:
+ - name: Ensure certificate directories exist
+ file:
+ path: "{{ item }}"
+ state: directory
+ with_items:
+ - "{{ cert_path }}"
+ - "{{ cert_parent_path }}/ca"
+
+ # TODO: notify restart openshift-node and/or restart openshift-sdn-node,
+ # possibly test service started time against certificate/config file
+ # timestamps in openshift-node or openshift-sdn-node to trigger notify
+ - name: Sync certs to nodes
+ synchronize:
+ checksum: yes
+ src: "{{ item.src }}"
+ dest: "{{ item.dest }}"
+ owner: no
+ group: no
+ with_items:
+ - src: "{{ sync_tmpdir }}/{{ cert_rel_path }}"
+ dest: "{{ cert_parent_path }}"
+ - src: "{{ sync_tmpdir }}/{{ cert_parent_rel_path }}/ca/cert.crt"
+ dest: "{{ cert_parent_path }}/ca/cert.crt"
+ - local_action: file name={{ sync_tmpdir }} state=absent
+ run_once: true
+ roles:
+ - openshift_node
+ - os_env_extras
+ - os_env_extras_node
diff --git a/playbooks/libvirt/openshift-node/filter_plugins b/playbooks/libvirt/openshift-node/filter_plugins
new file mode 120000
index 000000000..99a95e4ca
--- /dev/null
+++ b/playbooks/libvirt/openshift-node/filter_plugins
@@ -0,0 +1 @@
+../../../filter_plugins
\ No newline at end of file
diff --git a/playbooks/libvirt/openshift-node/roles b/playbooks/libvirt/openshift-node/roles
new file mode 120000
index 000000000..20c4c58cf
--- /dev/null
+++ b/playbooks/libvirt/openshift-node/roles
@@ -0,0 +1 @@
+../../../roles
\ No newline at end of file
diff --git a/playbooks/libvirt/openshift-node/vars.yml b/playbooks/libvirt/openshift-node/vars.yml
new file mode 100644
index 000000000..ad0c0fbe2
--- /dev/null
+++ b/playbooks/libvirt/openshift-node/vars.yml
@@ -0,0 +1 @@
+openshift_debug_level: 4
diff --git a/playbooks/libvirt/templates/domain.xml b/playbooks/libvirt/templates/domain.xml
new file mode 100644
index 000000000..da037d138
--- /dev/null
+++ b/playbooks/libvirt/templates/domain.xml
@@ -0,0 +1,62 @@
+<domain type='kvm' id='8'>
+ <name>{{ item }}</name>
+ <memory unit='GiB'>1</memory>
+ <currentMemory unit='GiB'>1</currentMemory>
+ <vcpu placement='static'>2</vcpu>
+ <os>
+ <type arch='x86_64' machine='pc'>hvm</type>
+ <boot dev='hd'/>
+ </os>
+ <features>
+ <acpi/>
+ <apic/>
+ <pae/>
+ </features>
+ <clock offset='utc'>
+ <timer name='rtc' tickpolicy='catchup'/>
+ <timer name='pit' tickpolicy='delay'/>
+ <timer name='hpet' present='no'/>
+ </clock>
+ <on_poweroff>destroy</on_poweroff>
+ <on_reboot>restart</on_reboot>
+ <on_crash>restart</on_crash>
+ <devices>
+ <emulator>/usr/bin/qemu-system-x86_64</emulator>
+ <disk type='file' device='disk'>
+ <driver name='qemu' type='qcow2'/>
+ <source file='{{ libvirt_storage_pool_path }}/{{ item }}.qcow2'/>
+ <target dev='vda' bus='virtio'/>
+ </disk>
+ <disk type='file' device='cdrom'>
+ <driver name='qemu' type='raw'/>
+ <source file='{{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso'/>
+ <target dev='vdb' bus='virtio'/>
+ <readonly/>
+ </disk>
+ <controller type='usb' index='0' />
+ <interface type='network'>
+ <source network='default'/>
+ <model type='virtio'/>
+ </interface>
+ <serial type='pty'>
+ <target port='0'/>
+ </serial>
+ <console type='pty'>
+ <target type='serial' port='0'/>
+ </console>
+ <channel type='spicevmc'>
+ <target type='virtio' name='com.redhat.spice.0'/>
+ </channel>
+ <input type='tablet' bus='usb' />
+ <input type='mouse' bus='ps2'/>
+ <input type='keyboard' bus='ps2'/>
+ <graphics type='spice' autoport='yes' />
+ <video>
+ <model type='qxl' ram='65536' vram='65536' vgamem='16384' heads='1'/>
+ </video>
+ <redirdev bus='usb' type='spicevmc'>
+ </redirdev>
+ <memballoon model='virtio'>
+ </memballoon>
+ </devices>
+</domain>
diff --git a/playbooks/libvirt/templates/meta-data b/playbooks/libvirt/templates/meta-data
new file mode 100644
index 000000000..5d779519f
--- /dev/null
+++ b/playbooks/libvirt/templates/meta-data
@@ -0,0 +1,2 @@
+instance-id: {{ item[0] }}
+local-hostname: {{ item[0] }}
diff --git a/playbooks/libvirt/templates/user-data b/playbooks/libvirt/templates/user-data
new file mode 100644
index 000000000..985badc8e
--- /dev/null
+++ b/playbooks/libvirt/templates/user-data
@@ -0,0 +1,10 @@
+#cloud-config
+
+disable_root: 0
+
+system_info:
+ default_user:
+ name: root
+
+ssh_authorized_keys:
+ - {{ lookup('file', '~/.ssh/id_rsa.pub') }}
diff --git a/rel-eng/packages/openshift-ansible-bin b/rel-eng/packages/openshift-ansible-bin
index 99ae75e8b..500e1f4b1 100644
--- a/rel-eng/packages/openshift-ansible-bin
+++ b/rel-eng/packages/openshift-ansible-bin
@@ -1 +1 @@
-0.0.5-1 bin/
+0.0.8-1 bin/
diff --git a/roles/ansible_tower/tasks/main.yaml b/roles/ansible_tower/tasks/main.yaml
index f58a5b1c2..e9bde9478 100644
--- a/roles/ansible_tower/tasks/main.yaml
+++ b/roles/ansible_tower/tasks/main.yaml
@@ -25,3 +25,9 @@
- name: Open firewalld port for https
firewalld: port=8080/tcp permanent=true state=enabled
+- name: Set (httpd_can_network_connect) flag on and keep it persistent across reboots
+ seboolean: name=httpd_can_network_connect state=yes persistent=yes
+
+- name: Set (httpd_can_network_connect_db) flag on and keep it persistent across reboots
+ seboolean: name=httpd_can_network_connect_db state=yes persistent=yes
+
diff --git a/roles/openshift_ansible_inventory/README.md b/roles/openshift_ansible_inventory/README.md
new file mode 100644
index 000000000..69a07effd
--- /dev/null
+++ b/roles/openshift_ansible_inventory/README.md
@@ -0,0 +1,41 @@
+Openshift Ansible Inventory
+=========
+
+Install and configure openshift-ansible-inventory.
+
+Requirements
+------------
+
+None
+
+Role Variables
+--------------
+
+* `oo_inventory_group`: group owning the generated `/etc/ansible/multi_ec2.yaml` (default: `root`)
+* `oo_inventory_owner`: owner of the generated `/etc/ansible/multi_ec2.yaml` (default: `root`)
+* `oo_inventory_accounts`: list of accounts (name, provider, credentials) to aggregate in the multi_ec2 inventory
+* `oo_inventory_cache_max_age`: maximum age of the inventory cache, in seconds (default: `1800`)
+
+Dependencies
+------------
+
+None
+
+Example Playbook
+----------------
+
+An example of how to use this role, with variables passed in as role parameters, is shown below.
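+
+The account name, provider script path and credentials are only placeholders to adapt to your own setup:
+
+    - hosts: inventory_host
+      roles:
+        - role: openshift_ansible_inventory
+          oo_inventory_cache_max_age: 1800
+          oo_inventory_accounts:
+            # placeholder AWS account entry, rendered into /etc/ansible/multi_ec2.yaml
+            - name: aws_account_1
+              provider: aws/hosts/ec2.py
+              env_vars:
+                AWS_ACCESS_KEY_ID: XXXXXXXXXXXXXXXXXXXX
+                AWS_SECRET_ACCESS_KEY: xxxxxxxxxxxxxxxxxxxxxxxx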
+
+License
+-------
+
+ASL 2.0
+
+Author Information
+------------------
+
+Openshift operations, Red Hat, Inc
diff --git a/roles/openshift_ansible_inventory/defaults/main.yml b/roles/openshift_ansible_inventory/defaults/main.yml
new file mode 100644
index 000000000..f53c00c80
--- /dev/null
+++ b/roles/openshift_ansible_inventory/defaults/main.yml
@@ -0,0 +1,4 @@
+---
+oo_inventory_group: root
+oo_inventory_owner: root
+oo_inventory_cache_max_age: 1800
diff --git a/roles/openshift_ansible_inventory/handlers/main.yml b/roles/openshift_ansible_inventory/handlers/main.yml
new file mode 100644
index 000000000..e2db43477
--- /dev/null
+++ b/roles/openshift_ansible_inventory/handlers/main.yml
@@ -0,0 +1,2 @@
+---
+# handlers file for openshift_ansible_inventory
diff --git a/roles/openshift_ansible_inventory/meta/main.yml b/roles/openshift_ansible_inventory/meta/main.yml
new file mode 100644
index 000000000..ff3df0a7d
--- /dev/null
+++ b/roles/openshift_ansible_inventory/meta/main.yml
@@ -0,0 +1,8 @@
+---
+galaxy_info:
+ author: Openshift
+ description: Install and configure openshift-ansible-inventory
+ company: Red Hat, Inc
+ license: ASL 2.0
+ min_ansible_version: 1.2
+dependencies: []
diff --git a/roles/openshift_ansible_inventory/tasks/main.yml b/roles/openshift_ansible_inventory/tasks/main.yml
new file mode 100644
index 000000000..3990d5750
--- /dev/null
+++ b/roles/openshift_ansible_inventory/tasks/main.yml
@@ -0,0 +1,11 @@
+---
+- yum:
+ name: openshift-ansible-inventory
+ state: present
+
+- template:
+ src: multi_ec2.yaml.j2
+ dest: /etc/ansible/multi_ec2.yaml
+ group: "{{ oo_inventory_group }}"
+ owner: "{{ oo_inventory_owner }}"
+ mode: "0640"
diff --git a/roles/openshift_ansible_inventory/templates/multi_ec2.yaml.j2 b/roles/openshift_ansible_inventory/templates/multi_ec2.yaml.j2
new file mode 100644
index 000000000..23dfe73b8
--- /dev/null
+++ b/roles/openshift_ansible_inventory/templates/multi_ec2.yaml.j2
@@ -0,0 +1,11 @@
+# multi ec2 inventory configs
+cache_max_age: {{ oo_inventory_cache_max_age }}
+accounts:
+{% for account in oo_inventory_accounts %}
+ - name: {{ account.name }}
+ provider: {{ account.provider }}
+ env_vars:
+ AWS_ACCESS_KEY_ID: {{ account.env_vars.AWS_ACCESS_KEY_ID }}
+ AWS_SECRET_ACCESS_KEY: {{ account.env_vars.AWS_SECRET_ACCESS_KEY }}
+
+{% endfor %}
diff --git a/roles/openshift_ansible_inventory/vars/main.yml b/roles/openshift_ansible_inventory/vars/main.yml
new file mode 100644
index 000000000..25c049282
--- /dev/null
+++ b/roles/openshift_ansible_inventory/vars/main.yml
@@ -0,0 +1,2 @@
+---
+# vars file for openshift_ansible_inventory
diff --git a/roles/yum_repos/README.md b/roles/yum_repos/README.md
new file mode 100644
index 000000000..51ecd5d34
--- /dev/null
+++ b/roles/yum_repos/README.md
@@ -0,0 +1,113 @@
+Yum Repos
+=========
+
+This role allows easy deployment of yum repository config files.
+
+Requirements
+------------
+
+Yum
+
+Role Variables
+--------------
+
+| Name          | Default value | Description                                  |
+|---------------|---------------|----------------------------------------------|
+| repo_files    | None          | List of repo files and their repos to deploy (see examples below) |
+| repo_enabled  | 1             | Whether repos are enabled by default         |
+| repo_gpgcheck | 1             | Whether repo gpgcheck is enabled by default  |
+
+Dependencies
+------------
+
+Example Playbook
+----------------
+
+A single repo file containing a single repo:
+
+ - hosts: servers
+ roles:
+ - role: yum_repos
+ repo_files:
+ - id: my_repo
+ repos:
+ - id: my_repo
+ name: My Awesome Repo
+ baseurl: https://my.awesome.repo/is/available/here
+ skip_if_unavailable: yes
+ gpgkey: https://my.awesome.repo/pubkey.gpg
+
+A single repo file containing a single repo, disabling gpgcheck:
+
+ - hosts: servers
+ roles:
+ - role: yum_repos
+ repo_files:
+ - id: my_other_repo
+ repos:
+ - id: my_other_repo
+ name: My Other Awesome Repo
+ baseurl: https://my.other.awesome.repo/is/available/here
+ gpgcheck: no
+
+A single repo file containing a single disabled repo:
+
+ - hosts: servers
+ roles:
+ - role: yum_repos
+ repo_files:
+ - id: my_other_repo
+ repos:
+ - id: my_other_repo
+ name: My Other Awesome Repo
+ baseurl: https://my.other.awesome.repo/is/available/here
+ enabled: no
+
+A single repo file containing multiple repos:
+
+ - hosts: servers
+ roles:
+ - role: yum_repos
+ repo_files:
+ - id: my_repos
+ repos:
+ - id: my_repo
+ name: My Awesome Repo
+ baseurl: https://my.awesome.repo/is/available/here
+ gpgkey: https://my.awesome.repo/pubkey.gpg
+ - id: my_other_repo
+ name: My Other Awesome Repo
+ baseurl: https://my.other.awesome.repo/is/available/here
+ gpgkey: https://my.other.awesome.repo/pubkey.gpg
+
+Multiple repo files containing multiple repos:
+
+ - hosts: servers
+ roles:
+ - role: yum_repos
+ repo_files:
+ - id: my_repos
+ repos:
+ - id: my_repo
+ name: My Awesome Repo
+ baseurl: https://my.awesome.repo/is/available/here
+ gpgkey: https://my.awesome.repo/pubkey.gpg
+ - id: my_other_repo
+ name: My Other Awesome Repo
+ baseurl: https://my.other.awesome.repo/is/available/here
+ gpgkey: https://my.other.awesome.repo/pubkey.gpg
+ - id: joes_repos
+ repos:
+ - id: joes_repo
+ name: Joe's Less Awesome Repo
+ baseurl: https://joes.repo/is/here
+ gpgkey: https://joes.repo/pubkey.gpg
+ - id: joes_otherrepo
+ name: Joe's Other Less Awesome Repo
+ baseurl: https://joes.repo/is/there
+ gpgkey: https://joes.repo/pubkey.gpg
+
+License
+-------
+
+ASL 2.0
+
+Author Information
+------------------
+
+openshift online operations
diff --git a/roles/yum_repos/defaults/main.yml b/roles/yum_repos/defaults/main.yml
new file mode 100644
index 000000000..515fb7a4a
--- /dev/null
+++ b/roles/yum_repos/defaults/main.yml
@@ -0,0 +1,3 @@
+---
+repo_enabled: 1
+repo_gpgcheck: 1
diff --git a/roles/yum_repos/meta/main.yml b/roles/yum_repos/meta/main.yml
new file mode 100644
index 000000000..6b8374da9
--- /dev/null
+++ b/roles/yum_repos/meta/main.yml
@@ -0,0 +1,8 @@
+---
+galaxy_info:
+ author: openshift operations
+ description: Deploy yum repository config files
+ company: Red Hat, Inc.
+ license: ASL 2.0
+ min_ansible_version: 1.2
+dependencies: []
diff --git a/roles/yum_repos/tasks/main.yml b/roles/yum_repos/tasks/main.yml
new file mode 100644
index 000000000..a9903c6c6
--- /dev/null
+++ b/roles/yum_repos/tasks/main.yml
@@ -0,0 +1,47 @@
+---
+# Convert old params to new params
+- set_fact:
+ repo_files:
+ - id: "{{ repo_tag }}"
+ repos:
+ - id: "{{ repo_tag }}"
+ name: "{{ repo_name }}"
+ baseurl: "{{ repo_baseurl }}"
+ enabled: "{{ repo_enabled }}"
+ gpgcheck: "{{ repo_gpg_check | default(repo_gpgcheck) }}"
+ sslverify: "{{ repo_sslverify | default(None) }}"
+ sslclientcert: "{{ repo_sslclientcert | default(None) }}"
+ sslclientkey: "{{ repo_sslclientkey | default(None) }}"
+ gpgkey: "{{ repo_gpgkey | default(None) }}"
+ when: repo_files is not defined
+
+- name: Verify repo_files is a list
+ assert:
+ that:
+ - repo_files is iterable and repo_files is not string and repo_files is not mapping
+
+- name: Verify repo_files items have an id and a repos list
+ assert:
+ that:
+ - item is mapping
+ - "'id' in item"
+ - "'repos' in item"
+ - item.repos is iterable and item.repos is not string and item.repos is not mapping
+ with_items: repo_files
+
+- name: Verify that repo_files.repos have the required keys
+ assert:
+ that:
+ - item.1 is mapping
+ - "'id' in item.1"
+ - "'name' in item.1"
+ - "'baseurl' in item.1"
+ with_subelements:
+ - repo_files
+ - repos
+
+- name: Installing yum-repo template
+ template:
+ src: yumrepo.j2
+ dest: /etc/yum.repos.d/{{ item.id }}.repo
+ with_items: repo_files
diff --git a/roles/yum_repos/templates/yumrepo.j2 b/roles/yum_repos/templates/yumrepo.j2
new file mode 100644
index 000000000..0dfdbfe43
--- /dev/null
+++ b/roles/yum_repos/templates/yumrepo.j2
@@ -0,0 +1,18 @@
+{% set repos = item.repos %}
+{% for repo in repos %}
+[{{ repo.id }}]
+name={{ repo.name }}
+baseurl={{ repo.baseurl }}
+{% set repo_enabled_value = repo.enabled | default(repo_enabled) %}
+{% set enable_repo = 1 if (repo_enabled_value | int(0) == 1 or repo_enabled_value | lower in ['true', 'yes']) else 0 %}
+enabled={{ enable_repo }}
+{% set repo_gpgcheck_value = repo.gpgcheck | default(repo_gpgcheck) %}
+{% set enable_gpgcheck = 1 if (repo_gpgcheck_value | int(0) == 1 or repo_gpgcheck_value | lower in ['true', 'yes']) else 0 %}
+gpgcheck={{ enable_gpgcheck }}
+{% for key, value in repo.iteritems() %}
+{% if key not in ['id', 'name', 'baseurl', 'enabled', 'gpgcheck'] and value is defined and value != '' %}
+{{ key }}={{ value }}
+{% endif %}
+{% endfor %}
+
+{% endfor %}