author     Troy Dawson <tdawson@redhat.com>  2015-06-12 12:49:37 -0500
committer  Troy Dawson <tdawson@redhat.com>  2015-06-12 12:49:37 -0500
commit     c650920bc7b0043e59fa3439f48f61d5fa211f2d (patch)
tree       3e1f882f5bc7fe419f13a134a71927cb6484fa86
parent     124ca40c134a40b2e6823ab3c4bfe329580d7eaa (diff)
parent     42806b6745c747843b71eaf08b62aeee5e450ab1 (diff)
Merge branch 'master' into prod
-rw-r--r--  README.md | 14
-rw-r--r--  README_AWS.md | 2
-rw-r--r--  README_OSE.md | 2
-rw-r--r--  README_openstack.md | 80
-rwxr-xr-x  bin/cluster | 67
-rw-r--r--  bin/openshift-ansible-bin.spec | 9
-rwxr-xr-x  cloud.rb | 29
-rw-r--r--  docs/best_practices_guide.adoc | 218
-rw-r--r--  docs/core_concepts_guide.adoc | 43
-rw-r--r--  docs/style_guide.adoc | 138
-rw-r--r--  filter_plugins/oo_filters.py | 396
-rw-r--r--  git/.pylintrc | 7
-rw-r--r--  inventory/byo/hosts | 11
-rwxr-xr-x  inventory/libvirt/hosts/libvirt_generic.py | 95
-rwxr-xr-x  inventory/multi_ec2.py | 13
-rw-r--r--  inventory/openshift-ansible-inventory.spec | 13
-rw-r--r--  inventory/openstack/hosts/hosts | 1
-rw-r--r--  inventory/openstack/hosts/nova.ini | 45
-rwxr-xr-x  inventory/openstack/hosts/nova.py | 224
-rw-r--r--  lib/ansible_helper.rb | 94
-rw-r--r--  lib/aws_command.rb | 148
-rw-r--r--  lib/aws_helper.rb | 85
-rw-r--r--  lib/gce_command.rb | 228
-rw-r--r--  lib/gce_helper.rb | 94
-rw-r--r--  lib/launch_helper.rb | 30
-rw-r--r--  playbooks/aws/ansible-tower/launch.yml | 2
-rw-r--r--  playbooks/aws/openshift-cluster/config.yml | 1
-rw-r--r--  playbooks/aws/openshift-cluster/launch.yml | 8
-rw-r--r--  playbooks/aws/openshift-cluster/service.yml | 28
-rw-r--r--  playbooks/aws/openshift-cluster/tasks/launch_instances.yml | 3
-rw-r--r--  playbooks/aws/openshift-master/launch.yml | 10
-rw-r--r--  playbooks/aws/openshift-node/config.yml | 1
-rw-r--r--  playbooks/aws/openshift-node/launch.yml | 10
-rw-r--r--  playbooks/aws/os2-atomic-proxy/config.yml | 20
-rw-r--r--  playbooks/aws/os2-atomic-proxy/launch.yml | 97
-rw-r--r--  playbooks/aws/os2-atomic-proxy/user_data.txt | 6
-rw-r--r--  playbooks/aws/os2-atomic-proxy/vars.int.yml | 3
-rw-r--r--  playbooks/aws/os2-atomic-proxy/vars.prod.yml | 3
-rw-r--r--  playbooks/aws/os2-atomic-proxy/vars.stg.yml | 10
-rw-r--r--  playbooks/byo/openshift-node/config.yml | 4
-rw-r--r--  playbooks/common/openshift-cluster/create_services.yml | 8
-rw-r--r--  playbooks/common/openshift-master/config.yml | 5
-rw-r--r--  playbooks/common/openshift-master/service.yml | 18
-rw-r--r--  playbooks/common/openshift-node/config.yml | 81
-rw-r--r--  playbooks/common/openshift-node/service.yml | 18
-rw-r--r--  playbooks/gce/openshift-cluster/config.yml | 1
-rw-r--r--  playbooks/gce/openshift-cluster/launch.yml | 16
-rw-r--r--  playbooks/gce/openshift-cluster/list.yml | 2
-rw-r--r--  playbooks/gce/openshift-cluster/service.yml | 28
-rw-r--r--  playbooks/gce/openshift-cluster/wip.yml | 26
-rw-r--r--  playbooks/gce/openshift-node/config.yml | 1
-rw-r--r--  playbooks/libvirt/openshift-cluster/config.yml | 1
-rw-r--r--  playbooks/libvirt/openshift-cluster/service.yml | 32
-rw-r--r--  playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml | 12
-rw-r--r--  playbooks/openstack/openshift-cluster/config.yml | 35
-rw-r--r--  playbooks/openstack/openshift-cluster/files/heat_stack.yml | 149
-rw-r--r--  playbooks/openstack/openshift-cluster/files/user-data | 7
l---------  playbooks/openstack/openshift-cluster/filter_plugins (renamed from playbooks/aws/os2-atomic-proxy/filter_plugins) | 0
-rw-r--r--  playbooks/openstack/openshift-cluster/launch.yml | 31
-rw-r--r--  playbooks/openstack/openshift-cluster/list.yml | 24
l---------  playbooks/openstack/openshift-cluster/roles (renamed from playbooks/aws/os2-atomic-proxy/roles) | 0
-rw-r--r--  playbooks/openstack/openshift-cluster/tasks/configure_openstack.yml | 27
-rw-r--r--  playbooks/openstack/openshift-cluster/tasks/launch_instances.yml | 48
-rw-r--r--  playbooks/openstack/openshift-cluster/terminate.yml | 43
-rw-r--r--  playbooks/openstack/openshift-cluster/update.yml | 18
-rw-r--r--  playbooks/openstack/openshift-cluster/vars.yml | 39
-rw-r--r--  rel-eng/packages/openshift-ansible-bin | 2
-rw-r--r--  rel-eng/packages/openshift-ansible-inventory | 2
-rw-r--r--  roles/atomic_base/README.md | 56
-rw-r--r--  roles/atomic_base/files/bash/bashrc | 12
-rw-r--r--  roles/atomic_base/files/ostree/repo_config | 10
-rw-r--r--  roles/atomic_base/files/system/90-nofile.conf | 7
-rw-r--r--  roles/atomic_base/meta/main.yml | 19
-rw-r--r--  roles/atomic_base/tasks/bash.yml | 14
-rw-r--r--  roles/atomic_base/tasks/cloud_user.yml | 6
-rw-r--r--  roles/atomic_base/tasks/main.yml | 4
-rw-r--r--  roles/atomic_base/tasks/ostree.yml | 18
-rw-r--r--  roles/atomic_base/tasks/system.yml | 3
-rw-r--r--  roles/atomic_base/vars/main.yml | 2
-rw-r--r--  roles/atomic_proxy/README.md | 56
-rw-r--r--  roles/atomic_proxy/files/proxy_containers_deploy_descriptor.json | 29
-rw-r--r--  roles/atomic_proxy/files/puppet/auth.conf | 116
-rwxr-xr-x  roles/atomic_proxy/files/setup-proxy-containers.sh | 43
-rw-r--r--  roles/atomic_proxy/handlers/main.yml | 3
-rw-r--r--  roles/atomic_proxy/meta/main.yml | 21
-rw-r--r--  roles/atomic_proxy/tasks/main.yml | 3
-rw-r--r--  roles/atomic_proxy/tasks/setup_containers.yml | 57
-rw-r--r--  roles/atomic_proxy/tasks/setup_puppet.yml | 24
-rw-r--r--  roles/atomic_proxy/templates/puppet/puppet.conf.j2 | 40
-rwxr-xr-x  roles/atomic_proxy/templates/sync/sync-proxy-configs.sh.j2 | 16
-rw-r--r--  roles/atomic_proxy/templates/systemd/ctr-proxy-1.service.j2 | 32
-rw-r--r--  roles/atomic_proxy/templates/systemd/ctr-proxy-monitoring-1.service.j2 | 36
-rw-r--r--  roles/atomic_proxy/templates/systemd/ctr-proxy-puppet-1.service.j2 | 33
-rw-r--r--  roles/atomic_proxy/vars/main.yml | 2
-rwxr-xr-x  roles/docker/files/enter-container.sh | 13
-rw-r--r--  roles/docker/handlers/main.yml | 4
-rw-r--r--  roles/docker/tasks/main.yml | 9
-rw-r--r--  roles/docker_storage/README.md | 39
-rw-r--r--  roles/docker_storage/defaults/main.yml (renamed from playbooks/aws/os2-atomic-proxy/vars.yml) | 0
-rw-r--r--  roles/docker_storage/handlers/main.yml | 1
-rw-r--r--  roles/docker_storage/meta/main.yml | 9
-rw-r--r--  roles/docker_storage/tasks/main.yml | 37
-rw-r--r--  roles/docker_storage/vars/main.yml | 1
-rw-r--r--  roles/etcd/README.md | 38
-rw-r--r--  roles/etcd/handlers/main.yml | 3
-rw-r--r--  roles/etcd/meta/main.yml | 124
-rw-r--r--  roles/etcd/tasks/main.yml | 20
-rw-r--r--  roles/etcd/templates/etcd.conf.j2 | 34
-rw-r--r--  roles/fluentd_master/tasks/main.yml | 47
-rw-r--r--  roles/fluentd_master/templates/kubernetes.conf.j2 | 9
-rw-r--r--  roles/fluentd_node/tasks/main.yml | 55
-rw-r--r--  roles/fluentd_node/templates/kubernetes.conf.j2 | 53
-rw-r--r--  roles/fluentd_node/templates/td-agent.j2 | 2
-rw-r--r--  roles/kube_nfs_volumes/README.md | 111
-rw-r--r--  roles/kube_nfs_volumes/defaults/main.yml | 10
-rw-r--r--  roles/kube_nfs_volumes/handlers/main.yml | 3
-rw-r--r--  roles/kube_nfs_volumes/library/partitionpool.py | 240
-rw-r--r--  roles/kube_nfs_volumes/meta/main.yml | 16
-rw-r--r--  roles/kube_nfs_volumes/tasks/main.yml | 25
-rw-r--r--  roles/kube_nfs_volumes/tasks/nfs.yml | 16
-rw-r--r--  roles/kube_nfs_volumes/templates/nfs.json.j2 | 23
-rw-r--r--  roles/openshift_common/tasks/main.yml | 2
-rw-r--r--  roles/openshift_common/vars/main.yml | 4
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py | 208
-rw-r--r--  roles/openshift_master/README.md | 2
-rw-r--r--  roles/openshift_master/defaults/main.yml | 4
-rw-r--r--  roles/openshift_master/tasks/main.yml | 124
-rw-r--r--  roles/openshift_master/templates/master.yaml.v1.j2 | 98
-rw-r--r--  roles/openshift_master/templates/scheduler.json.j2 | 12
-rw-r--r--  roles/openshift_master/templates/v1_partials/oauthConfig.j2 | 78
-rw-r--r--  roles/openshift_master/vars/main.yml | 13
-rw-r--r--  roles/openshift_node/README.md | 2
-rw-r--r--  roles/openshift_node/defaults/main.yml | 4
-rw-r--r--  roles/openshift_node/handlers/main.yml | 1
-rw-r--r--  roles/openshift_node/tasks/main.yml | 71
-rw-r--r--  roles/openshift_node/templates/node.yaml.v1.j2 | 18
-rw-r--r--  roles/openshift_node/vars/main.yml | 3
-rw-r--r--  roles/openshift_register_nodes/defaults/main.yml | 2
-rwxr-xr-x  roles/openshift_register_nodes/library/kubernetes_register_node.py | 228
-rw-r--r--  roles/openshift_register_nodes/tasks/main.yml | 64
-rw-r--r--  roles/openshift_register_nodes/vars/main.yml | 13
-rw-r--r--  roles/openshift_registry/README.md | 42
-rw-r--r--  roles/openshift_registry/handlers/main.yml | 0
-rw-r--r--  roles/openshift_registry/meta/main.yml (renamed from roles/openshift_sdn_node/meta/main.yml) | 6
-rw-r--r--  roles/openshift_registry/tasks/main.yml | 11
-rw-r--r--  roles/openshift_registry/vars/main.yml | 3
-rw-r--r--  roles/openshift_repos/files/online/repos/enterprise-v3.repo | 8
-rw-r--r--  roles/openshift_router/README.md (renamed from roles/openshift_sdn_master/README.md) | 18
-rw-r--r--  roles/openshift_router/handlers/main.yml | 0
-rw-r--r--  roles/openshift_router/meta/main.yml (renamed from roles/openshift_sdn_master/meta/main.yml) | 6
-rw-r--r--  roles/openshift_router/tasks/main.yml | 11
-rw-r--r--  roles/openshift_router/vars/main.yml | 3
-rw-r--r--  roles/openshift_sdn_master/handlers/main.yml | 3
-rw-r--r--  roles/openshift_sdn_master/tasks/main.yml | 37
-rw-r--r--  roles/openshift_sdn_node/README.md | 44
-rw-r--r--  roles/openshift_sdn_node/handlers/main.yml | 3
-rw-r--r--  roles/openshift_sdn_node/tasks/main.yml | 60
-rwxr-xr-x  roles/os_zabbix/library/zbxapi.py | 259
-rw-r--r--  roles/pods/meta/main.yml | 6
159 files changed, 3397 insertions, 2644 deletions
diff --git a/README.md b/README.md
index 20f571ccc..2bdaefd4c 100644
--- a/README.md
+++ b/README.md
@@ -1,10 +1,8 @@
-openshift-ansible
-========================
+#openshift-ansible
This repo contains OpenShift Ansible code.
-Setup
------
+##Setup
- Install base dependencies:
- Fedora:
```
@@ -30,10 +28,14 @@ Setup
- [How to build the openshift-ansible rpms](BUILD.md)
- Directory Structure:
- - [cloud.rb](cloud.rb) - light wrapper around Ansible
- [bin/cluster](bin/cluster) - python script to easily create OpenShift 3 clusters
+ - [docs](docs) - Documentation for the project
- [filter_plugins/](filter_plugins) - custom filters used to manipulate data in Ansible
- [inventory/](inventory) - houses Ansible dynamic inventory scripts
- - [lib/](lib) - library components of cloud.rb
- [playbooks/](playbooks) - houses host-type Ansible playbooks (launch, config, destroy, vars)
- [roles/](roles) - shareable Ansible tasks
+
+##Contributing
+
+###Feature Roadmap
+Our Feature Roadmap is available on the OpenShift Origin Infrastructure [Trello board](https://trello.com/b/nbkIrqKa/openshift-origin-infrastructure). All ansible items will be tagged with [installv3].
diff --git a/README_AWS.md b/README_AWS.md
index dc93357ee..7f4b1832b 100644
--- a/README_AWS.md
+++ b/README_AWS.md
@@ -18,7 +18,7 @@ Create a credentials file
```
source ~/.aws_creds
```
-Note: You must source this file in each shell that you want to run cloud.rb
+Note: You must source this file before running any Ansible commands.
(Optional) Setup your $HOME/.ssh/config file
diff --git a/README_OSE.md b/README_OSE.md
index 41a6f2935..dffabc714 100644
--- a/README_OSE.md
+++ b/README_OSE.md
@@ -80,7 +80,7 @@ ansible_ssh_user=root
deployment_type=enterprise
# Pre-release registry URL
-openshift_registry_url=docker-buildvm-rhose.usersys.redhat.com:5000/openshift3_beta/ose-${component}:${version}
+oreg_url=docker-buildvm-rhose.usersys.redhat.com:5000/openshift3_beta/ose-${component}:${version}
# Pre-release additional repo
openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel',
diff --git a/README_openstack.md b/README_openstack.md
new file mode 100644
index 000000000..57977d1f5
--- /dev/null
+++ b/README_openstack.md
@@ -0,0 +1,80 @@
+OpenStack Setup instructions
+============================
+
+Requirements
+------------
+
+The OpenStack instance must have Neutron and Heat enabled.
+
+Install Dependencies
+--------------------
+
+1. The OpenStack python clients for Nova, Neutron and Heat are required:
+
+* `python-novaclient`
+* `python-neutronclient`
+* `python-heatclient`
+
+On RHEL / CentOS / Fedora:
+```
+ yum install -y ansible python-novaclient python-neutronclient python-heatclient
+```
+
+Configuration
+-------------
+
+The following options can be passed via the `-o` flag of the `create` command:
+
+* `image_name`: Name of the image to use to spawn VMs
+* `keypair` (defaults to `${LOGNAME}_key`): Name of the ssh key
+* `public_key` (defaults to `~/.ssh/id_rsa.pub`): filename of the ssh public key
+* `master_flavor_ram` (defaults to `2048`): VM flavor for the master (by amount of RAM)
+* `master_flavor_id`: VM flavor for the master (by ID)
+* `master_flavor_include`: VM flavor for the master (by name)
+* `node_flavor_ram` (defaults to `4096`): VM flavor for the nodes (by amount of RAM)
+* `node_flavor_id`: VM flavor for the nodes (by ID)
+* `node_flavor_include`: VM flavor for the nodes (by name)
+* `infra_heat_stack` (defaults to `playbooks/openstack/openshift-cluster/files/heat_stack.yml`): filename of the Heat template used to create the cluster infrastructure
+
+The following options are used only by `heat_stack.yml`, and therefore only when the `infra_heat_stack` option is left at its default value.
+
+* `network_prefix` (defaults to `openshift-ansible-<cluster_id>`): prefix prepended to all network objects (net, subnet, router, security groups)
+* `dns` (defaults to `8.8.8.8,8.8.4.4`): comma-separated list of DNS servers to use
+* `net_cidr` (defaults to `192.168.<rand()>.0/24`): CIDR of the network created by `heat_stack.yml`
+* `external_net` (defaults to `external`): Name of the external network to connect to
+* `floating_ip_pools` (defaults to `external`): comma-separated list of floating IP pools
+* `ssh_from` (defaults to `0.0.0.0/0`): IPs authorized to connect to the VMs via ssh
+
+
+Creating a cluster
+------------------
+
+1. To create a cluster with one master and two nodes
+
+```
+ bin/cluster create openstack <cluster-id>
+```
+
+2. To create a cluster with one master and three nodes, a custom VM image and custom DNS:
+
+```
+ bin/cluster create -n 3 -o image_name=rhel-7.1-openshift-2015.05.21 -o dns=172.16.50.210,172.16.50.250 openstack lenaic
+```
+
+Updating a cluster
+------------------
+
+1. To update the cluster
+
+```
+ bin/cluster update openstack <cluster-id>
+```
+
+Terminating a cluster
+---------------------
+
+1. To terminate the cluster
+
+```
+ bin/cluster terminate openstack <cluster-id>
+```
diff --git a/bin/cluster b/bin/cluster
index 79f1f988f..2ea389523 100755
--- a/bin/cluster
+++ b/bin/cluster
@@ -9,8 +9,9 @@ import os
class Cluster(object):
"""
- Control and Configuration Interface for OpenShift Clusters
+ Provide Command, Control and Configuration (c3) Interface for OpenShift Clusters
"""
+
def __init__(self):
# setup ansible ssh environment
if 'ANSIBLE_SSH_ARGS' not in os.environ:
@@ -104,6 +105,21 @@ class Cluster(object):
return self.action(args, inventory, env, playbook)
+ def service(self, args):
+ """
+ Make the same service call across all nodes in the cluster
+ :param args: command line arguments provided by user
+ :return: exit status from run command
+ """
+ env = {'cluster_id': args.cluster_id,
+ 'deployment_type': self.get_deployment_type(args),
+ 'new_cluster_state': args.state}
+
+ playbook = "playbooks/{}/openshift-cluster/service.yml".format(args.provider)
+ inventory = self.setup_provider(args.provider)
+
+ return self.action(args, inventory, env, playbook)
+
def setup_provider(self, provider):
"""
Setup ansible playbook environment
@@ -127,6 +143,8 @@ class Cluster(object):
inventory = '-i inventory/aws/hosts'
elif 'libvirt' == provider:
inventory = '-i inventory/libvirt/hosts'
+ elif 'openstack' == provider:
+ inventory = '-i inventory/openstack/hosts'
else:
# this code should never be reached
raise ValueError("invalid PROVIDER {}".format(provider))
@@ -147,6 +165,11 @@ class Cluster(object):
if args.verbose > 0:
verbose = '-{}'.format('v' * args.verbose)
+ if args.option:
+ for opt in args.option:
+ k, v = opt.split('=', 1)
+ env['opt_'+k] = v
+
ansible_env = '-e \'{}\''.format(
' '.join(['%s=%s' % (key, value) for (key, value) in env.items()])
)
@@ -167,25 +190,49 @@ class Cluster(object):
if __name__ == '__main__':
"""
- Implemented to support writing unit tests
+ User command to invoke ansible playbooks in a "known" environment
+
+ Reads ~/.openshift-ansible for default configuration items
+ [DEFAULT]
+ validate_cluster_ids = False
+ cluster_ids = marketing,sales
+ providers = gce,aws,libvirt,openstack
"""
+ environment = ConfigParser.SafeConfigParser({
+ 'cluster_ids': 'marketing,sales',
+ 'validate_cluster_ids': 'False',
+ 'providers': 'gce,aws,libvirt,openstack',
+ })
+
+ path = os.path.expanduser("~/.openshift-ansible")
+ if os.path.isfile(path):
+ environment.read(path)
+
cluster = Cluster()
- providers = ['gce', 'aws', 'libvirt']
parser = argparse.ArgumentParser(
description='Python wrapper to ensure proper environment for OpenShift ansible playbooks',
)
parser.add_argument('-v', '--verbose', action='count',
help='Multiple -v options increase the verbosity')
- parser.add_argument('--version', action='version', version='%(prog)s 0.2')
+ parser.add_argument('--version', action='version', version='%(prog)s 0.3')
meta_parser = argparse.ArgumentParser(add_help=False)
+ providers = environment.get('DEFAULT', 'providers').split(',')
meta_parser.add_argument('provider', choices=providers, help='provider')
- meta_parser.add_argument('cluster_id', help='prefix for cluster VM names')
+
+ if environment.get('DEFAULT', 'validate_cluster_ids').lower() in ("yes", "true", "1"):
+ meta_parser.add_argument('cluster_id', choices=environment.get('DEFAULT', 'cluster_ids').split(','),
+ help='prefix for cluster VM names')
+ else:
+ meta_parser.add_argument('cluster_id', help='prefix for cluster VM names')
+
meta_parser.add_argument('-t', '--deployment-type',
choices=['origin', 'online', 'enterprise'],
help='Deployment type. (default: origin)')
+ meta_parser.add_argument('-o', '--option', action='append',
+ help='options')
action_parser = parser.add_subparsers(dest='action', title='actions',
description='Choose from valid actions')
@@ -221,6 +268,13 @@ if __name__ == '__main__':
parents=[meta_parser])
list_parser.set_defaults(func=cluster.list)
+ service_parser = action_parser.add_parser('service', help='service for openshift across cluster',
+ parents=[meta_parser])
+ # choices are the only ones valid for the ansible service module: http://docs.ansible.com/service_module.html
+ service_parser.add_argument('state', choices=['started', 'stopped', 'restarted', 'reloaded'],
+ help='make service call across cluster')
+ service_parser.set_defaults(func=cluster.service)
+
args = parser.parse_args()
if 'terminate' == args.action and not args.force:
@@ -230,7 +284,8 @@ if __name__ == '__main__':
exit(1)
if 'update' == args.action and not args.force:
- answer = raw_input("This is destructive and could corrupt {} environment. Continue? [y/N] ".format(args.cluster_id))
+ answer = raw_input(
+ "This is destructive and could corrupt {} environment. Continue? [y/N] ".format(args.cluster_id))
if answer not in ['y', 'Y']:
sys.stderr.write('\nACTION [update] aborted by user!\n')
exit(1)
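
Between the config-file defaults and the new `-o` flag, it can help to see the two mechanisms end to end. Below is a minimal, self-contained sketch (Python 2, matching the script; not part of this commit) of how bin/cluster now builds its provider list from ~/.openshift-ansible and folds repeated `-o key=value` flags into the extra vars handed to ansible-playbook. The sample option values are invented for illustration.

```
import ConfigParser
import os

# Same defaults as the SafeConfigParser constructed in the diff above.
environment = ConfigParser.SafeConfigParser({
    'cluster_ids': 'marketing,sales',
    'validate_cluster_ids': 'False',
    'providers': 'gce,aws,libvirt,openstack',
})
path = os.path.expanduser('~/.openshift-ansible')
if os.path.isfile(path):
    environment.read(path)

providers = environment.get('DEFAULT', 'providers').split(',')

# Each '-o key=value' becomes an 'opt_key' entry in the env dict passed as -e extra vars.
env = {'cluster_id': 'marketing', 'deployment_type': 'origin'}
for opt in ['image_name=rhel-7.1-openshift-2015.05.21', 'dns=172.16.50.210,172.16.50.250']:
    key, value = opt.split('=', 1)
    env['opt_' + key] = value

ansible_env = '-e \'{}\''.format(' '.join(['%s=%s' % (k, v) for (k, v) in env.items()]))
print providers
print ansible_env
```

With `validate_cluster_ids = True` in that file, the `cluster_id` positional argument is additionally restricted to the configured `cluster_ids`, as the argparse change above shows.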
diff --git a/bin/openshift-ansible-bin.spec b/bin/openshift-ansible-bin.spec
index 884d4eb0a..fd2386c9a 100644
--- a/bin/openshift-ansible-bin.spec
+++ b/bin/openshift-ansible-bin.spec
@@ -1,6 +1,6 @@
Summary: OpenShift Ansible Scripts for working with metadata hosts
Name: openshift-ansible-bin
-Version: 0.0.17
+Version: 0.0.18
Release: 1%{?dist}
License: ASL 2.0
URL: https://github.com/openshift/openshift-ansible
@@ -42,6 +42,13 @@ cp -p openshift_ansible.conf.example %{buildroot}/etc/openshift_ansible/openshif
%config(noreplace) /etc/openshift_ansible/
%changelog
+* Tue Jun 09 2015 Kenny Woodson <kwoodson@redhat.com> 0.0.18-1
+- Implement OpenStack provider (lhuard@amadeus.com)
+- * Update defaults and examples to track core concepts guide
+ (jhonce@redhat.com)
+- Issue 119 - Add support for ~/.openshift-ansible (jhonce@redhat.com)
+- Infrastructure - Add service action to bin/cluster (jhonce@redhat.com)
+
* Fri May 15 2015 Thomas Wiest <twiest@redhat.com> 0.0.17-1
- fixed the openshift-ansible-bin build (twiest@redhat.com)
diff --git a/cloud.rb b/cloud.rb
deleted file mode 100755
index 934066662..000000000
--- a/cloud.rb
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/usr/bin/env ruby
-
-require 'thor'
-require_relative 'lib/gce_command'
-require_relative 'lib/aws_command'
-
-# Don't buffer output to the client
-STDOUT.sync = true
-STDERR.sync = true
-
-module OpenShift
- module Ops
- class CloudCommand < Thor
- desc 'gce', 'Manages Google Compute Engine assets'
- subcommand "gce", GceCommand
-
- desc 'aws', 'Manages Amazon Web Services assets'
- subcommand "aws", AwsCommand
- end
- end
-end
-
-if __FILE__ == $0
- SCRIPT_DIR = File.expand_path(File.dirname(__FILE__))
- Dir.chdir(SCRIPT_DIR) do
- # Kick off thor
- OpenShift::Ops::CloudCommand.start(ARGV)
- end
-end
diff --git a/docs/best_practices_guide.adoc b/docs/best_practices_guide.adoc
new file mode 100644
index 000000000..841f6e72c
--- /dev/null
+++ b/docs/best_practices_guide.adoc
@@ -0,0 +1,218 @@
+// vim: ft=asciidoc
+
+= Openshift-Ansible Best Practices Guide
+
+The purpose of this guide is to describe the preferred patterns and best practices used in this repository (both in ansible and python).
+
+It is important to note that this repository may not currently comply with all best practices, but the intention is that it will.
+
+All new pull requests created against this repository MUST comply with this guide.
+
+This guide complies with https://www.ietf.org/rfc/rfc2119.txt[RFC2119].
+
+
+== Pull Requests
+
+[cols="2v,v"]
+|===
+| **Rule**
+| All pull requests MUST pass the build bot *before* they are merged.
+|===
+
+The purpose of this rule is to avoid cases where the build bot will fail pull requests for code modified in a previous pull request.
+
+The tooling is flexible enough that exceptions can be made so that the tool the build bot is running will ignore certain areas or certain checks, but the build bot itself must pass for the pull request to be merged.
+
+
+
+== Python
+
+=== PyLint
+http://www.pylint.org/[PyLint] is used in an attempt to keep the python code as clean and as manageable as possible. The build bot runs each pull request through PyLint and any warnings or errors cause the build bot to fail the pull request.
+
+'''
+[cols="2v,v"]
+|===
+| **Rule**
+| PyLint rules MUST NOT be disabled on a whole file.
+|===
+
+Instead, http://docs.pylint.org/faq.html#is-it-possible-to-locally-disable-a-particular-message[disable the PyLint check on the line where PyLint is complaining].
+
+'''
+[cols="2v,v"]
+|===
+| **Rule**
+| PyLint rules MUST NOT be disabled unless they meet one of the following exceptions
+|===
+
+.Exceptions:
+1. When PyLint fails because of a dependency that can't be installed on the build bot
+1. When PyLint fails because of including a module that is outside of our control (like Ansible)
+1. When PyLint fails, but the code makes more sense the way it is formatted (stylistic exception). For this exception, the description of the PyLint disable MUST state why the code is more clear, AND the person reviewing the PR will decide if they agree or not. The reviewer may reject the PR if they disagree with the reason for the disable.
+
+'''
+[cols="2v,v"]
+|===
+| **Rule**
+| All PyLint rule disables MUST be documented in the code.
+|===
+
+The purpose of this rule is to inform future developers about the disable.
+
+.Specifically, the following MUST accompany every PyLint disable:
+1. Why is the check being disabled?
+1. Is disabling this check meant to be permanent or temporary?
+
+.Example:
+[source,python]
+----
+# Reason: disable pylint maybe-no-member because overloaded use of
+# the module name causes pylint to not detect that 'results'
+# is an array or hash
+# Status: permanently disabled unless a way is found to fix this.
+# pylint: disable=maybe-no-member
+metadata[line] = results.pop()
+----
+
+
+== Ansible
+
+=== Yaml Files (Playbooks, Roles, Vars, etc)
+
+'''
+[cols="2v,v"]
+|===
+| **Rule**
+| Ansible files SHOULD NOT use JSON (use pure YAML instead).
+|===
+
+YAML is a superset of JSON, which means that Ansible allows JSON syntax to be interspersed. Even though YAML (and by extension Ansible) allows for this, JSON SHOULD NOT be used.
+
+.Reasons:
+* Ansible is able to give clearer error messages when the files are pure YAML
+* YAML reads nicer (preference held by several team members)
+* YAML makes for nicer diffs as YAML tends to be multi-line, whereas JSON tends to be more concise
+
+.Exceptions:
+* Ansible static inventory files are INI files. To pass in variables for specific hosts, Ansible allows for these variables to be put inside of the static inventory files. These variables can be in JSON format, but can't be in YAML format. This is an acceptable use of JSON, as YAML is not allowed in this case.
+
+Every effort should be made to keep our Ansible YAML files in pure YAML.
+
+=== Defensive Programming
+
+.Context
+* http://docs.ansible.com/fail_module.html[Ansible Fail Module]
+
+'''
+[cols="2v,v"]
+|===
+| **Rule**
+| Ansible playbooks MUST begin with checks for any variables that they require.
+|===
+
+If an Ansible playbook requires certain variables to be set, it's best to check for these up front before any other actions have been performed. In this way, the user knows exactly what needs to be passed into the playbook.
+
+.Example:
+[source,yaml]
+----
+---
+- hosts: localhost
+ gather_facts: no
+ tasks:
+ - fail: msg="This playbook requires g_environment to be set and non empty"
+ when: g_environment is not defined or g_environment == ''
+----
+
+'''
+[cols="2v,v"]
+|===
+| **Rule**
+| An Ansible role's tasks/main.yml file MUST begin with checks for any variables that the role requires.
+|===
+
+If an Ansible role requires certain variables to be set, it's best to check for these up front before any other actions have been performed. In this way, the user knows exactly what needs to be passed into the role.
+
+.Example:
+[source,yaml]
+----
+---
+# tasks/main.yml
+- fail: msg="This role requires arl_environment to be set and non empty"
+ when: arl_environment is not defined or arl_environment == ''
+----
+
+=== Roles
+
+'''
+[cols="2v,v"]
+|===
+| **Rule**
+| The Ansible roles directory MUST maintain a flat structure.
+|===
+
+.Context
+* http://docs.ansible.com/playbooks_best_practices.html#directory-layout[Ansible Suggested Directory Layout]
+
+.The purpose of this rule is to:
+* Comply with the upstream best practices
+* Make it familiar for new contributors
+* Make it compatible with Ansible Galaxy
+
+'''
+[cols="2v,v"]
+|===
+| **Rule**
+| Ansible Roles SHOULD be named like technology_component[_subcomponent].
+|===
+
+For consistency, role names SHOULD follow the above naming pattern. It is important to note that this is a recommendation for role naming, and follows the pattern used by upstream.
+
+Many times the `technology` portion of the pattern will line up with a package name. It is advised that whenever possible, the package name should be used.
+
+.Examples:
+* The role to configure an OpenShift Master is called `openshift_master`
+* The role to configure OpenShift specific yum repositories is called `openshift_repos`
+
+=== Filters
+.Context:
+* https://docs.ansible.com/playbooks_filters.html[Ansible Playbook Filters]
+* http://jinja.pocoo.org/docs/dev/templates/#builtin-filters[Jinja2 Builtin Filters]
+
+'''
+[cols="2v,v"]
+|===
+| **Rule**
+| The `default` filter SHOULD replace empty strings, lists, etc.
+|===
+
+When using the jinja2 `default` filter, unless the variable is a boolean, specify `true` as the second parameter. This will cause the default filter to replace empty strings, lists, etc with the provided default.
+
+This is because it is preferable to either have a sane default set than to have an empty string, list, etc. For example, it is preferable to have a config value set to a sane default than to have it simply set as an empty string.
+
+.From the http://jinja.pocoo.org/docs/dev/templates/[Jinja2 Docs]:
+[quote]
+If you want to use default with variables that evaluate to false you have to set the second parameter to true
+
+.Example:
+[source,yaml]
+----
+---
+- hosts: localhost
+ gather_facts: no
+ vars:
+ somevar: ''
+ tasks:
+ - debug: var=somevar
+
+ - name: "Will output 'somevar: []'"
+ debug: "msg='somevar: [{{ somevar | default('the string was empty') }}]'"
+
+ - name: "Will output 'somevar: [the string was empty]'"
+ debug: "msg='somevar: [{{ somevar | default('the string was empty', true) }}]'"
+----
+
+
+In other words, normally the `default` filter will only replace the value if it's undefined. By setting the second parameter to `true`, it will also replace the value if it evaluates to a false value in Python (None, empty list, empty string, etc.).
+
+This is almost always more desirable than an empty list, string, etc.
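
The YAML-over-JSON rule in this new guide rests on YAML being a superset of JSON. As a quick illustration (a sketch in Python 2 using PyYAML, not part of this commit), the same Ansible-style task parses to the identical structure whether written as JSON or as the pure YAML the guide asks for:

```
import yaml  # PyYAML

json_style = '{"name": "install docker", "yum": {"name": "docker", "state": "present"}}'
yaml_style = '''
name: install docker
yum:
  name: docker
  state: present
'''

# Both forms load to the same dict; only the pure-YAML spelling follows the guide.
assert yaml.safe_load(json_style) == yaml.safe_load(yaml_style)
```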
diff --git a/docs/core_concepts_guide.adoc b/docs/core_concepts_guide.adoc
new file mode 100644
index 000000000..38187c55e
--- /dev/null
+++ b/docs/core_concepts_guide.adoc
@@ -0,0 +1,43 @@
+// vim: ft=asciidoc
+
+= Openshift-Ansible Core Concepts Guide
+
+The purpose of this guide is to describe core concepts used in this repository.
+
+It is important to note that this repository may not currently implement all of the concepts, but the intention is that it will.
+
+== Logical Grouping Concepts
+The following are the concepts used to logically group OpenShift cluster instances.
+
+These groupings are used to perform operations specifically against instances in the specified group.
+
+For example, run an Ansible playbook against all instances in the `production` environment, or run an ad hoc command against all instances in the `acme-corp` cluster group.
+
+=== Cluster
+A Cluster is a complete install of OpenShift (master, nodes, registry, router, etc).
+
+Example: Acme Corp has sales and marketing departments that both want to use OpenShift for their internal applications, but they do not want to share resources because they have different cost centers. Each department could have their own completely separate install of OpenShift. Each install is a separate OpenShift cluster.
+
+Defined Clusters:
+`acme-sales`
+`acme-marketing`
+
+=== Cluster Group
+A cluster group is a logical grouping of one or more clusters. Which clusters are in which cluster groups is determined by the OpenShift administrators.
+
+Example: Extending the example above, both marketing and sales clusters are part of Acme Corp. Let's say that Acme Corp contracts with Hosting Corp to host their OpenShift clusters. Hosting Corp could create an Acme Corp cluster group.
+
+This would logically group Acme Corp resources from other Hosting Corp customers, which would enable the Hosting Corp's OpenShift administrators to run operations specifically targeting Acme Corp instances.
+
+Defined Cluster Group:
+`acme-corp`
+
+=== Environment
+An environment is a logical grouping of one or more cluster groups. How the environment is defined is determined by the OpenShift administrators.
+
+Example: Extending the two examples above, Hosting Corp is upgrading to the latest version of OpenShift. Before deploying it to their clusters in the Production environment, they want to test it out. So, Hosting Corp runs an Ansible playbook specifically against all of the cluster groups in the Staging environment in order to do the OpenShift upgrade.
+
+
+Defined Environments:
+`production`
+`staging`
diff --git a/docs/style_guide.adoc b/docs/style_guide.adoc
new file mode 100644
index 000000000..3b888db12
--- /dev/null
+++ b/docs/style_guide.adoc
@@ -0,0 +1,138 @@
+// vim: ft=asciidoc
+
+= Openshift-Ansible Style Guide
+
+The purpose of this guide is to describe the preferred coding conventions used in this repository (both in ansible and python).
+
+It is important to note that this repository may not currently comply with all style guide rules, but the intention is that it will.
+
+All new pull requests created against this repository MUST comply with this guide.
+
+This style guide complies with https://www.ietf.org/rfc/rfc2119.txt[RFC2119].
+
+== Python
+
+
+=== Python Maximum Line Length
+
+.Context:
+* https://www.python.org/dev/peps/pep-0008/#maximum-line-length[Python Pep8 Line Length]
+
+'''
+[cols="2v,v"]
+|===
+| **Rule**
+| All lines SHOULD be no longer than 80 characters.
+|===
+
+Every attempt SHOULD be made to comply with this soft line length limit, and only when it makes the code more readable should this be violated.
+
+Code readability is subjective; therefore pull requests SHOULD still be merged even if they exceed this soft limit, as it is up to the individual contributor to decide when exceeding 80 characters makes the code more readable.
+
+
+'''
+[cols="2v,v"]
+|===
+| **Rule**
+| All lines MUST be no longer than 120 characters.
+|===
+
+This is a hard limit and is enforced by the build bot. This check MUST NOT be disabled.
+
+
+
+== Ansible
+
+=== Ansible Global Variables
+Ansible global variables are defined as any variables outside of ansible roles. Examples include playbook variables, variables passed in on the cli, etc.
+
+'''
+[cols="2v,v"]
+|===
+| **Rule**
+| Global variables MUST have a prefix of g_
+|===
+
+
+Example:
+[source]
+----
+g_environment: someval
+----
+
+=== Ansible Role Variables
+Ansible role variables are defined as variables contained in (or passed into) a role.
+
+'''
+[cols="2v,v"]
+|===
+| **Rule**
+| Role variables MUST have a prefix of at least 3 characters. See below for specific naming rules.
+|===
+
+==== Role with 3 (or more) words in the name
+
+Take the first letter of each of the words.
+
+.3 word example:
+* Role name: made_up_role
+* Prefix: mur
+[source]
+----
+mur_var1: value_one
+----
+
+.4 word example:
+* Role name: totally_made_up_role
+* Prefix: tmur
+[source]
+----
+tmur_var1: value_one
+----
+
+
+
+==== Role with 2 (or fewer) words in the name
+
+Make up a prefix that makes sense.
+
+.1 word example:
+* Role name: ansible
+* Prefix: ans
+[source]
+----
+ans_var1: value_one
+----
+
+.2 word example:
+* Role name: ansible_tower
+* Prefix: tow
+[source]
+----
+tow_var1: value_one
+----
+
+
+==== Role name prefix conflicts
+If two role names contain words that start with the same letters, it will seem like their prefixes would conflict.
+
+Role variables are confined to the roles themselves, so this is actually only a problem if one of the roles depends on the other role (or includes files from the other role).
+
+.Same prefix example:
+* First Role Name: made_up_role
+* First Role Prefix: mur
+* Second Role Name: my_uber_role
+* Second Role Prefix: mur
+[source]
+----
+- hosts: localhost
+ roles:
+ - { role: made_up_role, mur_var1: val1 }
+ - { role: my_uber_role, mur_var1: val2 }
+----
+
+Even though both roles have the same prefix (mur), and even though both roles have a variable named mur_var1, these two variables never exist outside of their respective roles. This means that this is not a problem.
+
+This would only be a problem if my_uber_role depended on made_up_role, or vice versa. Or if either of these two roles included things from the other.
+
+This is enough of a corner case that it is unlikely to happen. If it does, it will be addressed on a case by case basis.
diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py
index 097038450..b7248efaa 100644
--- a/filter_plugins/oo_filters.py
+++ b/filter_plugins/oo_filters.py
@@ -9,188 +9,230 @@ from ansible import errors
from operator import itemgetter
import pdb
-def oo_pdb(arg):
- ''' This pops you into a pdb instance where arg is the data passed in
- from the filter.
- Ex: "{{ hostvars | oo_pdb }}"
- '''
- pdb.set_trace()
- return arg
-
-def oo_len(arg):
- ''' This returns the length of the argument
- Ex: "{{ hostvars | oo_len }}"
- '''
- return len(arg)
-
-def get_attr(data, attribute=None):
- ''' This looks up dictionary attributes of the form a.b.c and returns
- the value.
- Ex: data = {'a': {'b': {'c': 5}}}
- attribute = "a.b.c"
- returns 5
- '''
- if not attribute:
- raise errors.AnsibleFilterError("|failed expects attribute to be set")
-
- ptr = data
- for attr in attribute.split('.'):
- ptr = ptr[attr]
-
- return ptr
-
-def oo_flatten(data):
- ''' This filter plugin will flatten a list of lists
- '''
- if not issubclass(type(data), list):
- raise errors.AnsibleFilterError("|failed expects to flatten a List")
-
- return [item for sublist in data for item in sublist]
-
-
-def oo_collect(data, attribute=None, filters=None):
- ''' This takes a list of dict and collects all attributes specified into a
- list If filter is specified then we will include all items that match
- _ALL_ of filters.
- Ex: data = [ {'a':1, 'b':5, 'z': 'z'}, # True, return
- {'a':2, 'z': 'z'}, # True, return
- {'a':3, 'z': 'z'}, # True, return
- {'a':4, 'z': 'b'}, # FAILED, obj['z'] != obj['z']
- ]
- attribute = 'a'
- filters = {'z': 'z'}
- returns [1, 2, 3]
- '''
- if not issubclass(type(data), list):
- raise errors.AnsibleFilterError("|failed expects to filter on a List")
-
- if not attribute:
- raise errors.AnsibleFilterError("|failed expects attribute to be set")
-
- if filters is not None:
- if not issubclass(type(filters), dict):
- raise errors.AnsibleFilterError("|fialed expects filter to be a"
- " dict")
- retval = [get_attr(d, attribute) for d in data if (
- all([d[key] == filters[key] for key in filters]))]
- else:
- retval = [get_attr(d, attribute) for d in data]
-
- return retval
-
-def oo_select_keys(data, keys):
- ''' This returns a list, which contains the value portions for the keys
- Ex: data = { 'a':1, 'b':2, 'c':3 }
- keys = ['a', 'c']
- returns [1, 3]
- '''
-
- if not issubclass(type(data), dict):
- raise errors.AnsibleFilterError("|failed expects to filter on a dict")
-
- if not issubclass(type(keys), list):
- raise errors.AnsibleFilterError("|failed expects first param is a list")
-
- # Gather up the values for the list of keys passed in
- retval = [data[key] for key in keys]
-
- return retval
-
-def oo_prepend_strings_in_list(data, prepend):
- ''' This takes a list of strings and prepends a string to each item in the
- list
- Ex: data = ['cart', 'tree']
- prepend = 'apple-'
- returns ['apple-cart', 'apple-tree']
- '''
- if not issubclass(type(data), list):
- raise errors.AnsibleFilterError("|failed expects first param is a list")
- if not all(isinstance(x, basestring) for x in data):
- raise errors.AnsibleFilterError("|failed expects first param is a list"
- " of strings")
- retval = [prepend + s for s in data]
- return retval
-
-def oo_ami_selector(data, image_name):
- ''' This takes a list of amis and an image name and attempts to return
- the latest ami.
- '''
- if not issubclass(type(data), list):
- raise errors.AnsibleFilterError("|failed expects first param is a list")
-
- if not data:
- return None
- else:
- if image_name is None or not image_name.endswith('_*'):
- ami = sorted(data, key=itemgetter('name'), reverse=True)[0]
- return ami['ami_id']
+
+class FilterModule(object):
+ ''' Custom ansible filters '''
+
+ @staticmethod
+ def oo_pdb(arg):
+ ''' This pops you into a pdb instance where arg is the data passed in
+ from the filter.
+ Ex: "{{ hostvars | oo_pdb }}"
+ '''
+ pdb.set_trace()
+ return arg
+
+ @staticmethod
+ def get_attr(data, attribute=None):
+ ''' This looks up dictionary attributes of the form a.b.c and returns
+ the value.
+ Ex: data = {'a': {'b': {'c': 5}}}
+ attribute = "a.b.c"
+ returns 5
+ '''
+ if not attribute:
+ raise errors.AnsibleFilterError("|failed expects attribute to be set")
+
+ ptr = data
+ for attr in attribute.split('.'):
+ ptr = ptr[attr]
+
+ return ptr
+
+ @staticmethod
+ def oo_flatten(data):
+ ''' This filter plugin will flatten a list of lists
+ '''
+ if not issubclass(type(data), list):
+ raise errors.AnsibleFilterError("|failed expects to flatten a List")
+
+ return [item for sublist in data for item in sublist]
+
+
+ @staticmethod
+ def oo_collect(data, attribute=None, filters=None):
+ ''' This takes a list of dict and collects all attributes specified into a
+ list If filter is specified then we will include all items that match
+ _ALL_ of filters.
+ Ex: data = [ {'a':1, 'b':5, 'z': 'z'}, # True, return
+ {'a':2, 'z': 'z'}, # True, return
+ {'a':3, 'z': 'z'}, # True, return
+ {'a':4, 'z': 'b'}, # FAILED, obj['z'] != obj['z']
+ ]
+ attribute = 'a'
+ filters = {'z': 'z'}
+ returns [1, 2, 3]
+ '''
+ if not issubclass(type(data), list):
+ raise errors.AnsibleFilterError("|failed expects to filter on a List")
+
+ if not attribute:
+ raise errors.AnsibleFilterError("|failed expects attribute to be set")
+
+ if filters is not None:
+ if not issubclass(type(filters), dict):
+ raise errors.AnsibleFilterError("|fialed expects filter to be a"
+ " dict")
+ retval = [FilterModule.get_attr(d, attribute) for d in data if (
+ all([d[key] == filters[key] for key in filters]))]
else:
- ami_info = [(ami, ami['name'].split('_')[-1]) for ami in data]
- ami = sorted(ami_info, key=itemgetter(1), reverse=True)[0][0]
- return ami['ami_id']
-
-def oo_ec2_volume_definition(data, host_type, docker_ephemeral=False):
- ''' This takes a dictionary of volume definitions and returns a valid ec2
- volume definition based on the host_type and the values in the
- dictionary.
- The dictionary should look similar to this:
- { 'master':
- { 'root':
- { 'volume_size': 10, 'device_type': 'gp2',
- 'iops': 500
- }
- },
- 'node':
- { 'root':
- { 'volume_size': 10, 'device_type': 'io1',
- 'iops': 1000
+ retval = [FilterModule.get_attr(d, attribute) for d in data]
+
+ return retval
+
+ @staticmethod
+ def oo_select_keys(data, keys):
+ ''' This returns a list, which contains the value portions for the keys
+ Ex: data = { 'a':1, 'b':2, 'c':3 }
+ keys = ['a', 'c']
+ returns [1, 3]
+ '''
+
+ if not issubclass(type(data), dict):
+ raise errors.AnsibleFilterError("|failed expects to filter on a dict")
+
+ if not issubclass(type(keys), list):
+ raise errors.AnsibleFilterError("|failed expects first param is a list")
+
+ # Gather up the values for the list of keys passed in
+ retval = [data[key] for key in keys]
+
+ return retval
+
+ @staticmethod
+ def oo_prepend_strings_in_list(data, prepend):
+ ''' This takes a list of strings and prepends a string to each item in the
+ list
+ Ex: data = ['cart', 'tree']
+ prepend = 'apple-'
+ returns ['apple-cart', 'apple-tree']
+ '''
+ if not issubclass(type(data), list):
+ raise errors.AnsibleFilterError("|failed expects first param is a list")
+ if not all(isinstance(x, basestring) for x in data):
+ raise errors.AnsibleFilterError("|failed expects first param is a list"
+ " of strings")
+ retval = [prepend + s for s in data]
+ return retval
+
+ @staticmethod
+ def oo_combine_key_value(data, joiner='='):
+ '''Take a list of dict in the form of { 'key': 'value'} and
+ arrange them as a list of strings ['key=value']
+ '''
+ if not issubclass(type(data), list):
+ raise errors.AnsibleFilterError("|failed expects first param is a list")
+
+ rval = []
+ for item in data:
+ rval.append("%s%s%s" % (item['key'], joiner, item['value']))
+
+ return rval
+
+ @staticmethod
+ def oo_ami_selector(data, image_name):
+ ''' This takes a list of amis and an image name and attempts to return
+ the latest ami.
+ '''
+ if not issubclass(type(data), list):
+ raise errors.AnsibleFilterError("|failed expects first param is a list")
+
+ if not data:
+ return None
+ else:
+ if image_name is None or not image_name.endswith('_*'):
+ ami = sorted(data, key=itemgetter('name'), reverse=True)[0]
+ return ami['ami_id']
+ else:
+ ami_info = [(ami, ami['name'].split('_')[-1]) for ami in data]
+ ami = sorted(ami_info, key=itemgetter(1), reverse=True)[0][0]
+ return ami['ami_id']
+
+ @staticmethod
+ def oo_ec2_volume_definition(data, host_type, docker_ephemeral=False):
+ ''' This takes a dictionary of volume definitions and returns a valid ec2
+ volume definition based on the host_type and the values in the
+ dictionary.
+ The dictionary should look similar to this:
+ { 'master':
+ { 'root':
+ { 'volume_size': 10, 'device_type': 'gp2',
+ 'iops': 500
+ }
},
- 'docker':
- { 'volume_size': 40, 'device_type': 'gp2',
- 'iops': 500, 'ephemeral': 'true'
+ 'node':
+ { 'root':
+ { 'volume_size': 10, 'device_type': 'io1',
+ 'iops': 1000
+ },
+ 'docker':
+ { 'volume_size': 40, 'device_type': 'gp2',
+ 'iops': 500, 'ephemeral': 'true'
+ }
}
}
- }
- '''
- if not issubclass(type(data), dict):
- raise errors.AnsibleFilterError("|failed expects first param is a dict")
- if host_type not in ['master', 'node']:
- raise errors.AnsibleFilterError("|failed expects either master or node"
- " host type")
-
- root_vol = data[host_type]['root']
- root_vol['device_name'] = '/dev/sda1'
- root_vol['delete_on_termination'] = True
- if root_vol['device_type'] != 'io1':
- root_vol.pop('iops', None)
- if host_type == 'node':
- docker_vol = data[host_type]['docker']
- docker_vol['device_name'] = '/dev/xvdb'
- docker_vol['delete_on_termination'] = True
- if docker_vol['device_type'] != 'io1':
- docker_vol.pop('iops', None)
- if docker_ephemeral:
- docker_vol.pop('device_type', None)
- docker_vol.pop('delete_on_termination', None)
- docker_vol['ephemeral'] = 'ephemeral0'
- return [root_vol, docker_vol]
- return [root_vol]
-
-# disabling pylint checks for too-few-public-methods and no-self-use since we
-# need to expose a FilterModule object that has a filters method that returns
-# a mapping of filter names to methods.
-# pylint: disable=too-few-public-methods, no-self-use
-class FilterModule(object):
- ''' FilterModule '''
+ '''
+ if not issubclass(type(data), dict):
+ raise errors.AnsibleFilterError("|failed expects first param is a dict")
+ if host_type not in ['master', 'node']:
+ raise errors.AnsibleFilterError("|failed expects either master or node"
+ " host type")
+
+ root_vol = data[host_type]['root']
+ root_vol['device_name'] = '/dev/sda1'
+ root_vol['delete_on_termination'] = True
+ if root_vol['device_type'] != 'io1':
+ root_vol.pop('iops', None)
+ if host_type == 'node':
+ docker_vol = data[host_type]['docker']
+ docker_vol['device_name'] = '/dev/xvdb'
+ docker_vol['delete_on_termination'] = True
+ if docker_vol['device_type'] != 'io1':
+ docker_vol.pop('iops', None)
+ if docker_ephemeral:
+ docker_vol.pop('device_type', None)
+ docker_vol.pop('delete_on_termination', None)
+ docker_vol['ephemeral'] = 'ephemeral0'
+ return [root_vol, docker_vol]
+ return [root_vol]
+
+ @staticmethod
+ def oo_split(string, separator=','):
+ ''' This splits the input string into a list
+ '''
+ return string.split(separator)
+
+ @staticmethod
+ def oo_filter_list(data, filter_attr=None):
+ ''' This returns a list, which contains all items where filter_attr
+ evaluates to true
+ Ex: data = [ { a: 1, b: True },
+ { a: 3, b: False },
+ { a: 5, b: True } ]
+ filter_attr = 'b'
+ returns [ { a: 1, b: True },
+ { a: 5, b: True } ]
+ '''
+ if not issubclass(type(data), list):
+ raise errors.AnsibleFilterError("|failed expects to filter on a list")
+
+ if not issubclass(type(filter_attr), str):
+ raise errors.AnsibleFilterError("|failed expects filter_attr is a str")
+
+ # Gather up the values for the list of keys passed in
+ return [x for x in data if x[filter_attr]]
+
def filters(self):
''' returns a mapping of filters to methods '''
return {
- "oo_select_keys": oo_select_keys,
- "oo_collect": oo_collect,
- "oo_flatten": oo_flatten,
- "oo_len": oo_len,
- "oo_pdb": oo_pdb,
- "oo_prepend_strings_in_list": oo_prepend_strings_in_list,
- "oo_ami_selector": oo_ami_selector,
- "oo_ec2_volume_definition": oo_ec2_volume_definition
+ "oo_select_keys": self.oo_select_keys,
+ "oo_collect": self.oo_collect,
+ "oo_flatten": self.oo_flatten,
+ "oo_pdb": self.oo_pdb,
+ "oo_prepend_strings_in_list": self.oo_prepend_strings_in_list,
+ "oo_ami_selector": self.oo_ami_selector,
+ "oo_ec2_volume_definition": self.oo_ec2_volume_definition,
+ "oo_combine_key_value": self.oo_combine_key_value,
+ "oo_split": self.oo_split,
+ "oo_filter_list": self.oo_filter_list
}
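
The three filters added to oo_filters.py (`oo_combine_key_value`, `oo_split`, `oo_filter_list`) are easiest to understand from their outputs. The following sketch (Python 2, not part of this commit) re-implements their logic inline on sample data taken from the docstrings, rather than importing the plugin:

```
# oo_combine_key_value: [{'key': k, 'value': v}, ...] -> ['k=v', ...]
data = [{'key': 'region', 'value': 'infra'}, {'key': 'zone', 'value': 'default'}]
print ["%s=%s" % (item['key'], item['value']) for item in data]
# ['region=infra', 'zone=default']

# oo_split: split a string on a separator (',' by default)
print 'master,node,etcd'.split(',')
# ['master', 'node', 'etcd']

# oo_filter_list: keep only the items whose filter_attr evaluates to true
hosts = [{'a': 1, 'b': True}, {'a': 3, 'b': False}, {'a': 5, 'b': True}]
print [x for x in hosts if x['b']]
# -> the two hosts where 'b' is True
```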
diff --git a/git/.pylintrc b/git/.pylintrc
index 2d45f867e..af8f1656f 100644
--- a/git/.pylintrc
+++ b/git/.pylintrc
@@ -70,7 +70,8 @@ confidence=
# --enable=similarities". If you want to run only the classes checker, but have
# no Warning level messages displayed, use"--disable=all --enable=classes
# --disable=W"
-disable=E1608,W1627,E1601,E1603,E1602,E1605,E1604,E1607,E1606,W1621,W1620,W1623,W1622,W1625,W1624,W1609,W1608,W1607,W1606,W1605,W1604,W1603,W1602,W1601,W1639,W1640,I0021,W1638,I0020,W1618,W1619,W1630,W1626,W1637,W1634,W1635,W1610,W1611,W1612,W1613,W1614,W1615,W1616,W1617,W1632,W1633,W0704,W1628,W1629,W1636
+# w0511 - fixme - disabled because TODOs are acceptable
+disable=E1608,W1627,E1601,E1603,E1602,E1605,E1604,E1607,E1606,W1621,W1620,W1623,W1622,W1625,W1624,W1609,W1608,W1607,W1606,W1605,W1604,W1603,W1602,W1601,W1639,W1640,I0021,W1638,I0020,W1618,W1619,W1630,W1626,W1637,W1634,W1635,W1610,W1611,W1612,W1613,W1614,W1615,W1616,W1617,W1632,W1633,W0704,W1628,W1629,W1636,W0511
[REPORTS]
@@ -285,7 +286,7 @@ notes=FIXME,XXX,TODO
[FORMAT]
# Maximum number of characters on a single line.
-max-line-length=100
+max-line-length=120
# Regexp for a line that is allowed to be longer than the limit.
ignore-long-lines=^\s*(# )?<?https?://\S+>?$
@@ -321,7 +322,7 @@ max-args=5
ignored-argument-names=_.*
# Maximum number of locals for function / method body
-max-locals=15
+max-locals=20
# Maximum number of return / yield for function / method body
max-returns=6
diff --git a/inventory/byo/hosts b/inventory/byo/hosts
index 728eec8aa..4d4da5468 100644
--- a/inventory/byo/hosts
+++ b/inventory/byo/hosts
@@ -17,20 +17,23 @@ ansible_ssh_user=root
deployment_type=enterprise
# Pre-release registry URL
-openshift_registry_url=docker-buildvm-rhose.usersys.redhat.com:5000/openshift3_beta/ose-${component}:${version}
+oreg_url=docker-buildvm-rhose.usersys.redhat.com:5000/openshift3_beta/ose-${component}:${version}
# Pre-release additional repo
-#openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'baseurl': 'http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterprise/3.0/latest/RH7-RHOSE-3.0/$basearch/os', 'enabled': 1, 'gpgcheck': 0}]
-openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'baseurl': 'http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterpriseErrata/3.0/latest/RH7-RHOSE-3.0/$basearch/os', 'enabled': 1, 'gpgcheck': 0}]
+openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'baseurl': 'http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterprise/3.0/latest/RH7-RHOSE-3.0/$basearch/os', 'enabled': 1, 'gpgcheck': 0}]
+#openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'baseurl': 'http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterpriseErrata/3.0/latest/RH7-RHOSE-3.0/$basearch/os', 'enabled': 1, 'gpgcheck': 0}]
# Origin copr repo
#openshift_additional_repos=[{'id': 'openshift-origin-copr', 'name': 'OpenShift Origin COPR', 'baseurl': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/', 'enabled': 1, 'gpgcheck': 1, gpgkey: 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg'}]
+# htpasswd auth
+#openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/openshift/htpasswd'}]
+
# host group for masters
[masters]
ose3-master-ansible.test.example.com
# host group for nodes
[nodes]
-ose3-master-ansible.test.example.com openshift_node_labels="{'region': 'infra', 'zone': 'default'}"
+#ose3-master-ansible.test.example.com openshift_node_labels="{'region': 'infra', 'zone': 'default'}"
ose3-node[1:2]-ansible.test.example.com openshift_node_labels="{'region': 'primary', 'zone': 'default'}"
diff --git a/inventory/libvirt/hosts/libvirt_generic.py b/inventory/libvirt/hosts/libvirt_generic.py
index 4652f112e..1c9c17308 100755
--- a/inventory/libvirt/hosts/libvirt_generic.py
+++ b/inventory/libvirt/hosts/libvirt_generic.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python2
-"""
+'''
libvirt external inventory script
=================================
@@ -12,7 +12,7 @@ To use this, copy this file over /etc/ansible/hosts and chmod +x the file.
This, more or less, allows you to keep one central database containing
info about all of your managed instances.
-"""
+'''
# (c) 2015, Jason DeTiberus <jdetiber@redhat.com>
#
@@ -36,9 +36,7 @@ info about all of your managed instances.
import argparse
import ConfigParser
import os
-import re
import sys
-from time import time
import libvirt
import xml.etree.ElementTree as ET
@@ -49,8 +47,11 @@ except ImportError:
class LibvirtInventory(object):
+ ''' libvirt dynamic inventory '''
def __init__(self):
+ ''' Main execution path '''
+
self.inventory = dict() # A list of groups and the hosts in that group
self.cache = dict() # Details about hosts in the inventory
@@ -59,13 +60,15 @@ class LibvirtInventory(object):
self.parse_cli_args()
if self.args.host:
- print self.json_format_dict(self.get_host_info(), self.args.pretty)
+ print _json_format_dict(self.get_host_info(), self.args.pretty)
elif self.args.list:
- print self.json_format_dict(self.get_inventory(), self.args.pretty)
+ print _json_format_dict(self.get_inventory(), self.args.pretty)
else: # default action with no options
- print self.json_format_dict(self.get_inventory(), self.args.pretty)
+ print _json_format_dict(self.get_inventory(), self.args.pretty)
def read_settings(self):
+ ''' Reads the settings from the libvirt.ini file '''
+
config = ConfigParser.SafeConfigParser()
config.read(
os.path.dirname(os.path.realpath(__file__)) + '/libvirt.ini'
@@ -73,6 +76,8 @@ class LibvirtInventory(object):
self.libvirt_uri = config.get('libvirt', 'uri')
def parse_cli_args(self):
+ ''' Command line argument processing '''
+
parser = argparse.ArgumentParser(
description='Produce an Ansible Inventory file based on libvirt'
)
@@ -96,25 +101,27 @@ class LibvirtInventory(object):
self.args = parser.parse_args()
def get_host_info(self):
+ ''' Get variables about a specific host '''
+
inventory = self.get_inventory()
if self.args.host in inventory['_meta']['hostvars']:
return inventory['_meta']['hostvars'][self.args.host]
def get_inventory(self):
+ ''' Construct the inventory '''
+
inventory = dict(_meta=dict(hostvars=dict()))
conn = libvirt.openReadOnly(self.libvirt_uri)
if conn is None:
- print "Failed to open connection to %s" % libvirt_uri
+ print "Failed to open connection to %s" % self.libvirt_uri
sys.exit(1)
domains = conn.listAllDomains()
if domains is None:
- print "Failed to list domains for connection %s" % libvirt_uri
+ print "Failed to list domains for connection %s" % self.libvirt_uri
sys.exit(1)
- arp_entries = self.parse_arp_entries()
-
for domain in domains:
hostvars = dict(libvirt_name=domain.name(),
libvirt_id=domain.ID(),
@@ -130,21 +137,30 @@ class LibvirtInventory(object):
hostvars['libvirt_status'] = 'running'
root = ET.fromstring(domain.XMLDesc())
- ns = {'ansible': 'https://github.com/ansible/ansible'}
- for tag_elem in root.findall('./metadata/ansible:tags/ansible:tag', ns):
+ ansible_ns = {'ansible': 'https://github.com/ansible/ansible'}
+ for tag_elem in root.findall('./metadata/ansible:tags/ansible:tag', ansible_ns):
tag = tag_elem.text
- self.push(inventory, "tag_%s" % tag, domain_name)
- self.push(hostvars, 'libvirt_tags', tag)
+ _push(inventory, "tag_%s" % tag, domain_name)
+ _push(hostvars, 'libvirt_tags', tag)
# TODO: support more than one network interface, also support
# interface types other than 'network'
interface = root.find("./devices/interface[@type='network']")
if interface is not None:
+ source_elem = interface.find('source')
mac_elem = interface.find('mac')
- if mac_elem is not None:
- mac = mac_elem.get('address')
- if mac in arp_entries:
- ip_address = arp_entries[mac]['ip_address']
+ if source_elem is not None and \
+ mac_elem is not None:
+ # Adding this to disable pylint check specifically
+ # ignoring libvirt-python versions that
+ # do not include DHCPLeases
+ # This is needed until we upgrade the build bot to
+ # RHEL7 (>= 1.2.6 libvirt)
+ # pylint: disable=no-member
+ dhcp_leases = conn.networkLookupByName(source_elem.get('network')) \
+ .DHCPLeases(mac_elem.get('address'))
+ if len(dhcp_leases) > 0:
+ ip_address = dhcp_leases[0]['ipaddr']
hostvars['ansible_ssh_host'] = ip_address
hostvars['libvirt_ip_address'] = ip_address
@@ -152,28 +168,23 @@ class LibvirtInventory(object):
return inventory
- def parse_arp_entries(self):
- arp_entries = dict()
- with open('/proc/net/arp', 'r') as f:
- # throw away the header
- f.readline()
-
- for line in f:
- ip_address, _, _, mac, _, device = line.strip().split()
- arp_entries[mac] = dict(ip_address=ip_address, device=device)
-
- return arp_entries
-
- def push(self, my_dict, key, element):
- if key in my_dict:
- my_dict[key].append(element)
- else:
- my_dict[key] = [element]
-
- def json_format_dict(self, data, pretty=False):
- if pretty:
- return json.dumps(data, sort_keys=True, indent=2)
- else:
- return json.dumps(data)
+def _push(my_dict, key, element):
+ '''
+ Push element onto the my_dict[key] list,
+ initializing my_dict[key] first if it doesn't exist.
+ '''
+
+ if key in my_dict:
+ my_dict[key].append(element)
+ else:
+ my_dict[key] = [element]
+
+def _json_format_dict(data, pretty=False):
+ ''' Serialize data to a JSON formatted str '''
+
+ if pretty:
+ return json.dumps(data, sort_keys=True, indent=2)
+ else:
+ return json.dumps(data)
LibvirtInventory()
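For context, the lookup above replaces ARP-table scraping with libvirt's DHCP lease API. A minimal standalone sketch of that call, assuming libvirt-python >= 1.2.6 and made-up URI, network and MAC values:

    import libvirt

    LIBVIRT_URI = 'qemu:///system'       # assumed connection URI
    NETWORK_NAME = 'default'             # from the domain's <source network=...>
    MAC_ADDRESS = '52:54:00:12:34:56'    # from the domain's <mac address=...>

    conn = libvirt.openReadOnly(LIBVIRT_URI)
    # DHCPLeases() only exists in libvirt >= 1.2.6, hence the
    # "pylint: disable=no-member" in the inventory script above.
    leases = conn.networkLookupByName(NETWORK_NAME).DHCPLeases(MAC_ADDRESS)
    if leases:
        print leases[0]['ipaddr']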
diff --git a/inventory/multi_ec2.py b/inventory/multi_ec2.py
index f8196aefd..b7ce9e5dc 100755
--- a/inventory/multi_ec2.py
+++ b/inventory/multi_ec2.py
@@ -82,7 +82,6 @@ class MultiEc2(object):
else:
raise RuntimeError("Could not find valid ec2 credentials in the environment.")
- # Set the default cache path but if its defined we'll assign it.
if self.config.has_key('cache_location'):
self.cache_path = self.config['cache_location']
@@ -217,7 +216,12 @@ class MultiEc2(object):
# For any non-zero, raise an error on it
for result in provider_results:
if result['code'] != 0:
- raise RuntimeError(result['err'])
+ err_msg = ['\nProblem fetching account: {name}',
+ 'Error Code: {code}',
+ 'StdErr: {err}',
+ 'Stdout: {out}',
+ ]
+ raise RuntimeError('\n'.join(err_msg).format(**result))
else:
self.all_ec2_results[result['name']] = json.loads(result['out'])
@@ -248,8 +252,9 @@ class MultiEc2(object):
data[str(host_property)] = str(value)
# Add this group
- results["%s_%s" % (host_property, value)] = \
- copy.copy(results[acc_config['all_group']])
+ if results.has_key(acc_config['all_group']):
+ results["%s_%s" % (host_property, value)] = \
+ copy.copy(results[acc_config['all_group']])
# store the results back into all_ec2_results
self.all_ec2_results[acc_config['name']] = results
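The new error path above joins a small message template and formats it with the failing provider's result dict. A quick sketch of the message it produces, using made-up result values:

    result = {'name': 'aws-account-1', 'code': 2,
              'err': 'AuthFailure: credentials rejected', 'out': ''}
    err_msg = ['\nProblem fetching account: {name}',
               'Error Code: {code}',
               'StdErr: {err}',
               'Stdout: {out}',
              ]
    print '\n'.join(err_msg).format(**result)
    # (leading blank line, then:)
    # Problem fetching account: aws-account-1
    # Error Code: 2
    # StdErr: AuthFailure: credentials rejected
    # Stdout: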
diff --git a/inventory/openshift-ansible-inventory.spec b/inventory/openshift-ansible-inventory.spec
index cd2332549..900a27f3a 100644
--- a/inventory/openshift-ansible-inventory.spec
+++ b/inventory/openshift-ansible-inventory.spec
@@ -1,6 +1,6 @@
Summary: OpenShift Ansible Inventories
Name: openshift-ansible-inventory
-Version: 0.0.7
+Version: 0.0.8
Release: 1%{?dist}
License: ASL 2.0
URL: https://github.com/openshift/openshift-ansible
@@ -36,6 +36,17 @@ cp -p gce/hosts/gce.py %{buildroot}/usr/share/ansible/inventory/gce
/usr/share/ansible/inventory/gce/gce.py*
%changelog
+* Tue Jun 09 2015 Kenny Woodson <kwoodson@redhat.com> 0.0.8-1
+- Added more verbosity when an error happens. Also fixed a bug.
+ (kwoodson@redhat.com)
+- Implement OpenStack provider (lhuard@amadeus.com)
+- * rename openshift_registry_url oreg_url * rename option_images to
+ _{oreg|ortr}_images (jhonce@redhat.com)
+- Fix the remaining pylint warnings (lhuard@amadeus.com)
+- Fix some of the pylint warnings (lhuard@amadeus.com)
+- [libvirt cluster] Use net-dhcp-leases to find VMs’ IPs (lhuard@amadeus.com)
+- fixed the openshift-ansible-bin build (twiest@redhat.com)
+
* Fri May 15 2015 Kenny Woodson <kwoodson@redhat.com> 0.0.7-1
- Making multi_ec2 into a library (kwoodson@redhat.com)
diff --git a/inventory/openstack/hosts/hosts b/inventory/openstack/hosts/hosts
new file mode 100644
index 000000000..9cdc31449
--- /dev/null
+++ b/inventory/openstack/hosts/hosts
@@ -0,0 +1 @@
+localhost ansible_sudo=no ansible_python_interpreter=/usr/bin/python2 connection=local
diff --git a/inventory/openstack/hosts/nova.ini b/inventory/openstack/hosts/nova.ini
new file mode 100644
index 000000000..4900c4965
--- /dev/null
+++ b/inventory/openstack/hosts/nova.ini
@@ -0,0 +1,45 @@
+# Ansible OpenStack external inventory script
+
+[openstack]
+
+#-------------------------------------------------------------------------
+# Required settings
+#-------------------------------------------------------------------------
+
+# API version
+version = 2
+
+# OpenStack nova username
+username =
+
+# OpenStack nova api_key or password
+api_key =
+
+# OpenStack nova auth_url
+auth_url =
+
+# OpenStack nova project_id or tenant name
+project_id =
+
+#-------------------------------------------------------------------------
+# Optional settings
+#-------------------------------------------------------------------------
+
+# Authentication system
+# auth_system = keystone
+
+# OpenStack region name to use
+# region_name =
+
+# Specify a preference for public or private IPs (public is default)
+# prefer_private = False
+
+# What service type (required for newer nova client)
+# service_type = compute
+
+
+# TODO: Some other options
+# insecure =
+# endpoint_type =
+# extensions =
+# service_name =
diff --git a/inventory/openstack/hosts/nova.py b/inventory/openstack/hosts/nova.py
new file mode 100755
index 000000000..d5bd8d1ee
--- /dev/null
+++ b/inventory/openstack/hosts/nova.py
@@ -0,0 +1,224 @@
+#!/usr/bin/env python2
+
+# pylint: skip-file
+
+# (c) 2012, Marco Vito Moscaritolo <marco@agavee.com>
+#
+# This file is part of Ansible,
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+import sys
+import re
+import os
+import ConfigParser
+from novaclient import client as nova_client
+
+try:
+ import json
+except ImportError:
+ import simplejson as json
+
+###################################################
+# executed with no parameters, return the list of
+# all groups and hosts
+
+NOVA_CONFIG_FILES = [os.getcwd() + "/nova.ini",
+ os.path.expanduser(os.environ.get('ANSIBLE_CONFIG', "~/nova.ini")),
+ "/etc/ansible/nova.ini"]
+
+NOVA_DEFAULTS = {
+ 'auth_system': None,
+ 'region_name': None,
+ 'service_type': 'compute',
+}
+
+
+def nova_load_config_file():
+ p = ConfigParser.SafeConfigParser(NOVA_DEFAULTS)
+
+ for path in NOVA_CONFIG_FILES:
+ if os.path.exists(path):
+ p.read(path)
+ return p
+
+ return None
+
+
+def get_fallback(config, value, section="openstack"):
+ """
+ Get value from config object and return the value
+ or false
+ """
+ try:
+ return config.get(section, value)
+ except ConfigParser.NoOptionError:
+ return False
+
+
+def push(data, key, element):
+ """
+ Assist in adding items to a dictionary of lists
+ """
+ if (not element) or (not key):
+ return
+
+ if key in data:
+ data[key].append(element)
+ else:
+ data[key] = [element]
+
+
+def to_safe(word):
+ '''
+ Converts 'bad' characters in a string to underscores so they can
+ be used as Ansible groups
+ '''
+ return re.sub(r"[^A-Za-z0-9\-]", "_", word)
+
+
+def get_ips(server, access_ip=True):
+ """
+ Returns a list of the server's IPs, or the preferred
+ access IP
+ """
+ private = []
+ public = []
+ address_list = []
+ # Iterate through each server's network(s), get addresses and their type
+ addresses = getattr(server, 'addresses', {})
+ if len(addresses) > 0:
+ for network in addresses.itervalues():
+ for address in network:
+ if address.get('OS-EXT-IPS:type', False) == 'fixed':
+ private.append(address['addr'])
+ elif address.get('OS-EXT-IPS:type', False) == 'floating':
+ public.append(address['addr'])
+
+ if not access_ip:
+ address_list.append(server.accessIPv4)
+ address_list.extend(private)
+ address_list.extend(public)
+ return address_list
+
+ access_ip = None
+ # Append group to list
+ if server.accessIPv4:
+ access_ip = server.accessIPv4
+ if (not access_ip) and public and not (private and prefer_private):
+ access_ip = public[0]
+ if private and not access_ip:
+ access_ip = private[0]
+
+ return access_ip
+
+
+def get_metadata(server):
+ """Returns dictionary of all host metadata"""
+ get_ips(server, False)
+ results = {}
+ for key in vars(server):
+ # Extract value
+ value = getattr(server, key)
+
+ # Generate sanitized key
+ key = 'os_' + re.sub(r"[^A-Za-z0-9\-]", "_", key).lower()
+
+ # Add value to instance result (exclude manager class)
+ #TODO: maybe use value.__class__ or similar inside of key_name
+ if key != 'os_manager':
+ results[key] = value
+ return results
+
+config = nova_load_config_file()
+if not config:
+ sys.exit('Unable to find config file in %s' % ', '.join(NOVA_CONFIG_FILES))
+
+# Load up connections info based on config and then environment
+# variables
+username = (get_fallback(config, 'username') or
+ os.environ.get('OS_USERNAME', None))
+api_key = (get_fallback(config, 'api_key') or
+ os.environ.get('OS_PASSWORD', None))
+auth_url = (get_fallback(config, 'auth_url') or
+ os.environ.get('OS_AUTH_URL', None))
+project_id = (get_fallback(config, 'project_id') or
+ os.environ.get('OS_TENANT_NAME', None))
+region_name = (get_fallback(config, 'region_name') or
+ os.environ.get('OS_REGION_NAME', None))
+auth_system = (get_fallback(config, 'auth_system') or
+ os.environ.get('OS_AUTH_SYSTEM', None))
+
+# Determine what type of IP is preferred to return
+prefer_private = False
+try:
+ prefer_private = config.getboolean('openstack', 'prefer_private')
+except ConfigParser.NoOptionError:
+ pass
+
+client = nova_client.Client(
+ version=config.get('openstack', 'version'),
+ username=username,
+ api_key=api_key,
+ auth_url=auth_url,
+ region_name=region_name,
+ project_id=project_id,
+ auth_system=auth_system,
+ service_type=config.get('openstack', 'service_type'),
+)
+
+# Default or added list option
+if (len(sys.argv) == 2 and sys.argv[1] == '--list') or len(sys.argv) == 1:
+ groups = {'_meta': {'hostvars': {}}}
+ # Cycle on servers
+ for server in client.servers.list():
+ access_ip = get_ips(server)
+
+ # Push to name group of 1
+ push(groups, server.name, access_ip)
+
+ # Run through each metadata item and add instance to it
+ for key, value in server.metadata.iteritems():
+ composed_key = to_safe('tag_{0}_{1}'.format(key, value))
+ push(groups, composed_key, access_ip)
+
+ # Do special handling of group for backwards compat
+ # inventory groups
+ group = server.metadata['group'] if 'group' in server.metadata else 'undefined'
+ push(groups, group, access_ip)
+
+ # Add vars to _meta key for performance optimization in
+ # Ansible 1.3+
+ groups['_meta']['hostvars'][access_ip] = get_metadata(server)
+
+ # Return server list
+ print(json.dumps(groups, sort_keys=True, indent=2))
+ sys.exit(0)
+
+#####################################################
+# executed with a hostname as a parameter, return the
+# variables for that host
+
+elif len(sys.argv) == 3 and (sys.argv[1] == '--host'):
+ results = {}
+ ips = []
+ for server in client.servers.list():
+ if sys.argv[2] in (get_ips(server) or []):
+ results = get_metadata(server)
+ print(json.dumps(results, sort_keys=True, indent=2))
+ sys.exit(0)
+
+else:
+ print "usage: --list ..OR.. --host <hostname>"
+ sys.exit(1)
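A usage note on the script above: settings left empty in nova.ini fall back to the OS_* environment variables, and every server is pushed into a group per name and per metadata tag. A small self-contained sketch of that grouping, with simplified helpers and made-up data:

    import re

    def to_safe(word):
        # same rule as nova.py: anything outside [A-Za-z0-9-] becomes '_'
        return re.sub(r"[^A-Za-z0-9\-]", "_", word)

    def push(data, key, element):
        # simplified: the real push() also skips empty keys/elements
        data.setdefault(key, []).append(element)

    groups = {'_meta': {'hostvars': {}}}
    access_ip = '10.0.0.5'                        # what get_ips() would return
    metadata = {'env': 'int', 'host-type': 'node'}

    push(groups, 'node-1', access_ip)
    for key, value in metadata.iteritems():
        push(groups, to_safe('tag_{0}_{1}'.format(key, value)), access_ip)

    # groups now holds 'node-1', 'tag_env_int' and 'tag_host-type_node',
    # each mapping to ['10.0.0.5'] (hyphens are kept by to_safe).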
diff --git a/lib/ansible_helper.rb b/lib/ansible_helper.rb
deleted file mode 100644
index 395bb51a8..000000000
--- a/lib/ansible_helper.rb
+++ /dev/null
@@ -1,94 +0,0 @@
-require 'json'
-require 'parseconfig'
-
-module OpenShift
- module Ops
- class AnsibleHelper
- MYDIR = File.expand_path(File.dirname(__FILE__))
-
- attr_accessor :inventory, :extra_vars, :verbosity, :pipelining
-
- def initialize(extra_vars={}, inventory=nil)
- @extra_vars = extra_vars
- @verbosity = '-vvvv'
- @pipelining = true
- end
-
- def all_eof(files)
- files.find { |f| !f.eof }.nil?
- end
-
- def run_playbook(playbook)
- @inventory = 'inventory/hosts' if @inventory.nil?
-
- # This is used instead of passing in the json on the cli to avoid quoting problems
- tmpfile = Tempfile.open('extra_vars') { |f| f.write(@extra_vars.to_json); f}
-
- cmds = []
- #cmds << 'set -x'
- cmds << %Q[export ANSIBLE_FILTER_PLUGINS="#{Dir.pwd}/filter_plugins"]
-
- # We need this for launching instances, otherwise conflicting keys and what not kill it
- cmds << %q[export ANSIBLE_TRANSPORT="ssh"]
- cmds << %q[export ANSIBLE_SSH_ARGS="-o ForwardAgent=yes -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null"]
-
- # We need pipelining off so that we can do sudo to enable the root account
- cmds << %Q[export ANSIBLE_SSH_PIPELINING='#{@pipelining.to_s}']
- cmds << %Q[time ansible-playbook -i #{@inventory} #{@verbosity} #{playbook} --extra-vars '@#{tmpfile.path}' ]
- cmd = cmds.join(' ; ')
-
- pid = spawn(cmd, :out => $stdout, :err => $stderr, :close_others => true)
- _, state = Process.wait2(pid)
-
- if 0 != state.exitstatus
- raise %Q[Warning failed with exit code: #{state.exitstatus}
-
-#{cmd}
-
-extra_vars: #{@extra_vars.to_json}
-]
- end
- ensure
- tmpfile.unlink if tmpfile
- end
-
- def merge_extra_vars_file(file)
- vars = YAML.load_file(file)
- @extra_vars.merge!(vars)
- end
-
- def self.for_gce
- ah = AnsibleHelper.new
-
- # GCE specific configs
- gce_ini = "#{MYDIR}/../inventory/gce/gce.ini"
- config = ParseConfig.new(gce_ini)
-
- if config['gce']['gce_project_id'].to_s.empty?
- raise %Q['gce_project_id' not set in #{gce_ini}]
- end
- ah.extra_vars['gce_project_id'] = config['gce']['gce_project_id']
-
- if config['gce']['gce_service_account_pem_file_path'].to_s.empty?
- raise %Q['gce_service_account_pem_file_path' not set in #{gce_ini}]
- end
- ah.extra_vars['gce_pem_file'] = config['gce']['gce_service_account_pem_file_path']
-
- if config['gce']['gce_service_account_email_address'].to_s.empty?
- raise %Q['gce_service_account_email_address' not set in #{gce_ini}]
- end
- ah.extra_vars['gce_service_account_email'] = config['gce']['gce_service_account_email_address']
-
- ah.inventory = 'inventory/gce/gce.py'
- return ah
- end
-
- def self.for_aws
- ah = AnsibleHelper.new
-
- ah.inventory = 'inventory/aws/ec2.py'
- return ah
- end
- end
- end
-end
diff --git a/lib/aws_command.rb b/lib/aws_command.rb
deleted file mode 100644
index 267513f37..000000000
--- a/lib/aws_command.rb
+++ /dev/null
@@ -1,148 +0,0 @@
-require 'thor'
-
-require_relative 'aws_helper'
-require_relative 'launch_helper'
-
-module OpenShift
- module Ops
- class AwsCommand < Thor
- # WARNING: we do not currently support environments with hyphens in the name
- SUPPORTED_ENVS = %w(prod stg int ops twiest gshipley kint test jhonce amint tdint lint jdetiber)
-
- option :type, :required => true, :enum => LaunchHelper.get_aws_host_types,
- :desc => 'The host type of the new instances.'
- option :env, :required => true, :aliases => '-e', :enum => SUPPORTED_ENVS,
- :desc => 'The environment of the new instances.'
- option :count, :default => 1, :aliases => '-c', :type => :numeric,
- :desc => 'The number of instances to create'
- option :tag, :type => :array,
- :desc => 'The tag(s) to add to the new instances. Allowed characters are letters, numbers, and hyphens.'
- desc "launch", "Launches instances."
- def launch()
- AwsHelper.check_creds()
-
- # Expand all of the instance names so that we have a complete array
- names = []
- options[:count].times { names << "#{options[:env]}-#{options[:type]}-#{SecureRandom.hex(5)}" }
-
- ah = AnsibleHelper.for_aws()
-
- # AWS specific configs
- ah.extra_vars['oo_new_inst_names'] = names
- ah.extra_vars['oo_new_inst_tags'] = options[:tag]
- ah.extra_vars['oo_env'] = options[:env]
-
- # Add a created by tag
- ah.extra_vars['oo_new_inst_tags'] = {} if ah.extra_vars['oo_new_inst_tags'].nil?
-
- ah.extra_vars['oo_new_inst_tags']['created-by'] = ENV['USER']
- ah.extra_vars['oo_new_inst_tags'].merge!(AwsHelper.generate_env_tag(options[:env]))
- ah.extra_vars['oo_new_inst_tags'].merge!(AwsHelper.generate_host_type_tag(options[:type]))
- ah.extra_vars['oo_new_inst_tags'].merge!(AwsHelper.generate_env_host_type_tag(options[:env], options[:type]))
-
- puts
- puts "Creating #{options[:count]} #{options[:type]} instance(s) in AWS..."
-
- # Make sure we're completely up to date before launching
- clear_cache()
- ah.run_playbook("playbooks/aws/#{options[:type]}/launch.yml")
- ensure
- # This is so that if we a config right after a launch, the newly launched instances will be
- # in the list.
- clear_cache()
- end
-
- desc "clear-cache", 'Clear the inventory cache'
- def clear_cache()
- print "Clearing inventory cache... "
- AwsHelper.clear_inventory_cache()
- puts "Done."
- end
-
- option :name, :required => false, :type => :string,
- :desc => 'The name of the instance to configure.'
- option :env, :required => false, :aliases => '-e', :enum => SUPPORTED_ENVS,
- :desc => 'The environment of the new instances.'
- option :type, :required => false, :enum => LaunchHelper.get_aws_host_types,
- :desc => 'The type of the instances to configure.'
- desc "config", 'Configures instances.'
- def config()
- ah = AnsibleHelper.for_aws()
-
- abort 'Error: you can\'t specify both --name and --type' unless options[:type].nil? || options[:name].nil?
-
- abort 'Error: you can\'t specify both --name and --env' unless options[:env].nil? || options[:name].nil?
-
- host_type = nil
- if options[:name]
- details = AwsHelper.get_host_details(options[:name])
- ah.extra_vars['oo_host_group_exp'] = details['ec2_public_dns_name']
- ah.extra_vars['oo_env'] = details['ec2_tag_environment']
- host_type = details['ec2_tag_host-type']
- elsif options[:type] && options[:env]
- oo_env_host_type_tag = AwsHelper.generate_env_host_type_tag_name(options[:env], options[:type])
- ah.extra_vars['oo_host_group_exp'] = "groups['#{oo_env_host_type_tag}']"
- ah.extra_vars['oo_env'] = options[:env]
- host_type = options[:type]
- else
- abort 'Error: you need to specify either --name or (--type and --env)'
- end
-
- puts
- puts "Configuring #{options[:type]} instance(s) in AWS..."
-
- ah.run_playbook("playbooks/aws/#{host_type}/config.yml")
- end
-
- option :env, :required => false, :aliases => '-e', :enum => SUPPORTED_ENVS,
- :desc => 'The environment to list.'
- desc "list", "Lists instances."
- def list()
- AwsHelper.check_creds()
- hosts = AwsHelper.get_hosts()
-
- hosts.delete_if { |h| h.env != options[:env] } unless options[:env].nil?
-
- fmt_str = "%34s %5s %8s %17s %7s"
-
- puts
- puts fmt_str % ['Name','Env', 'State', 'IP Address', 'Created By']
- puts fmt_str % ['----','---', '-----', '----------', '----------']
- hosts.each { |h| puts fmt_str % [h.name, h.env, h.state, h.public_ip, h.created_by ] }
- puts
- end
-
- desc "ssh", "Ssh to an instance"
- def ssh(*ssh_ops, host)
- if host =~ /^([\w\d_.\-]+)@([\w\d\-_.]+)/
- user = $1
- host = $2
- end
-
- details = AwsHelper.get_host_details(host)
- abort "\nError: Instance [#{host}] is not RUNNING\n\n" unless details['ec2_state'] == 'running'
-
- cmd = "ssh #{ssh_ops.join(' ')}"
-
- if user.nil?
- cmd += " "
- else
- cmd += " #{user}@"
- end
-
- cmd += "#{details['ec2_ip_address']}"
-
- exec(cmd)
- end
-
- desc 'types', 'Displays instance types'
- def types()
- puts
- puts "Available Host Types"
- puts "--------------------"
- LaunchHelper.get_aws_host_types.each { |t| puts " #{t}" }
- puts
- end
- end
- end
-end
diff --git a/lib/aws_helper.rb b/lib/aws_helper.rb
deleted file mode 100644
index 4da5d0925..000000000
--- a/lib/aws_helper.rb
+++ /dev/null
@@ -1,85 +0,0 @@
-require 'fileutils'
-
-module OpenShift
- module Ops
- class AwsHelper
- MYDIR = File.expand_path(File.dirname(__FILE__))
-
- def self.get_list()
- cmd = "#{MYDIR}/../inventory/aws/ec2.py --list"
- hosts = %x[#{cmd} 2>&1]
-
- raise "Error: failed to list hosts\n#{hosts}" unless $?.exitstatus == 0
- return JSON.parse(hosts)
- end
-
- def self.get_hosts()
- hosts = get_list()
-
- retval = []
- hosts['_meta']['hostvars'].each do |host, info|
- retval << OpenStruct.new({
- :name => info['ec2_tag_Name'] || 'UNSET',
- :env => info['ec2_tag_environment'] || 'UNSET',
- :public_ip => info['ec2_ip_address'],
- :public_dns => info['ec2_public_dns_name'],
- :state => info['ec2_state'],
- :created_by => info['ec2_tag_created-by']
- })
- end
-
- retval.sort_by! { |h| [h.env, h.state, h.name] }
-
- return retval
- end
-
- def self.get_host_details(host)
- hosts = get_list()
- dns_names = hosts["tag_Name_#{host}"]
-
- raise "Host not found [#{host}]" if dns_names.nil?
- raise "Multiple entries found for [#{host}]" if dns_names.size > 1
-
- return hosts['_meta']['hostvars'][dns_names.first]
- end
-
- def self.check_creds()
- raise "AWS_ACCESS_KEY_ID environment variable must be set" if ENV['AWS_ACCESS_KEY_ID'].nil?
- raise "AWS_SECRET_ACCESS_KEY environment variable must be set" if ENV['AWS_SECRET_ACCESS_KEY'].nil?
- end
-
- def self.clear_inventory_cache()
- path = "#{ENV['HOME']}/.ansible/tmp"
- cache_files = ["#{path}/ansible-ec2.cache", "#{path}/ansible-ec2.index"]
- FileUtils.rm_f(cache_files)
- end
-
- def self.generate_env_tag(env)
- return { "environment" => env }
- end
-
- def self.generate_env_tag_name(env)
- h = generate_env_tag(env)
- return "tag_#{h.keys.first}_#{h.values.first}"
- end
-
- def self.generate_host_type_tag(host_type)
- return { "host-type" => host_type }
- end
-
- def self.generate_host_type_tag_name(host_type)
- h = generate_host_type_tag(host_type)
- return "tag_#{h.keys.first}_#{h.values.first}"
- end
-
- def self.generate_env_host_type_tag(env, host_type)
- return { "env-host-type" => "#{env}-#{host_type}" }
- end
-
- def self.generate_env_host_type_tag_name(env, host_type)
- h = generate_env_host_type_tag(env, host_type)
- return "tag_#{h.keys.first}_#{h.values.first}"
- end
- end
- end
-end
diff --git a/lib/gce_command.rb b/lib/gce_command.rb
deleted file mode 100644
index 214cc1c05..000000000
--- a/lib/gce_command.rb
+++ /dev/null
@@ -1,228 +0,0 @@
-require 'thor'
-require 'securerandom'
-require 'fileutils'
-
-require_relative 'gce_helper'
-require_relative 'launch_helper'
-require_relative 'ansible_helper'
-
-module OpenShift
- module Ops
- class GceCommand < Thor
- # WARNING: we do not currently support environments with hyphens in the name
- SUPPORTED_ENVS = %w(prod stg int twiest gshipley kint test jhonce amint tdint lint jdetiber)
-
- option :type, :required => true, :enum => LaunchHelper.get_gce_host_types,
- :desc => 'The host type of the new instances.'
- option :env, :required => true, :aliases => '-e', :enum => SUPPORTED_ENVS,
- :desc => 'The environment of the new instances.'
- option :count, :default => 1, :aliases => '-c', :type => :numeric,
- :desc => 'The number of instances to create'
- option :tag, :type => :array,
- :desc => 'The tag(s) to add to the new instances. Allowed characters are letters, numbers, and hyphens.'
- desc "launch", "Launches instances."
- def launch()
- # Expand all of the instance names so that we have a complete array
- names = []
- options[:count].times { names << "#{options[:env]}-#{options[:type]}-#{SecureRandom.hex(5)}" }
-
- ah = AnsibleHelper.for_gce()
-
- # GCE specific configs
- ah.extra_vars['oo_new_inst_names'] = names
- ah.extra_vars['oo_new_inst_tags'] = options[:tag]
- ah.extra_vars['oo_env'] = options[:env]
-
- # Add a created by tag
- ah.extra_vars['oo_new_inst_tags'] = [] if ah.extra_vars['oo_new_inst_tags'].nil?
-
- ah.extra_vars['oo_new_inst_tags'] << "created-by-#{ENV['USER']}"
- ah.extra_vars['oo_new_inst_tags'] << GceHelper.generate_env_tag(options[:env])
- ah.extra_vars['oo_new_inst_tags'] << GceHelper.generate_host_type_tag(options[:type])
- ah.extra_vars['oo_new_inst_tags'] << GceHelper.generate_env_host_type_tag(options[:env], options[:type])
-
- puts
- puts "Creating #{options[:count]} #{options[:type]} instance(s) in GCE..."
-
- ah.run_playbook("playbooks/gce/#{options[:type]}/launch.yml")
- end
-
-
- option :name, :required => false, :type => :string,
- :desc => 'The name of the instance to configure.'
- option :env, :required => false, :aliases => '-e', :enum => SUPPORTED_ENVS,
- :desc => 'The environment of the new instances.'
- option :type, :required => false, :enum => LaunchHelper.get_gce_host_types,
- :desc => 'The type of the instances to configure.'
- desc "config", 'Configures instances.'
- def config()
- ah = AnsibleHelper.for_gce()
-
- abort 'Error: you can\'t specify both --name and --type' unless options[:type].nil? || options[:name].nil?
-
- abort 'Error: you can\'t specify both --name and --env' unless options[:env].nil? || options[:name].nil?
-
- host_type = nil
- if options[:name]
- details = GceHelper.get_host_details(options[:name])
- ah.extra_vars['oo_host_group_exp'] = options[:name]
- ah.extra_vars['oo_env'] = details['env']
- host_type = details['host-type']
- elsif options[:type] && options[:env]
- oo_env_host_type_tag = GceHelper.generate_env_host_type_tag_name(options[:env], options[:type])
- ah.extra_vars['oo_host_group_exp'] = "groups['#{oo_env_host_type_tag}']"
- ah.extra_vars['oo_env'] = options[:env]
- host_type = options[:type]
- else
- abort 'Error: you need to specify either --name or (--type and --env)'
- end
-
- puts
- puts "Configuring #{options[:type]} instance(s) in GCE..."
-
- ah.run_playbook("playbooks/gce/#{host_type}/config.yml")
- end
-
- option :name, :required => false, :type => :string,
- :desc => 'The name of the instance to terminate.'
- option :env, :required => false, :aliases => '-e', :enum => SUPPORTED_ENVS,
- :desc => 'The environment of the new instances.'
- option :type, :required => false, :enum => LaunchHelper.get_gce_host_types,
- :desc => 'The type of the instances to configure.'
- option :confirm, :required => false, :type => :boolean,
- :desc => 'Terminate without interactive confirmation'
- desc "terminate", 'Terminate instances'
- def terminate()
- ah = AnsibleHelper.for_gce()
-
- abort 'Error: you can\'t specify both --name and --type' unless options[:type].nil? || options[:name].nil?
-
- abort 'Error: you can\'t specify both --name and --env' unless options[:env].nil? || options[:name].nil?
-
- host_type = nil
- if options[:name]
- details = GceHelper.get_host_details(options[:name])
- ah.extra_vars['oo_host_group_exp'] = options[:name]
- ah.extra_vars['oo_env'] = details['env']
- host_type = details['host-type']
- elsif options[:type] && options[:env]
- oo_env_host_type_tag = GceHelper.generate_env_host_type_tag_name(options[:env], options[:type])
- ah.extra_vars['oo_host_group_exp'] = "groups['#{oo_env_host_type_tag}']"
- ah.extra_vars['oo_env'] = options[:env]
- host_type = options[:type]
- else
- abort 'Error: you need to specify either --name or (--type and --env)'
- end
-
- puts
- puts "Terminating #{options[:type]} instance(s) in GCE..."
-
- ah.run_playbook("playbooks/gce/#{host_type}/terminate.yml")
- end
-
- option :env, :required => false, :aliases => '-e', :enum => SUPPORTED_ENVS,
- :desc => 'The environment to list.'
- desc "list", "Lists instances."
- def list()
- hosts = GceHelper.get_hosts()
-
- hosts.delete_if { |h| h.env != options[:env] } unless options[:env].nil?
-
- fmt_str = "%34s %5s %8s %17s %7s"
-
- puts
- puts fmt_str % ['Name','Env', 'State', 'IP Address', 'Created By']
- puts fmt_str % ['----','---', '-----', '----------', '----------']
- hosts.each { |h| puts fmt_str % [h.name, h.env, h.state, h.public_ip, h.created_by ] }
- puts
- end
-
- option :file, :required => true, :type => :string,
- :desc => 'The name of the file to copy.'
- option :dest, :required => false, :type => :string,
- :desc => 'A relative path where files are written to.'
- desc "scp_from", "scp files from an instance"
- def scp_from(*ssh_ops, host)
- if host =~ /^([\w\d_.\-]+)@([\w\d\-_.]+)$/
- user = $1
- host = $2
- end
-
- path_to_file = options['file']
- dest = options['dest']
-
- details = GceHelper.get_host_details(host)
- abort "\nError: Instance [#{host}] is not RUNNING\n\n" unless details['gce_status'] == 'RUNNING'
-
- cmd = "scp #{ssh_ops.join(' ')}"
-
- if user.nil?
- cmd += " "
- else
- cmd += " #{user}@"
- end
-
- if dest.nil?
- download = File.join(Dir.pwd, 'download')
- FileUtils.mkdir_p(download) unless File.exists?(download)
- cmd += "#{details['gce_public_ip']}:#{path_to_file} download/"
- else
- cmd += "#{details['gce_public_ip']}:#{path_to_file} #{File.expand_path(dest)}"
- end
-
- exec(cmd)
- end
-
- desc "ssh", "Ssh to an instance"
- def ssh(*ssh_ops, host)
- if host =~ /^([\w\d_.\-]+)@([\w\d\-_.]+)/
- user = $1
- host = $2
- end
-
- details = GceHelper.get_host_details(host)
- abort "\nError: Instance [#{host}] is not RUNNING\n\n" unless details['gce_status'] == 'RUNNING'
-
- cmd = "ssh #{ssh_ops.join(' ')}"
-
- if user.nil?
- cmd += " "
- else
- cmd += " #{user}@"
- end
-
- cmd += "#{details['gce_public_ip']}"
-
- exec(cmd)
- end
-
- option :name, :required => true, :aliases => '-n', :type => :string,
- :desc => 'The name of the instance.'
- desc 'details', 'Displays details about an instance.'
- def details()
- name = options[:name]
-
- details = GceHelper.get_host_details(name)
-
- key_size = details.keys.max_by { |k| k.size }.size
-
- header = "Details for #{name}"
- puts
- puts header
- header.size.times { print '-' }
- puts
- details.each { |k,v| printf("%#{key_size + 2}s: %s\n", k, v) }
- puts
- end
-
- desc 'types', 'Displays instance types'
- def types()
- puts
- puts "Available Host Types"
- puts "--------------------"
- LaunchHelper.get_gce_host_types.each { |t| puts " #{t}" }
- puts
- end
- end
- end
-end
diff --git a/lib/gce_helper.rb b/lib/gce_helper.rb
deleted file mode 100644
index 19fa00020..000000000
--- a/lib/gce_helper.rb
+++ /dev/null
@@ -1,94 +0,0 @@
-require 'ostruct'
-
-module OpenShift
- module Ops
- class GceHelper
- MYDIR = File.expand_path(File.dirname(__FILE__))
-
- def self.get_list()
- cmd = "#{MYDIR}/../inventory/gce/gce.py --list"
- hosts = %x[#{cmd} 2>&1]
-
- raise "Error: failed to list hosts\n#{hosts}" unless $?.exitstatus == 0
-
- return JSON.parse(hosts)
- end
-
- def self.get_tag(tags, selector)
- tags.each do |tag|
- return $1 if tag =~ selector
- end
-
- return nil
- end
-
- def self.get_hosts()
- hosts = get_list()
-
- retval = []
- hosts['_meta']['hostvars'].each do |host, info|
- retval << OpenStruct.new({
- :name => info['gce_name'],
- :env => get_tag(info['gce_tags'], /^env-(\w+)$/) || 'UNSET',
- :public_ip => info['gce_public_ip'],
- :state => info['gce_status'],
- :created_by => get_tag(info['gce_tags'], /^created-by-(\w+)$/) || 'UNSET',
- })
- end
-
- retval.sort_by! { |h| [h.env, h.state, h.name] }
-
- return retval
-
- end
-
- def self.get_host_details(host)
- cmd = "#{MYDIR}/../inventory/gce/gce.py --host #{host}"
- details = %x[#{cmd} 2>&1]
-
- raise "Error: failed to get host details\n#{details}" unless $?.exitstatus == 0
-
- retval = JSON.parse(details)
-
- raise "Error: host not found [#{host}]" if retval.empty?
-
- # Convert OpenShift specific tags to entries
- retval['gce_tags'].each do |tag|
- if tag =~ /\Ahost-type-([\w\d-]+)\z/
- retval['host-type'] = $1
- end
-
- if tag =~ /\Aenv-([\w\d]+)\z/
- retval['env'] = $1
- end
- end
-
- return retval
- end
-
- def self.generate_env_tag(env)
- return "env-#{env}"
- end
-
- def self.generate_env_tag_name(env)
- return "tag_#{generate_env_tag(env)}"
- end
-
- def self.generate_host_type_tag(host_type)
- return "host-type-#{host_type}"
- end
-
- def self.generate_host_type_tag_name(host_type)
- return "tag_#{generate_host_type_tag(host_type)}"
- end
-
- def self.generate_env_host_type_tag(env, host_type)
- return "env-host-type-#{env}-#{host_type}"
- end
-
- def self.generate_env_host_type_tag_name(env, host_type)
- return "tag_#{generate_env_host_type_tag(env, host_type)}"
- end
- end
- end
-end
diff --git a/lib/launch_helper.rb b/lib/launch_helper.rb
deleted file mode 100644
index 0fe5ea6dc..000000000
--- a/lib/launch_helper.rb
+++ /dev/null
@@ -1,30 +0,0 @@
-module OpenShift
- module Ops
- class LaunchHelper
- MYDIR = File.expand_path(File.dirname(__FILE__))
-
- def self.expand_name(name)
- return [name] unless name =~ /^([a-zA-Z0-9\-]+)\{(\d+)-(\d+)\}$/
-
- # Regex matched, so grab the values
- start_num = $2
- end_num = $3
-
- retval = []
- start_num.upto(end_num) do |i|
- retval << "#{$1}#{i}"
- end
-
- return retval
- end
-
- def self.get_gce_host_types()
- return Dir.glob("#{MYDIR}/../playbooks/gce/*").map { |d| File.basename(d) }
- end
-
- def self.get_aws_host_types()
- return Dir.glob("#{MYDIR}/../playbooks/aws/*").map { |d| File.basename(d) }
- end
- end
- end
-end
diff --git a/playbooks/aws/ansible-tower/launch.yml b/playbooks/aws/ansible-tower/launch.yml
index c23bda3a0..4bcc8b8dc 100644
--- a/playbooks/aws/ansible-tower/launch.yml
+++ b/playbooks/aws/ansible-tower/launch.yml
@@ -22,7 +22,7 @@
group_id: "{{ oo_security_group_ids }}"
instance_type: c4.xlarge
image: "{{ rhel7_ami }}"
- count: "{{ oo_new_inst_names | oo_len }}"
+ count: "{{ oo_new_inst_names | length }}"
user_data: "{{ lookup('file', user_data_file) }}"
wait: yes
assign_public_ip: "{{ oo_assign_public_ip }}"
diff --git a/playbooks/aws/openshift-cluster/config.yml b/playbooks/aws/openshift-cluster/config.yml
index b8961704e..7188312ed 100644
--- a/playbooks/aws/openshift-cluster/config.yml
+++ b/playbooks/aws/openshift-cluster/config.yml
@@ -32,5 +32,6 @@
openshift_cluster_id: "{{ cluster_id }}"
openshift_debug_level: 4
openshift_deployment_type: "{{ deployment_type }}"
+ openshift_first_master: "{{ groups.oo_first_master.0 }}"
openshift_hostname: "{{ ec2_private_ip_address }}"
openshift_public_hostname: "{{ ec2_ip_address }}"
diff --git a/playbooks/aws/openshift-cluster/launch.yml b/playbooks/aws/openshift-cluster/launch.yml
index 3eb5496e4..33e1ec25d 100644
--- a/playbooks/aws/openshift-cluster/launch.yml
+++ b/playbooks/aws/openshift-cluster/launch.yml
@@ -25,6 +25,14 @@
cluster: "{{ cluster_id }}"
type: "{{ k8s_type }}"
+ - set_fact:
+ a_master: "{{ master_names[0] }}"
+ - add_host: name={{ a_master }} groups=service_master
+
- include: update.yml
+- include: ../../common/openshift-cluster/create_services.yml
+ vars:
+ g_svc_master: "{{ service_master }}"
+
- include: list.yml
diff --git a/playbooks/aws/openshift-cluster/service.yml b/playbooks/aws/openshift-cluster/service.yml
new file mode 100644
index 000000000..25cf48505
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/service.yml
@@ -0,0 +1,28 @@
+---
+- name: Call same systemctl command for openshift on all instance(s)
+ hosts: localhost
+ gather_facts: no
+ vars_files:
+ - vars.yml
+ tasks:
+ - fail: msg="cluster_id is required to be injected in this playbook"
+ when: cluster_id is not defined
+
+ - name: Evaluate g_service_masters
+ add_host:
+ name: "{{ item }}"
+ groups: g_service_masters
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ with_items: groups["tag_env-host-type_{{ cluster_id }}-openshift-master"] | default([])
+
+ - name: Evaluate g_service_nodes
+ add_host:
+ name: "{{ item }}"
+ groups: g_service_nodes
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ with_items: groups["tag_env-host-type_{{ cluster_id }}-openshift-node"] | default([])
+
+- include: ../../common/openshift-node/service.yml
+- include: ../../common/openshift-master/service.yml
diff --git a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml
index 666a8d1fb..060147659 100644
--- a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml
+++ b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml
@@ -79,13 +79,14 @@
group: "{{ ec2_security_groups }}"
instance_type: "{{ ec2_instance_type }}"
image: "{{ latest_ami }}"
- count: "{{ instances | oo_len }}"
+ count: "{{ instances | length }}"
vpc_subnet_id: "{{ ec2_vpc_subnet | default(omit, true) }}"
assign_public_ip: "{{ ec2_assign_public_ip | default(omit, true) }}"
user_data: "{{ user_data }}"
wait: yes
instance_tags:
created-by: "{{ created_by }}"
+ environment: "{{ env }}"
env: "{{ env }}"
host-type: "{{ host_type }}"
env-host-type: "{{ env_host_type }}"
diff --git a/playbooks/aws/openshift-master/launch.yml b/playbooks/aws/openshift-master/launch.yml
index 6b3751682..1cefad492 100644
--- a/playbooks/aws/openshift-master/launch.yml
+++ b/playbooks/aws/openshift-master/launch.yml
@@ -4,10 +4,10 @@
connection: local
gather_facts: no
-# TODO: modify atomic_ami based on deployment_type
+# TODO: modify g_ami based on deployment_type
vars:
inst_region: us-east-1
- atomic_ami: ami-86781fee
+ g_ami: ami-86781fee
user_data_file: user_data.txt
tasks:
@@ -18,13 +18,13 @@
keypair: libra
group: ['public']
instance_type: m3.large
- image: "{{ atomic_ami }}"
- count: "{{ oo_new_inst_names | oo_len }}"
+ image: "{{ g_ami }}"
+ count: "{{ oo_new_inst_names | length }}"
user_data: "{{ lookup('file', user_data_file) }}"
wait: yes
register: ec2
- - name: Add new instances public IPs to the atomic proxy host group
+ - name: Add new instances public IPs to the host group
add_host: "hostname={{ item.public_ip }} groupname=new_ec2_instances"
with_items: ec2.instances
diff --git a/playbooks/aws/openshift-node/config.yml b/playbooks/aws/openshift-node/config.yml
index fc9b397b4..a993a1e99 100644
--- a/playbooks/aws/openshift-node/config.yml
+++ b/playbooks/aws/openshift-node/config.yml
@@ -21,5 +21,6 @@
openshift_cluster_id: "{{ cluster_id }}"
openshift_debug_level: 4
openshift_deployment_type: "{{ deployment_type }}"
+ openshift_first_master: "{{ groups.oo_first_master.0 }}"
openshift_hostname: "{{ ec2_private_ip_address }}"
openshift_public_hostname: "{{ ec2_ip_address }}"
diff --git a/playbooks/aws/openshift-node/launch.yml b/playbooks/aws/openshift-node/launch.yml
index 36aee14ff..e7d1f7310 100644
--- a/playbooks/aws/openshift-node/launch.yml
+++ b/playbooks/aws/openshift-node/launch.yml
@@ -4,10 +4,10 @@
connection: local
gather_facts: no
-# TODO: modify atomic_ami based on deployment_type
+# TODO: modify g_ami based on deployment_type
vars:
inst_region: us-east-1
- atomic_ami: ami-86781fee
+ g_ami: ami-86781fee
user_data_file: user_data.txt
tasks:
@@ -18,13 +18,13 @@
keypair: libra
group: ['public']
instance_type: m3.large
- image: "{{ atomic_ami }}"
- count: "{{ oo_new_inst_names | oo_len }}"
+ image: "{{ g_ami }}"
+ count: "{{ oo_new_inst_names | length }}"
user_data: "{{ lookup('file', user_data_file) }}"
wait: yes
register: ec2
- - name: Add new instances public IPs to the atomic proxy host group
+ - name: Add new instances public IPs to the host group
add_host:
hostname: "{{ item.public_ip }}"
+ groupname: new_ec2_instances
diff --git a/playbooks/aws/os2-atomic-proxy/config.yml b/playbooks/aws/os2-atomic-proxy/config.yml
deleted file mode 100644
index 7d384a665..000000000
--- a/playbooks/aws/os2-atomic-proxy/config.yml
+++ /dev/null
@@ -1,20 +0,0 @@
----
-- name: "populate oo_hosts_to_config host group if needed"
- hosts: localhost
- gather_facts: no
- tasks:
- - name: Evaluate oo_host_group_exp if it's set
- add_host: "name={{ item }} groups=oo_hosts_to_config"
- with_items: "{{ oo_host_group_exp | default(['']) }}"
- when: oo_host_group_exp is defined
-
-- name: "Configure instances"
- hosts: oo_hosts_to_config
- connection: ssh
- user: root
- vars_files:
- - vars.yml
- - "vars.{{ oo_env }}.yml"
- roles:
- - atomic_base
- - atomic_proxy
diff --git a/playbooks/aws/os2-atomic-proxy/launch.yml b/playbooks/aws/os2-atomic-proxy/launch.yml
deleted file mode 100644
index fd6b0f39a..000000000
--- a/playbooks/aws/os2-atomic-proxy/launch.yml
+++ /dev/null
@@ -1,97 +0,0 @@
----
-- name: Launch instance(s)
- hosts: localhost
- connection: local
- gather_facts: no
-
- vars:
- inst_region: us-east-1
- atomic_ami: ami-8e239fe6
- user_data_file: user_data.txt
- oo_vpc_subnet_id: # Purposely left blank, these are here to be overridden in env vars_files
- oo_assign_public_ip: # Purposely left blank, these are here to be overridden in env vars_files
-
- vars_files:
- - vars.yml
- - "vars.{{ oo_env }}.yml"
-
- tasks:
- - name: Launch instances in VPC
- ec2:
- state: present
- region: "{{ inst_region }}"
- keypair: mmcgrath_libra
- group_id: "{{ oo_security_group_ids }}"
- instance_type: m3.large
- image: "{{ atomic_ami }}"
- count: "{{ oo_new_inst_names | oo_len }}"
- user_data: "{{ lookup('file', user_data_file) }}"
- wait: yes
- assign_public_ip: "{{ oo_assign_public_ip }}"
- vpc_subnet_id: "{{ oo_vpc_subnet_id }}"
- when: oo_vpc_subnet_id
- register: ec2_vpc
-
- - set_fact:
- ec2: "{{ ec2_vpc }}"
- when: oo_vpc_subnet_id
-
- - name: Launch instances in Classic
- ec2:
- state: present
- region: "{{ inst_region }}"
- keypair: mmcgrath_libra
- group: ['Libra', '{{ oo_env }}', '{{ oo_env }}_proxy', '{{ oo_env }}_proxy_atomic']
- instance_type: m3.large
- image: "{{ atomic_ami }}"
- count: "{{ oo_new_inst_names | oo_len }}"
- user_data: "{{ lookup('file', user_data_file) }}"
- wait: yes
- when: not oo_vpc_subnet_id
- register: ec2_classic
-
- - set_fact:
- ec2: "{{ ec2_classic }}"
- when: not oo_vpc_subnet_id
-
- - name: Add new instances public IPs to the atomic proxy host group
- add_host: "hostname={{ item.public_ip }} groupname=new_ec2_instances"
- with_items: ec2.instances
-
- - name: Add Name and environment tags to instances
- ec2_tag: "resource={{ item.1.id }} region={{ inst_region }} state=present"
- with_together:
- - oo_new_inst_names
- - ec2.instances
- args:
- tags:
- Name: "{{ item.0 }}"
-
- - name: Add other tags to instances
- ec2_tag: "resource={{ item.id }} region={{ inst_region }} state=present"
- with_items: ec2.instances
- args:
- tags: "{{ oo_new_inst_tags }}"
-
- - name: Add new instances public IPs to oo_hosts_to_config
- add_host: "hostname={{ item.0 }} ansible_ssh_host={{ item.1.public_ip }} groupname=oo_hosts_to_config"
- with_together:
- - oo_new_inst_names
- - ec2.instances
-
- - debug: var=ec2
-
- - name: Wait for ssh
- wait_for: "port=22 host={{ item.public_ip }}"
- with_items: ec2.instances
-
- - name: Wait for root user setup
- command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null root@{{ item.public_ip }} echo root user is setup"
- register: result
- until: result.rc == 0
- retries: 20
- delay: 10
- with_items: ec2.instances
-
-# Apply the configs, seprate so that just the configs can be run by themselves
-- include: config.yml
diff --git a/playbooks/aws/os2-atomic-proxy/user_data.txt b/playbooks/aws/os2-atomic-proxy/user_data.txt
deleted file mode 100644
index 643d17c32..000000000
--- a/playbooks/aws/os2-atomic-proxy/user_data.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-#cloud-config
-disable_root: 0
-
-system_info:
- default_user:
- name: root
diff --git a/playbooks/aws/os2-atomic-proxy/vars.int.yml b/playbooks/aws/os2-atomic-proxy/vars.int.yml
deleted file mode 100644
index 00157cd89..000000000
--- a/playbooks/aws/os2-atomic-proxy/vars.int.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-oo_env_long: integration
-oo_zabbix_hostgroups: ['INT Environment']
diff --git a/playbooks/aws/os2-atomic-proxy/vars.prod.yml b/playbooks/aws/os2-atomic-proxy/vars.prod.yml
deleted file mode 100644
index 641afc626..000000000
--- a/playbooks/aws/os2-atomic-proxy/vars.prod.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-oo_env_long: production
-oo_zabbix_hostgroups: ['PROD Environment']
diff --git a/playbooks/aws/os2-atomic-proxy/vars.stg.yml b/playbooks/aws/os2-atomic-proxy/vars.stg.yml
deleted file mode 100644
index 1cecfc9b2..000000000
--- a/playbooks/aws/os2-atomic-proxy/vars.stg.yml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-oo_env_long: staging
-oo_zabbix_hostgroups: ['STG Environment']
-oo_vpc_subnet_id: subnet-700bdd07
-oo_assign_public_ip: yes
-oo_security_group_ids:
- - sg-02c2f267 # Libra (vpc)
- - sg-f0bfbe95 # stg (vpc)
- - sg-a3bfbec6 # stg_proxy (vpc)
- - sg-d4bfbeb1 # stg_proxy_atomic (vpc)
diff --git a/playbooks/byo/openshift-node/config.yml b/playbooks/byo/openshift-node/config.yml
index d569827b4..f50903061 100644
--- a/playbooks/byo/openshift-node/config.yml
+++ b/playbooks/byo/openshift-node/config.yml
@@ -10,12 +10,14 @@
with_items: groups.nodes
- name: Evaluate oo_first_master
add_host:
- name: "{{ groups.masters[0] }}"
+ name: "{{ item }}"
groups: oo_first_master
+ with_items: groups.masters.0
- include: ../../common/openshift-node/config.yml
vars:
+ openshift_first_master: "{{ groups.masters.0 }}"
openshift_cluster_id: "{{ cluster_id | default('default') }}"
openshift_debug_level: 4
openshift_deployment_type: "{{ deployment_type }}"
diff --git a/playbooks/common/openshift-cluster/create_services.yml b/playbooks/common/openshift-cluster/create_services.yml
new file mode 100644
index 000000000..e70709d19
--- /dev/null
+++ b/playbooks/common/openshift-cluster/create_services.yml
@@ -0,0 +1,8 @@
+---
+- name: Deploy OpenShift Services
+ hosts: "{{ g_svc_master }}"
+ connection: ssh
+ gather_facts: yes
+ roles:
+ - openshift_registry
+ - openshift_router
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
index 05822d118..052ed14c7 100644
--- a/playbooks/common/openshift-master/config.yml
+++ b/playbooks/common/openshift-master/config.yml
@@ -1,11 +1,10 @@
---
- name: Configure master instances
hosts: oo_masters_to_config
- vars:
- openshift_sdn_master_url: https://{{ openshift.common.hostname }}:4001
roles:
- openshift_master
- - { role: openshift_sdn_master, when: openshift.common.use_openshift_sdn | bool }
+ - role: fluentd_master
+ when: openshift.common.use_fluentd | bool
tasks:
- name: Create group for deployment type
group_by: key=oo_masters_deployment_type_{{ openshift.common.deployment_type }}
diff --git a/playbooks/common/openshift-master/service.yml b/playbooks/common/openshift-master/service.yml
new file mode 100644
index 000000000..5636ad156
--- /dev/null
+++ b/playbooks/common/openshift-master/service.yml
@@ -0,0 +1,18 @@
+---
+- name: Populate g_service_masters host group if needed
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - fail: msg="new_cluster_state is required to be injected in this playbook"
+ when: new_cluster_state is not defined
+
+ - name: Evaluate g_service_masters
+ add_host: name={{ item }} groups=g_service_masters
+ with_items: oo_host_group_exp | default([])
+
+- name: Change openshift-master state on master instance(s)
+ hosts: g_service_masters
+ connection: ssh
+ gather_facts: no
+ tasks:
+ - service: name=openshift-master state="{{ new_cluster_state }}"
diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml
index 96641a274..9e642f3d3 100644
--- a/playbooks/common/openshift-node/config.yml
+++ b/playbooks/common/openshift-node/config.yml
@@ -4,9 +4,9 @@
roles:
- openshift_facts
tasks:
- # Since the master is registering the nodes before they are configured, we
- # need to make sure to set the node properties beforehand if we do not want
- # the defaults
+ # Since the master is generating the node certificates before they are
+ # configured, we need to make sure to set the node properties beforehand if
+ # we do not want the defaults
- openshift_facts:
role: "{{ item.role }}"
local_facts: "{{ item.local_facts }}"
@@ -18,13 +18,26 @@
deployment_type: "{{ openshift_deployment_type }}"
- role: node
local_facts:
- external_id: "{{ openshift_node_external_id | default(None) }}"
resources_cpu: "{{ openshift_node_resources_cpu | default(None) }}"
resources_memory: "{{ openshift_node_resources_memory | default(None) }}"
pod_cidr: "{{ openshift_node_pod_cidr | default(None) }}"
labels: "{{ openshift_node_labels | default(None) }}"
annotations: "{{ openshift_node_annotations | default(None) }}"
-
+ - name: Check status of node certificates
+ stat:
+ path: "{{ item }}"
+ with_items:
+ - "/etc/openshift/node/node.key"
+ - "/etc/openshift/node/node.kubeconfig"
+ - "/etc/openshift/node/ca.crt"
+ - "/etc/openshift/node/server.key"
+ register: stat_result
+ - set_fact:
+ certs_missing: "{{ stat_result.results | map(attribute='stat.exists')
+ | list | intersect([false])}}"
+ node_subdir: node-{{ openshift.common.hostname }}
+ config_dir: /etc/openshift/generated-configs/node-{{ openshift.common.hostname }}
+ node_cert_dir: /etc/openshift/node
- name: Create temp directory for syncing certs
hosts: localhost
@@ -37,65 +50,59 @@
register: mktemp
changed_when: False
-
- name: Register nodes
hosts: oo_first_master
vars:
- openshift_nodes: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']) }}"
+ nodes_needing_certs: "{{ hostvars
+ | oo_select_keys(groups['oo_nodes_to_config'])
+ | oo_filter_list(filter_attr='certs_missing') }}"
+ openshift_nodes: "{{ hostvars
+ | oo_select_keys(groups['oo_nodes_to_config']) }}"
sync_tmpdir: "{{ hostvars.localhost.mktemp.stdout }}"
roles:
- openshift_register_nodes
- tasks:
- # TODO: update so that we only sync necessary configs/directories, currently
- # we sync for all nodes in oo_nodes_to_config. We will need to inspect the
- # configs on the nodes to make the determination on whether to sync or not.
- - name: Create the temp directory on the master
- file:
- path: "{{ sync_tmpdir }}"
- owner: "{{ ansible_ssh_user }}"
- mode: 0700
- state: directory
- changed_when: False
-
+ post_tasks:
- name: Create a tarball of the node config directories
- command: tar -czvf {{ sync_tmpdir }}/{{ item.openshift.common.hostname }}.tgz ./
+ command: >
+ tar -czvf {{ item.config_dir }}.tgz
+ --transform 's|system:{{ item.node_subdir }}|node|'
+ -C {{ item.config_dir }} .
args:
- chdir: "{{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}"
- with_items: openshift_nodes
- changed_when: False
+ creates: "{{ item.config_dir }}.tgz"
+ with_items: nodes_needing_certs
- name: Retrieve the node config tarballs from the master
fetch:
- src: "{{ sync_tmpdir }}/{{ item.openshift.common.hostname }}.tgz"
+ src: "{{ item.config_dir }}.tgz"
dest: "{{ sync_tmpdir }}/"
+ flat: yes
fail_on_missing: yes
validate_checksum: yes
- with_items: openshift_nodes
- changed_when: False
-
+ with_items: nodes_needing_certs
- name: Configure node instances
hosts: oo_nodes_to_config
- gather_facts: no
vars:
- sync_tmpdir: "{{ hostvars.localhost.mktemp.stdout }}/{{ groups['oo_first_master'][0] }}/{{ hostvars.localhost.mktemp.stdout }}"
- openshift_sdn_master_url: "https://{{ hostvars[groups['oo_first_master'][0]].openshift.common.hostname }}:4001"
+ sync_tmpdir: "{{ hostvars.localhost.mktemp.stdout }}"
+ openshift_node_master_api_url: "{{ hostvars[openshift_first_master].openshift.master.api_url }}"
pre_tasks:
- name: Ensure certificate directory exists
file:
- path: "{{ openshift_node_cert_dir }}"
+ path: "{{ node_cert_dir }}"
state: directory
- # TODO: notify restart openshift-node and/or restart openshift-sdn-node,
+ # TODO: notify restart openshift-node
# possibly test service started time against certificate/config file
- # timestamps in openshift-node or openshift-sdn-node to trigger notify
+ # timestamps in openshift-node to trigger notify
- name: Unarchive the tarball on the node
unarchive:
- src: "{{ sync_tmpdir }}/{{ openshift.common.hostname }}.tgz"
- dest: "{{ openshift_node_cert_dir }}"
+ src: "{{ sync_tmpdir }}/{{ node_subdir }}.tgz"
+ dest: "{{ node_cert_dir }}"
+ when: certs_missing
roles:
- openshift_node
- - { role: openshift_sdn_node, when: openshift.common.use_openshift_sdn | bool }
+ - role: fluentd_node
+ when: openshift.common.use_fluentd | bool
tasks:
- name: Create group for deployment type
group_by: key=oo_nodes_deployment_type_{{ openshift.common.deployment_type }}
@@ -110,7 +117,6 @@
- file: name={{ sync_tmpdir }} state=absent
changed_when: False
-
- name: Delete temporary directory on localhost
hosts: localhost
connection: local
@@ -120,7 +126,6 @@
- file: name={{ mktemp.stdout }} state=absent
changed_when: False
-
# Additional config for online type deployments
- name: Additional instance config
hosts: oo_nodes_deployment_type_online
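The certs_missing expression above maps each stat result to its exists flag and intersects the list with [false]; the config tarball is only fetched and unarchived when that result is non-empty. A rough Python equivalent of the same test, using the certificate paths from the stat task:

    import os

    cert_files = ['/etc/openshift/node/node.key',
                  '/etc/openshift/node/node.kubeconfig',
                  '/etc/openshift/node/ca.crt',
                  '/etc/openshift/node/server.key']

    exists_flags = [os.path.exists(path) for path in cert_files]
    # intersect([false]) keeps only the False entries (deduplicated)
    certs_missing = list(set(exists_flags) & set([False]))
    # Non-empty (truthy) when at least one file is absent, which is what
    # "when: certs_missing" checks before unarchiving the tarball.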
diff --git a/playbooks/common/openshift-node/service.yml b/playbooks/common/openshift-node/service.yml
new file mode 100644
index 000000000..f76df089f
--- /dev/null
+++ b/playbooks/common/openshift-node/service.yml
@@ -0,0 +1,18 @@
+---
+- name: Populate g_service_nodes host group if needed
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - fail: msg="new_cluster_state is required to be injected in this playbook"
+ when: new_cluster_state is not defined
+
+ - name: Evaluate g_service_nodes
+ add_host: name={{ item }} groups=g_service_nodes
+ with_items: oo_host_group_exp | default([])
+
+- name: Change openshift-node state on node instance(s)
+ hosts: g_service_nodes
+ connection: ssh
+ gather_facts: no
+ tasks:
+ - service: name=openshift-node state="{{ new_cluster_state }}"
diff --git a/playbooks/gce/openshift-cluster/config.yml b/playbooks/gce/openshift-cluster/config.yml
index 8b8490246..8c320dbd2 100644
--- a/playbooks/gce/openshift-cluster/config.yml
+++ b/playbooks/gce/openshift-cluster/config.yml
@@ -34,4 +34,5 @@
openshift_cluster_id: "{{ cluster_id }}"
openshift_debug_level: 4
openshift_deployment_type: "{{ deployment_type }}"
+ openshift_first_master: "{{ groups.oo_first_master.0 }}"
openshift_hostname: "{{ gce_private_ip }}"
diff --git a/playbooks/gce/openshift-cluster/launch.yml b/playbooks/gce/openshift-cluster/launch.yml
index 771f51e91..35737f03d 100644
--- a/playbooks/gce/openshift-cluster/launch.yml
+++ b/playbooks/gce/openshift-cluster/launch.yml
@@ -23,6 +23,22 @@
cluster: "{{ cluster_id }}"
type: "{{ k8s_type }}"
+ - set_fact:
+ a_master: "{{ master_names[0] }}"
+ - add_host: name={{ a_master }} groups=service_master
+
- include: update.yml
+- name: Deploy OpenShift Services
+ hosts: service_master
+ connection: ssh
+ gather_facts: yes
+ roles:
+ - openshift_registry
+ - openshift_router
+
+- include: ../../common/openshift-cluster/create_services.yml
+ vars:
+ g_svc_master: "{{ service_master }}"
+
- include: list.yml
diff --git a/playbooks/gce/openshift-cluster/list.yml b/playbooks/gce/openshift-cluster/list.yml
index 962381306..5ba0f5a48 100644
--- a/playbooks/gce/openshift-cluster/list.yml
+++ b/playbooks/gce/openshift-cluster/list.yml
@@ -16,7 +16,7 @@
ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
with_items: groups[scratch_group] | default([]) | difference(['localhost']) | difference(groups.status_terminated)
-- name: List Hosts
+- name: List instance(s)
hosts: oo_list_hosts
gather_facts: no
tasks:
diff --git a/playbooks/gce/openshift-cluster/service.yml b/playbooks/gce/openshift-cluster/service.yml
new file mode 100644
index 000000000..2d0f2ab95
--- /dev/null
+++ b/playbooks/gce/openshift-cluster/service.yml
@@ -0,0 +1,28 @@
+---
+- name: Call same systemctl command for openshift on all instance(s)
+ hosts: localhost
+ gather_facts: no
+ vars_files:
+ - vars.yml
+ tasks:
+ - fail: msg="cluster_id is required to be injected in this playbook"
+ when: cluster_id is not defined
+
+ - set_fact: scratch_group=tag_env-host-type-{{ cluster_id }}-openshift-node
+ - add_host:
+ name: "{{ item }}"
+ groups: g_service_nodes
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ with_items: groups[scratch_group] | default([]) | difference(['localhost']) | difference(groups.status_terminated)
+
+ - set_fact: scratch_group=tag_env-host-type-{{ cluster_id }}-openshift-master
+ - add_host:
+ name: "{{ item }}"
+ groups: g_service_masters
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ with_items: groups[scratch_group] | default([]) | difference(['localhost']) | difference(groups.status_terminated)
+
+- include: ../../common/openshift-node/service.yml
+- include: ../../common/openshift-master/service.yml
diff --git a/playbooks/gce/openshift-cluster/wip.yml b/playbooks/gce/openshift-cluster/wip.yml
new file mode 100644
index 000000000..51a521a6b
--- /dev/null
+++ b/playbooks/gce/openshift-cluster/wip.yml
@@ -0,0 +1,26 @@
+---
+- name: WIP
+ hosts: localhost
+ connection: local
+ gather_facts: no
+ vars_files:
+ - vars.yml
+ tasks:
+ - name: Evaluate oo_masters_for_deploy
+ add_host:
+ name: "{{ item }}"
+ groups: oo_masters_for_deploy
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ with_items: groups["tag_env-host-type-{{ cluster_id }}-openshift-master"] | default([])
+
+- name: Deploy OpenShift Services
+ hosts: oo_masters_for_deploy
+ connection: ssh
+ gather_facts: yes
+ user: root
+ vars_files:
+ - vars.yml
+ roles:
+ - openshift_registry
+ - openshift_router
diff --git a/playbooks/gce/openshift-node/config.yml b/playbooks/gce/openshift-node/config.yml
index 5b1601176..54b0da2ca 100644
--- a/playbooks/gce/openshift-node/config.yml
+++ b/playbooks/gce/openshift-node/config.yml
@@ -21,4 +21,5 @@
openshift_cluster_id: "{{ cluster_id }}"
openshift_debug_level: 4
openshift_deployment_type: "{{ deployment_type }}"
+ openshift_first_master: "{{ groups.oo_first_master.0 }}"
openshift_hostname: "{{ gce_private_ip }}"
diff --git a/playbooks/libvirt/openshift-cluster/config.yml b/playbooks/libvirt/openshift-cluster/config.yml
index faf278b10..75e2005a2 100644
--- a/playbooks/libvirt/openshift-cluster/config.yml
+++ b/playbooks/libvirt/openshift-cluster/config.yml
@@ -36,3 +36,4 @@
openshift_cluster_id: "{{ cluster_id }}"
openshift_debug_level: 4
openshift_deployment_type: "{{ deployment_type }}"
+ openshift_first_master: "{{ groups.oo_first_master.0 }}"
diff --git a/playbooks/libvirt/openshift-cluster/service.yml b/playbooks/libvirt/openshift-cluster/service.yml
new file mode 100644
index 000000000..ae095f5a2
--- /dev/null
+++ b/playbooks/libvirt/openshift-cluster/service.yml
@@ -0,0 +1,32 @@
+---
+# TODO: need to figure out a plan for setting hostname, currently the default
+# is localhost, so no hostname (or public_hostname) value is getting
+# assigned
+
+- name: Call same systemctl command for openshift on all instance(s)
+ hosts: localhost
+ gather_facts: no
+ vars_files:
+ - vars.yml
+ tasks:
+ - fail: msg="cluster_id is required to be injected in this playbook"
+ when: cluster_id is not defined
+
+ - name: Evaluate g_service_masters
+ add_host:
+ name: "{{ item }}"
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ groups: g_service_masters
+ with_items: groups["tag_env-host-type-{{ cluster_id }}-openshift-master"] | default([])
+
+ - name: Evaluate g_service_nodes
+ add_host:
+ name: "{{ item }}"
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ groups: g_service_nodes
+ with_items: groups["tag_env-host-type-{{ cluster_id }}-openshift-node"] | default([])
+
+- include: ../../common/openshift-node/service.yml
+- include: ../../common/openshift-master/service.yml
diff --git a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
index 359d0b2f3..8291192ab 100644
--- a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
+++ b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
@@ -58,23 +58,17 @@
uri: '{{ libvirt_uri }}'
with_items: instances
-- name: Collect MAC addresses of the VMs
- shell: 'virsh -c {{ libvirt_uri }} dumpxml {{ item }} | xmllint --xpath "string(//domain/devices/interface/mac/@address)" -'
- register: scratch_mac
- with_items: instances
-
- name: Wait for the VMs to get an IP
- command: "egrep -c '{{ scratch_mac.results | oo_collect('stdout') | join('|') }}' /proc/net/arp"
- ignore_errors: yes
+ shell: 'virsh -c {{ libvirt_uri }} net-dhcp-leases openshift-ansible | egrep -c ''{{ instances | join("|") }}'''
register: nb_allocated_ips
until: nb_allocated_ips.stdout == '{{ instances | length }}'
retries: 30
delay: 1
- name: Collect IP addresses of the VMs
- shell: "awk '/{{ item.stdout }}/ {print $1}' /proc/net/arp"
+ shell: 'virsh -c {{ libvirt_uri }} net-dhcp-leases openshift-ansible | awk ''$6 == "{{ item }}" {gsub(/\/.*/, "", $5); print $5}'''
register: scratch_ip
- with_items: scratch_mac.results
+ with_items: instances
- set_fact:
ips: "{{ scratch_ip.results | oo_collect('stdout') }}"
diff --git a/playbooks/openstack/openshift-cluster/config.yml b/playbooks/openstack/openshift-cluster/config.yml
new file mode 100644
index 000000000..abadaf5ca
--- /dev/null
+++ b/playbooks/openstack/openshift-cluster/config.yml
@@ -0,0 +1,35 @@
+- name: Populate oo_masters_to_config host group
+ hosts: localhost
+ gather_facts: no
+ vars_files:
+ - vars.yml
+ tasks:
+ - name: Evaluate oo_masters_to_config
+ add_host:
+ name: "{{ item }}"
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ groups: oo_masters_to_config
+ with_items: groups["tag_env-host-type_{{ cluster_id }}-openshift-master"] | default([])
+ - name: Evaluate oo_nodes_to_config
+ add_host:
+ name: "{{ item }}"
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ groups: oo_nodes_to_config
+ with_items: groups["tag_env-host-type_{{ cluster_id }}-openshift-node"] | default([])
+ - name: Evaluate oo_first_master
+ add_host:
+ name: "{{ groups['tag_env-host-type_' ~ cluster_id ~ '-openshift-master'][0] }}"
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ groups: oo_first_master
+ when: "'tag_env-host-type_{{ cluster_id }}-openshift-master' in groups"
+
+- include: ../../common/openshift-cluster/config.yml
+ vars:
+ openshift_cluster_id: "{{ cluster_id }}"
+ openshift_debug_level: 4
+ openshift_deployment_type: "{{ deployment_type }}"
+ openshift_first_master: "{{ groups.oo_first_master.0 }}"
+ openshift_hostname: "{{ ansible_default_ipv4.address }}"
diff --git a/playbooks/openstack/openshift-cluster/files/heat_stack.yml b/playbooks/openstack/openshift-cluster/files/heat_stack.yml
new file mode 100644
index 000000000..c5f95d87d
--- /dev/null
+++ b/playbooks/openstack/openshift-cluster/files/heat_stack.yml
@@ -0,0 +1,149 @@
+heat_template_version: 2014-10-16
+
+description: OpenShift cluster
+
+parameters:
+ cluster-id:
+ type: string
+ label: Cluster ID
+ description: Identifier of the cluster
+
+ network-prefix:
+ type: string
+ label: Network prefix
+ description: Prefix of the network objects
+
+ cidr:
+ type: string
+ label: CIDR
+ description: CIDR of the network of the cluster
+
+ dns-nameservers:
+ type: comma_delimited_list
+ label: DNS nameservers list
+ description: List of DNS nameservers
+
+ external-net:
+ type: string
+ label: External network
+ description: Name of the external network
+ default: external
+
+ ssh-incoming:
+ type: string
+ label: Source of ssh connections
+ description: Source of legitimate ssh connections
+
+resources:
+ net:
+ type: OS::Neutron::Net
+ properties:
+ name:
+ str_replace:
+ template: network-prefix-net
+ params:
+ network-prefix: { get_param: network-prefix }
+
+ subnet:
+ type: OS::Neutron::Subnet
+ properties:
+ name:
+ str_replace:
+ template: network-prefix-subnet
+ params:
+ network-prefix: { get_param: network-prefix }
+ network: { get_resource: net }
+ cidr: { get_param: cidr }
+ dns_nameservers: { get_param: dns-nameservers }
+
+ router:
+ type: OS::Neutron::Router
+ properties:
+ name:
+ str_replace:
+ template: network-prefix-router
+ params:
+ network-prefix: { get_param: network-prefix }
+ external_gateway_info:
+ network: { get_param: external-net }
+
+ interface:
+ type: OS::Neutron::RouterInterface
+ properties:
+ router_id: { get_resource: router }
+ subnet_id: { get_resource: subnet }
+
+ node-secgrp:
+ type: OS::Neutron::SecurityGroup
+ properties:
+ name:
+ str_replace:
+ template: network-prefix-node-secgrp
+ params:
+ network-prefix: { get_param: network-prefix }
+ description:
+ str_replace:
+ template: Security group for cluster-id OpenShift cluster nodes
+ params:
+ cluster-id: { get_param: cluster-id }
+ rules:
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 22
+ port_range_max: 22
+ remote_ip_prefix: { get_param: ssh-incoming }
+ - direction: ingress
+ protocol: udp
+ port_range_min: 4789
+ port_range_max: 4789
+ remote_mode: remote_group_id
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 10250
+ port_range_max: 10250
+ remote_mode: remote_group_id
+ remote_group_id: { get_resource: master-secgrp }
+
+ master-secgrp:
+ type: OS::Neutron::SecurityGroup
+ properties:
+ name:
+ str_replace:
+ template: network-prefix-master-secgrp
+ params:
+ network-prefix: { get_param: network-prefix }
+ description:
+ str_replace:
+ template: Security group for cluster-id OpenShift cluster master
+ params:
+ cluster-id: { get_param: cluster-id }
+ rules:
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 22
+ port_range_max: 22
+ remote_ip_prefix: { get_param: ssh-incoming }
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 4001
+ port_range_max: 4001
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 8443
+ port_range_max: 8443
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 53
+ port_range_max: 53
+ - direction: ingress
+ protocol: udp
+ port_range_min: 53
+ port_range_max: 53
+ - direction: ingress
+ protocol: tcp
+ port_range_min: 24224
+ port_range_max: 24224
+ - direction: ingress
+ protocol: udp
+ port_range_min: 24224
+ port_range_max: 24224
diff --git a/playbooks/openstack/openshift-cluster/files/user-data b/playbooks/openstack/openshift-cluster/files/user-data
new file mode 100644
index 000000000..e789a5b69
--- /dev/null
+++ b/playbooks/openstack/openshift-cluster/files/user-data
@@ -0,0 +1,7 @@
+#cloud-config
+disable_root: true
+
+system_info:
+ default_user:
+ name: openshift
+ sudo: ["ALL=(ALL) NOPASSWD: ALL"]
diff --git a/playbooks/aws/os2-atomic-proxy/filter_plugins b/playbooks/openstack/openshift-cluster/filter_plugins
index 99a95e4ca..99a95e4ca 120000
--- a/playbooks/aws/os2-atomic-proxy/filter_plugins
+++ b/playbooks/openstack/openshift-cluster/filter_plugins
diff --git a/playbooks/openstack/openshift-cluster/launch.yml b/playbooks/openstack/openshift-cluster/launch.yml
new file mode 100644
index 000000000..5c86ade3f
--- /dev/null
+++ b/playbooks/openstack/openshift-cluster/launch.yml
@@ -0,0 +1,31 @@
+---
+- name: Launch instance(s)
+ hosts: localhost
+ connection: local
+ gather_facts: no
+ vars_files:
+ - vars.yml
+ tasks:
+ - fail:
+ msg: "Deployment type not supported for OpenStack provider yet"
+ when: deployment_type in ['online', 'enterprise']
+
+ - include: tasks/configure_openstack.yml
+
+ - include: ../../common/openshift-cluster/set_master_launch_facts_tasks.yml
+ - include: tasks/launch_instances.yml
+ vars:
+ instances: "{{ master_names }}"
+ cluster: "{{ cluster_id }}"
+ type: "{{ k8s_type }}"
+
+ - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml
+ - include: tasks/launch_instances.yml
+ vars:
+ instances: "{{ node_names }}"
+ cluster: "{{ cluster_id }}"
+ type: "{{ k8s_type }}"
+
+- include: update.yml
+
+- include: list.yml
diff --git a/playbooks/openstack/openshift-cluster/list.yml b/playbooks/openstack/openshift-cluster/list.yml
new file mode 100644
index 000000000..a75e350c7
--- /dev/null
+++ b/playbooks/openstack/openshift-cluster/list.yml
@@ -0,0 +1,24 @@
+---
+- name: Generate oo_list_hosts group
+ hosts: localhost
+ gather_facts: no
+ vars_files:
+ - vars.yml
+ tasks:
+ - set_fact: scratch_group=tag_env_{{ cluster_id }}
+ when: cluster_id != ''
+ - set_fact: scratch_group=all
+ when: cluster_id == ''
+ - add_host:
+ name: "{{ item }}"
+ groups: oo_list_hosts
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_ssh_host: "{{ hostvars[item].ansible_ssh_host | default(item) }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ with_items: groups[scratch_group] | default([]) | difference(['localhost'])
+
+- name: List Hosts
+ hosts: oo_list_hosts
+ tasks:
+ - debug:
+ msg: 'public:{{ansible_ssh_host}} private:{{ansible_default_ipv4.address}}'
diff --git a/playbooks/aws/os2-atomic-proxy/roles b/playbooks/openstack/openshift-cluster/roles
index 20c4c58cf..20c4c58cf 120000
--- a/playbooks/aws/os2-atomic-proxy/roles
+++ b/playbooks/openstack/openshift-cluster/roles
diff --git a/playbooks/openstack/openshift-cluster/tasks/configure_openstack.yml b/playbooks/openstack/openshift-cluster/tasks/configure_openstack.yml
new file mode 100644
index 000000000..2cbdb4805
--- /dev/null
+++ b/playbooks/openstack/openshift-cluster/tasks/configure_openstack.yml
@@ -0,0 +1,27 @@
+---
+- name: Check infra
+ command: 'heat stack-show {{ openstack_network_prefix }}-stack'
+ register: stack_show_result
+ changed_when: false
+ failed_when: stack_show_result.rc != 0 and 'Stack not found' not in stack_show_result.stderr
+
+- name: Create infra
+ command: 'heat stack-create -f {{ openstack_infra_heat_stack }} -P cluster-id={{ cluster_id }} -P network-prefix={{ openstack_network_prefix }} -P dns-nameservers={{ openstack_network_dns | join(",") }} -P cidr={{ openstack_network_cidr }} -P ssh-incoming={{ openstack_ssh_access_from }} {{ openstack_network_prefix }}-stack'
+ when: stack_show_result.rc == 1
+
+- name: Update infra
+ command: 'heat stack-update -f {{ openstack_infra_heat_stack }} -P cluster-id={{ cluster_id }} -P network-prefix={{ openstack_network_prefix }} -P dns-nameservers={{ openstack_network_dns | join(",") }} -P cidr={{ openstack_network_cidr }} -P ssh-incoming={{ openstack_ssh_access_from }} {{ openstack_network_prefix }}-stack'
+ when: stack_show_result.rc == 0
+
+- name: Wait for infra readiness
+ shell: 'heat stack-show {{ openstack_network_prefix }}-stack | awk ''$2 == "stack_status" {print $4}'''
+ register: stack_show_status_result
+ until: stack_show_status_result.stdout not in ['CREATE_IN_PROGRESS', 'UPDATE_IN_PROGRESS']
+ retries: 30
+ delay: 1
+ failed_when: stack_show_status_result.stdout not in ['CREATE_COMPLETE', 'UPDATE_COMPLETE']
+
+- name: Create ssh keypair
+ nova_keypair:
+ name: "{{ openstack_ssh_keypair }}"
+ public_key: "{{ openstack_ssh_public_key }}"
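
As a plain-shell sketch, the create-or-update-then-wait flow above amounts to roughly the
following (the stack name and -P values are hypothetical stand-ins for the openstack_* variables):

    STACK=openshift-ansible-demo-stack
    PARAMS="-P cluster-id=demo -P network-prefix=openshift-ansible-demo -P cidr=192.168.23.0/24 -P dns-nameservers=8.8.8.8,8.8.4.4 -P ssh-incoming=0.0.0.0/0"

    # Create the Heat stack if it does not exist yet, otherwise update it in place
    if heat stack-show "$STACK" >/dev/null 2>&1; then
        heat stack-update -f files/heat_stack.yml $PARAMS "$STACK"
    else
        heat stack-create -f files/heat_stack.yml $PARAMS "$STACK"
    fi

    # Poll stack_status until it leaves the *_IN_PROGRESS states, then require *_COMPLETE
    while :; do
        status=$(heat stack-show "$STACK" | awk '$2 == "stack_status" {print $4}')
        case "$status" in CREATE_IN_PROGRESS|UPDATE_IN_PROGRESS) sleep 1 ;; *) break ;; esac
    done
    case "$status" in CREATE_COMPLETE|UPDATE_COMPLETE) ;; *) echo "stack failed: $status" >&2; exit 1 ;; esac
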
diff --git a/playbooks/openstack/openshift-cluster/tasks/launch_instances.yml b/playbooks/openstack/openshift-cluster/tasks/launch_instances.yml
new file mode 100644
index 000000000..1b9696aac
--- /dev/null
+++ b/playbooks/openstack/openshift-cluster/tasks/launch_instances.yml
@@ -0,0 +1,48 @@
+---
+- name: Get net id
+ shell: 'neutron net-show {{ openstack_network_prefix }}-net | awk "/\\<id\\>/ {print \$4}"'
+ register: net_id_result
+
+- name: Launch instance(s)
+ nova_compute:
+ name: '{{ item }}'
+ image_name: '{{ deployment_vars[deployment_type].image.name | default(omit, true) }}'
+ image_id: '{{ deployment_vars[deployment_type].image.id | default(omit, true) }}'
+ flavor_ram: '{{ openstack_flavor[k8s_type].ram | default(omit, true) }}'
+ flavor_id: '{{ openstack_flavor[k8s_type].id | default(omit, true) }}'
+ flavor_include: '{{ openstack_flavor[k8s_type].include | default(omit, true) }}'
+ key_name: '{{ openstack_ssh_keypair }}'
+ security_groups: '{{ openstack_network_prefix }}-{{ k8s_type }}-secgrp'
+ nics:
+ - net-id: '{{ net_id_result.stdout }}'
+ user_data: "{{ lookup('file','files/user-data') }}"
+ meta:
+ env: '{{ cluster }}'
+ host-type: '{{ type }}'
+ env-host-type: '{{ cluster }}-openshift-{{ type }}'
+ floating_ip_pools: '{{ openstack_floating_ip_pools }}'
+ with_items: instances
+ register: nova_compute_result
+
+- name: Add new instances groups and variables
+ add_host:
+ hostname: '{{ item.item }}'
+ ansible_ssh_host: '{{ item.public_ip }}'
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ groups: 'tag_env_{{ cluster }}, tag_host-type_{{ type }}, tag_env-host-type_{{ cluster }}-openshift-{{ type }}'
+ with_items: nova_compute_result.results
+
+- name: Wait for ssh
+ wait_for:
+ host: '{{ item.public_ip }}'
+ port: 22
+ with_items: nova_compute_result.results
+
+- name: Wait for user setup
+ command: 'ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null {{ hostvars[item.item].ansible_ssh_user }}@{{ item.public_ip }} echo {{ hostvars[item.item].ansible_ssh_user }} user is setup'
+ register: result
+ until: result.rc == 0
+ retries: 30
+ delay: 1
+ with_items: nova_compute_result.results
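
The "Get net id" task depends on the tabular output of neutron net-show; outside Ansible the
same lookup looks like this (the network name is a hypothetical example):

    # net-show prints rows such as: | id | 3c0a9f12-...-b7 |
    # The \< \> word boundaries keep the match from also hitting rows like tenant_id;
    # column 4 is the value cell of the matching row
    neutron net-show openshift-ansible-demo-net | awk '/\<id\>/ {print $4}'
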
diff --git a/playbooks/openstack/openshift-cluster/terminate.yml b/playbooks/openstack/openshift-cluster/terminate.yml
new file mode 100644
index 000000000..2f05f0992
--- /dev/null
+++ b/playbooks/openstack/openshift-cluster/terminate.yml
@@ -0,0 +1,43 @@
+- name: Terminate instance(s)
+ hosts: localhost
+ connection: local
+ gather_facts: no
+ vars_files:
+ - vars.yml
+ tasks:
+ - set_fact: cluster_group=tag_env_{{ cluster_id }}
+ - add_host:
+ name: "{{ item }}"
+ groups: oo_hosts_to_terminate
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ with_items: groups[cluster_group] | default([])
+
+- hosts: oo_hosts_to_terminate
+
+- hosts: localhost
+ connection: local
+ gather_facts: no
+ vars_files:
+ - vars.yml
+ tasks:
+ - name: Retrieve the floating IPs
+ shell: "neutron floatingip-list | awk '/{{ hostvars[item].ansible_default_ipv4.address }}/ {print $2}'"
+ with_items: groups['oo_hosts_to_terminate'] | default([])
+ register: floating_ips_to_delete
+
+ - name: Terminate instance(s)
+ nova_compute:
+ name: "{{ hostvars[item].os_name }}"
+ state: absent
+ with_items: groups['oo_hosts_to_terminate'] | default([])
+
+ - name: Delete floating IPs
+ command: "neutron floatingip-delete {{ item.stdout }}"
+ with_items: floating_ips_to_delete.results | default([])
+
+ - name: Destroy the network
+ command: "heat stack-delete {{ openstack_network_prefix }}-stack"
+ register: stack_delete_result
+ changed_when: stack_delete_result.rc == 0
+ failed_when: stack_delete_result.rc != 0 and 'could not be found' not in stack_delete_result.stdout
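
For reference, the floating-IP cleanup above corresponds to these shell steps per instance
(addresses, names and UUIDs are hypothetical; the playbook uses the nova_compute module
rather than the nova CLI):

    # floatingip-list rows look like: | <fip-uuid> | 192.168.23.14 | 172.16.0.77 | <port-uuid> |
    # Match on the instance's fixed (private) IP and print the floating IP's UUID (column 2)
    fip_id=$(neutron floatingip-list | awk '/192.168.23.14/ {print $2}')
    nova delete demo-master-1
    neutron floatingip-delete "$fip_id"
    heat stack-delete openshift-ansible-demo-stack
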
diff --git a/playbooks/openstack/openshift-cluster/update.yml b/playbooks/openstack/openshift-cluster/update.yml
new file mode 100644
index 000000000..5e7ab4e58
--- /dev/null
+++ b/playbooks/openstack/openshift-cluster/update.yml
@@ -0,0 +1,18 @@
+---
+- name: Populate oo_hosts_to_update group
+ hosts: localhost
+ gather_facts: no
+ vars_files:
+ - vars.yml
+ tasks:
+ - name: Evaluate oo_hosts_to_update
+ add_host:
+ name: "{{ item }}"
+ groups: oo_hosts_to_update
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ with_items: groups["tag_env-host-type_{{ cluster_id }}-openshift-master"] | union(groups["tag_env-host-type_{{ cluster_id }}-openshift-node"]) | default([])
+
+- include: ../../common/openshift-cluster/update_repos_and_packages.yml
+
+- include: config.yml
diff --git a/playbooks/openstack/openshift-cluster/vars.yml b/playbooks/openstack/openshift-cluster/vars.yml
new file mode 100644
index 000000000..c754f19fc
--- /dev/null
+++ b/playbooks/openstack/openshift-cluster/vars.yml
@@ -0,0 +1,39 @@
+---
+openstack_infra_heat_stack: "{{ opt_infra_heat_stack | default('files/heat_stack.yml') }}"
+openstack_network_prefix: "{{ opt_network_prefix | default('openshift-ansible-'+cluster_id) }}"
+openstack_network_cidr: "{{ opt_net_cidr | default('192.168.' + ( ( 1048576 | random % 256 ) | string() ) + '.0/24') }}"
+openstack_network_external_net: "{{ opt_external_net | default('external') }}"
+openstack_floating_ip_pools: "{{ opt_floating_ip_pools | default('external') | oo_split() }}"
+openstack_network_dns: "{{ opt_dns | default('8.8.8.8,8.8.4.4') | oo_split() }}"
+openstack_ssh_keypair: "{{ opt_keypair | default(lookup('env', 'LOGNAME')+'_key') }}"
+openstack_ssh_public_key: "{{ lookup('file', opt_public_key | default('~/.ssh/id_rsa.pub')) }}"
+openstack_ssh_access_from: "{{ opt_ssh_from | default('0.0.0.0/0') }}"
+openstack_flavor:
+ master:
+ ram: "{{ opt_master_flavor_ram | default(2048) }}"
+ id: "{{ opt_master_flavor_id | default() }}"
+ include: "{{ opt_master_flavor_include | default() }}"
+ node:
+ ram: "{{ opt_node_flavor_ram | default(4096) }}"
+ id: "{{ opt_node_flavor_id | default() }}"
+ include: "{{ opt_node_flavor_include | default() }}"
+
+deployment_vars:
+ origin:
+ image:
+ name: "{{ opt_image_name | default('centos-70-raw') }}"
+ id:
+ ssh_user: openshift
+ sudo: yes
+ online:
+ image:
+ name:
+ id:
+ ssh_user: root
+ sudo: no
+ enterprise:
+ image:
+ name: "{{ opt_image_name | default('centos-70-raw') }}"
+ id:
+ ssh_user: openshift
+ sudo: yes
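
One non-obvious default above: when opt_net_cidr is not supplied, openstack_network_cidr picks
a random 192.168.X.0/24 (Ansible's random filter yields an integer below 1048576, and % 256
reduces it to a single octet). A shell equivalent of that default:

    # Random /24 somewhere under 192.168.0.0/16, mirroring the Jinja2 default
    echo "192.168.$(( RANDOM % 256 )).0/24"
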
diff --git a/rel-eng/packages/openshift-ansible-bin b/rel-eng/packages/openshift-ansible-bin
index de9bb5157..2a8225740 100644
--- a/rel-eng/packages/openshift-ansible-bin
+++ b/rel-eng/packages/openshift-ansible-bin
@@ -1 +1 @@
-0.0.17-1 bin/
+0.0.18-1 bin/
diff --git a/rel-eng/packages/openshift-ansible-inventory b/rel-eng/packages/openshift-ansible-inventory
index df529d9fd..4851be122 100644
--- a/rel-eng/packages/openshift-ansible-inventory
+++ b/rel-eng/packages/openshift-ansible-inventory
@@ -1 +1 @@
-0.0.7-1 inventory/
+0.0.8-1 inventory/
diff --git a/roles/atomic_base/README.md b/roles/atomic_base/README.md
deleted file mode 100644
index 8fe3faf7d..000000000
--- a/roles/atomic_base/README.md
+++ /dev/null
@@ -1,56 +0,0 @@
-Role Name
-========
-
-The purpose of this role is to do common configurations for all RHEL atomic hosts.
-
-
-Requirements
-------------
-
-None
-
-
-Role Variables
---------------
-
-None
-
-
-Dependencies
-------------
-
-None
-
-
-Example Playbook
--------------------------
-
-From a group playbook:
-
- hosts: servers
- roles:
- - ../../roles/atomic_base
-
-
-License
--------
-
-Copyright 2012-2014 Red Hat, Inc., All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-
-Author Information
-------------------
-
-Thomas Wiest <twiest@redhat.com>
diff --git a/roles/atomic_base/files/bash/bashrc b/roles/atomic_base/files/bash/bashrc
deleted file mode 100644
index 446f18f22..000000000
--- a/roles/atomic_base/files/bash/bashrc
+++ /dev/null
@@ -1,12 +0,0 @@
-# .bashrc
-
-# User specific aliases and functions
-
-alias rm='rm -i'
-alias cp='cp -i'
-alias mv='mv -i'
-
-# Source global definitions
-if [ -f /etc/bashrc ]; then
- . /etc/bashrc
-fi
diff --git a/roles/atomic_base/files/ostree/repo_config b/roles/atomic_base/files/ostree/repo_config
deleted file mode 100644
index 7038158f9..000000000
--- a/roles/atomic_base/files/ostree/repo_config
+++ /dev/null
@@ -1,10 +0,0 @@
-[core]
-repo_version=1
-mode=bare
-
-[remote "rh-atomic-controller"]
-url=https://mirror.openshift.com/libra/ostree/rhel-7-atomic-host
-branches=rh-atomic-controller/el7/x86_64/buildmaster/controller/docker;
-tls-client-cert-path=/var/lib/yum/client-cert.pem
-tls-client-key-path=/var/lib/yum/client-key.pem
-gpg-verify=false
diff --git a/roles/atomic_base/files/system/90-nofile.conf b/roles/atomic_base/files/system/90-nofile.conf
deleted file mode 100644
index 8537a4c5f..000000000
--- a/roles/atomic_base/files/system/90-nofile.conf
+++ /dev/null
@@ -1,7 +0,0 @@
-# PAM process file descriptor limits
-# see limits.conf(5) for details.
-#Each line describes a limit for a user in the form:
-#
-#<domain> <type> <item> <value>
-* hard nofile 16384
-root soft nofile 16384
diff --git a/roles/atomic_base/meta/main.yml b/roles/atomic_base/meta/main.yml
deleted file mode 100644
index 9578ab809..000000000
--- a/roles/atomic_base/meta/main.yml
+++ /dev/null
@@ -1,19 +0,0 @@
----
-galaxy_info:
- author: Thomas Wiest
- description: Common base RHEL atomic configurations
- company: Red Hat
- # Some suggested licenses:
- # - BSD (default)
- # - MIT
- # - GPLv2
- # - GPLv3
- # - Apache
- # - CC-BY
- license: Apache
- min_ansible_version: 1.2
- platforms:
- - name: EL
- versions:
- - 7
-dependencies: []
diff --git a/roles/atomic_base/tasks/bash.yml b/roles/atomic_base/tasks/bash.yml
deleted file mode 100644
index 547ae83c3..000000000
--- a/roles/atomic_base/tasks/bash.yml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-- name: Copy .bashrc
- copy: src=bash/bashrc dest=/root/.bashrc owner=root group=root mode=0644
-
-- name: Link to .profile to .bashrc
- file: src=/root/.bashrc dest=/root/.profile owner=root group=root state=link
-
-- name: "Setup Timezone [{{ oo_timezone }}]"
- file:
- src: "/usr/share/zoneinfo/{{ oo_timezone }}"
- dest: /etc/localtime
- owner: root
- group: root
- state: link
diff --git a/roles/atomic_base/tasks/cloud_user.yml b/roles/atomic_base/tasks/cloud_user.yml
deleted file mode 100644
index e7347fc3d..000000000
--- a/roles/atomic_base/tasks/cloud_user.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- name: Remove cloud-user account
- user: name=cloud-user state=absent remove=yes force=yes
-
-- name: Remove cloud-user sudo
- file: path=/etc/sudoers.d/90-cloud-init-users state=absent
diff --git a/roles/atomic_base/tasks/main.yml b/roles/atomic_base/tasks/main.yml
deleted file mode 100644
index 5d8e8571a..000000000
--- a/roles/atomic_base/tasks/main.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-- include: system.yml
-- include: bash.yml
-- include: ostree.yml
diff --git a/roles/atomic_base/tasks/ostree.yml b/roles/atomic_base/tasks/ostree.yml
deleted file mode 100644
index aacaa5efd..000000000
--- a/roles/atomic_base/tasks/ostree.yml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-- name: Copy ostree repo config
- copy:
- src: ostree/repo_config
- dest: /ostree/repo/config
- owner: root
- group: root
- mode: 0644
-
-- name: "WORK AROUND: Stat redhat repo file"
- stat: path=/etc/yum.repos.d/redhat.repo
- register: redhat_repo
-
-- name: "WORK AROUND: subscription manager failures"
- file:
- path: /etc/yum.repos.d/redhat.repo
- state: touch
- when: redhat_repo.stat.exists == False
diff --git a/roles/atomic_base/tasks/system.yml b/roles/atomic_base/tasks/system.yml
deleted file mode 100644
index e5cde427d..000000000
--- a/roles/atomic_base/tasks/system.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-- name: Upload nofile limits.d file
- copy: src=system/90-nofile.conf dest=/etc/security/limits.d/90-nofile.conf owner=root group=root mode=0644
diff --git a/roles/atomic_base/vars/main.yml b/roles/atomic_base/vars/main.yml
deleted file mode 100644
index d4e61175c..000000000
--- a/roles/atomic_base/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-oo_timezone: US/Eastern
diff --git a/roles/atomic_proxy/README.md b/roles/atomic_proxy/README.md
deleted file mode 100644
index 348eaee1f..000000000
--- a/roles/atomic_proxy/README.md
+++ /dev/null
@@ -1,56 +0,0 @@
-Role Name
-========
-
-The purpose of this role is to do common configurations for all RHEL atomic hosts.
-
-
-Requirements
-------------
-
-None
-
-
-Role Variables
---------------
-
-None
-
-
-Dependencies
-------------
-
-None
-
-
-Example Playbook
--------------------------
-
-From a group playbook:
-
- hosts: servers
- roles:
- - ../../roles/atomic_proxy
-
-
-License
--------
-
-Copyright 2012-2014 Red Hat, Inc., All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-
-Author Information
-------------------
-
-Thomas Wiest <twiest@redhat.com>
diff --git a/roles/atomic_proxy/files/proxy_containers_deploy_descriptor.json b/roles/atomic_proxy/files/proxy_containers_deploy_descriptor.json
deleted file mode 100644
index c15835d48..000000000
--- a/roles/atomic_proxy/files/proxy_containers_deploy_descriptor.json
+++ /dev/null
@@ -1,29 +0,0 @@
-{
- "Containers":[
- {
- "Name":"proxy-puppet",
- "Count":1,
- "Image":"puppet:latest",
- "PublicPorts":[
- ]
- },
- {
- "Name":"proxy",
- "Count":1,
- "Image":"proxy:latest",
- "PublicPorts":[
- {"Internal":80,"External":80},
- {"Internal":443,"External":443},
- {"Internal":4999,"External":4999}
- ]
- },
- {
- "Name":"proxy-monitoring",
- "Count":1,
- "Image":"monitoring:latest",
- "PublicPorts":[
- ]
- }
- ],
- "RandomizeIds": false
-}
diff --git a/roles/atomic_proxy/files/puppet/auth.conf b/roles/atomic_proxy/files/puppet/auth.conf
deleted file mode 100644
index b31906bae..000000000
--- a/roles/atomic_proxy/files/puppet/auth.conf
+++ /dev/null
@@ -1,116 +0,0 @@
-# This is the default auth.conf file, which implements the default rules
-# used by the puppet master. (That is, the rules below will still apply
-# even if this file is deleted.)
-#
-# The ACLs are evaluated in top-down order. More specific stanzas should
-# be towards the top of the file and more general ones at the bottom;
-# otherwise, the general rules may "steal" requests that should be
-# governed by the specific rules.
-#
-# See http://docs.puppetlabs.com/guides/rest_auth_conf.html for a more complete
-# description of auth.conf's behavior.
-#
-# Supported syntax:
-# Each stanza in auth.conf starts with a path to match, followed
-# by optional modifiers, and finally, a series of allow or deny
-# directives.
-#
-# Example Stanza
-# ---------------------------------
-# path /path/to/resource # simple prefix match
-# # path ~ regex # alternately, regex match
-# [environment envlist]
-# [method methodlist]
-# [auth[enthicated] {yes|no|on|off|any}]
-# allow [host|backreference|*|regex]
-# deny [host|backreference|*|regex]
-# allow_ip [ip|cidr|ip_wildcard|*]
-# deny_ip [ip|cidr|ip_wildcard|*]
-#
-# The path match can either be a simple prefix match or a regular
-# expression. `path /file` would match both `/file_metadata` and
-# `/file_content`. Regex matches allow the use of backreferences
-# in the allow/deny directives.
-#
-# The regex syntax is the same as for Ruby regex, and captures backreferences
-# for use in the `allow` and `deny` lines of that stanza
-#
-# Examples:
-#
-# path ~ ^/path/to/resource # Equivalent to `path /path/to/resource`.
-# allow * # Allow all authenticated nodes (since auth
-# # defaults to `yes`).
-#
-# path ~ ^/catalog/([^/]+)$ # Permit nodes to access their own catalog (by
-# allow $1 # certname), but not any other node's catalog.
-#
-# path ~ ^/file_(metadata|content)/extra_files/ # Only allow certain nodes to
-# auth yes # access the "extra_files"
-# allow /^(.+)\.example\.com$/ # mount point; note this must
-# allow_ip 192.168.100.0/24 # go ABOVE the "/file" rule,
-# # since it is more specific.
-#
-# environment:: restrict an ACL to a comma-separated list of environments
-# method:: restrict an ACL to a comma-separated list of HTTP methods
-# auth:: restrict an ACL to an authenticated or unauthenticated request
-# the default when unspecified is to restrict the ACL to authenticated requests
-# (ie exactly as if auth yes was present).
-#
-
-### Authenticated ACLs - these rules apply only when the client
-### has a valid certificate and is thus authenticated
-
-# allow nodes to retrieve their own catalog
-path ~ ^/catalog/([^/]+)$
-method find
-allow $1
-
-# allow nodes to retrieve their own node definition
-path ~ ^/node/([^/]+)$
-method find
-allow $1
-
-# allow all nodes to access the certificates services
-path /certificate_revocation_list/ca
-method find
-allow *
-
-# allow all nodes to store their own reports
-path ~ ^/report/([^/]+)$
-method save
-allow $1
-
-# Allow all nodes to access all file services; this is necessary for
-# pluginsync, file serving from modules, and file serving from custom
-# mount points (see fileserver.conf). Note that the `/file` prefix matches
-# requests to both the file_metadata and file_content paths. See "Examples"
-# above if you need more granular access control for custom mount points.
-path /file
-allow *
-
-### Unauthenticated ACLs, for clients without valid certificates; authenticated
-### clients can also access these paths, though they rarely need to.
-
-# allow access to the CA certificate; unauthenticated nodes need this
-# in order to validate the puppet master's certificate
-path /certificate/ca
-auth any
-method find
-allow *
-
-# allow nodes to retrieve the certificate they requested earlier
-path /certificate/
-auth any
-method find
-allow *
-
-# allow nodes to request a new certificate
-path /certificate_request
-auth any
-method find, save
-allow *
-
-# deny everything else; this ACL is not strictly necessary, but
-# illustrates the default policy.
-path /
-auth any
diff --git a/roles/atomic_proxy/files/setup-proxy-containers.sh b/roles/atomic_proxy/files/setup-proxy-containers.sh
deleted file mode 100755
index d047c96c1..000000000
--- a/roles/atomic_proxy/files/setup-proxy-containers.sh
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/bin/bash
-
-function fail {
- msg=$1
- echo
- echo $msg
- echo
- exit 5
-}
-
-
-NUM_DATA_CTR=$(docker ps -a | grep -c proxy-shared-data-1)
-[ "$NUM_DATA_CTR" -ne 0 ] && fail "ERROR: proxy-shared-data-1 exists"
-
-
-# pre-cache the container images
-echo
-timeout --signal TERM --kill-after 30 600 docker pull busybox:latest || fail "ERROR: docker pull of busybox failed"
-
-echo
-# WORKAROUND: Setup the shared data container
-/usr/bin/docker run --name "proxy-shared-data-1" \
- -v /shared/etc/haproxy \
- -v /shared/etc/httpd \
- -v /shared/etc/openshift \
- -v /shared/etc/pki \
- -v /shared/var/run/ctr-ipc \
- -v /shared/var/lib/haproxy \
- -v /shared/usr/local \
- "busybox:latest" true
-
-# WORKAROUND: These are because we're not using a pod yet
-cp /usr/local/etc/ctr-proxy-1.service /usr/local/etc/ctr-proxy-puppet-1.service /usr/local/etc/ctr-proxy-monitoring-1.service /etc/systemd/system/
-
-systemctl daemon-reload
-
-echo
-echo -n "sleeping 10 seconds for systemd reload to take affect..."
-sleep 10
-echo " Done."
-
-# Start the services
-systemctl start ctr-proxy-puppet-1 ctr-proxy-1 ctr-proxy-monitoring-1
diff --git a/roles/atomic_proxy/handlers/main.yml b/roles/atomic_proxy/handlers/main.yml
deleted file mode 100644
index 8eedec17a..000000000
--- a/roles/atomic_proxy/handlers/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-- name: reload systemd
- command: systemctl daemon-reload
diff --git a/roles/atomic_proxy/meta/main.yml b/roles/atomic_proxy/meta/main.yml
deleted file mode 100644
index a92d685b1..000000000
--- a/roles/atomic_proxy/meta/main.yml
+++ /dev/null
@@ -1,21 +0,0 @@
----
-galaxy_info:
- author: Thomas Wiest
- description: Common base RHEL atomic configurations
- company: Red Hat
- # Some suggested licenses:
- # - BSD (default)
- # - MIT
- # - GPLv2
- # - GPLv3
- # - Apache
- # - CC-BY
- license: Apache
- min_ansible_version: 1.2
- platforms:
- - name: EL
- versions:
- - 7
-dependencies:
- # This is the role's PRIVATE counterpart, which is used.
- - ../../../../../atomic_private/ansible/roles/atomic_proxy
diff --git a/roles/atomic_proxy/tasks/main.yml b/roles/atomic_proxy/tasks/main.yml
deleted file mode 100644
index 073a1c61e..000000000
--- a/roles/atomic_proxy/tasks/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-- include: setup_puppet.yml
-- include: setup_containers.yml
diff --git a/roles/atomic_proxy/tasks/setup_containers.yml b/roles/atomic_proxy/tasks/setup_containers.yml
deleted file mode 100644
index ee971623a..000000000
--- a/roles/atomic_proxy/tasks/setup_containers.yml
+++ /dev/null
@@ -1,57 +0,0 @@
----
-- name: "get output of: docker images"
- command: docker images
- changed_when: False # don't report as changed
- register: docker_images
-
-- name: docker pull busybox ONLY if it's not present
- command: "docker pull busybox:latest"
- when: "not docker_images.stdout | search('busybox.*latest')"
-
-- name: docker pull containers ONLY if they're not present (needed otherwise systemd will timeout pulling the containers)
- command: "docker pull docker-registry.ops.rhcloud.com/{{ item }}:{{ oo_env }}"
- with_items:
- - oso-v2-proxy
- - oso-v2-puppet
- - oso-v2-monitoring
- when: "not docker_images.stdout | search('docker-registry.ops.rhcloud.com/{{ item }}.*{{ oo_env }}')"
-
-- name: "get output of: docker ps -a"
- command: docker ps -a
- changed_when: False # don't report as changed
- register: docker_ps
-
-- name: run proxy-shared-data-1
- command: /usr/bin/docker run --name "proxy-shared-data-1" \
- -v /shared/etc/haproxy \
- -v /shared/etc/httpd \
- -v /shared/etc/openshift \
- -v /shared/etc/pki \
- -v /shared/var/run/ctr-ipc \
- -v /shared/var/lib/haproxy \
- -v /shared/usr/local \
- "busybox:latest" true
- when: "not docker_ps.stdout | search('proxy-shared-data-1')"
-
-- name: Deploy systemd files for containers
- template:
- src: "systemd/{{ item }}.j2"
- dest: "/etc/systemd/system/{{ item }}"
- mode: 0640
- owner: root
- group: root
- with_items:
- - ctr-proxy-1.service
- - ctr-proxy-monitoring-1.service
- - ctr-proxy-puppet-1.service
- notify: reload systemd
-
-- name: start containers
- service:
- name: "{{ item }}"
- state: started
- enabled: yes
- with_items:
- - ctr-proxy-puppet-1
- - ctr-proxy-1
- - ctr-proxy-monitoring-1
diff --git a/roles/atomic_proxy/tasks/setup_puppet.yml b/roles/atomic_proxy/tasks/setup_puppet.yml
deleted file mode 100644
index 7a599f06d..000000000
--- a/roles/atomic_proxy/tasks/setup_puppet.yml
+++ /dev/null
@@ -1,24 +0,0 @@
----
-- name: make puppet conf dir
- file:
- dest: "{{ oo_proxy_puppet_volume_dir }}/etc/puppet"
- mode: 755
- owner: root
- group: root
- state: directory
-
-- name: upload puppet auth config
- copy:
- src: puppet/auth.conf
- dest: "{{ oo_proxy_puppet_volume_dir }}/etc/puppet/auth.conf"
- mode: 0644
- owner: root
- group: root
-
-- name: upload puppet config
- template:
- src: puppet/puppet.conf.j2
- dest: "{{ oo_proxy_puppet_volume_dir }}/etc/puppet/puppet.conf"
- mode: 0644
- owner: root
- group: root
diff --git a/roles/atomic_proxy/templates/puppet/puppet.conf.j2 b/roles/atomic_proxy/templates/puppet/puppet.conf.j2
deleted file mode 100644
index 9731ff168..000000000
--- a/roles/atomic_proxy/templates/puppet/puppet.conf.j2
+++ /dev/null
@@ -1,40 +0,0 @@
-[main]
- # we need to override the host name of the container
- certname = ctr-proxy.{{ oo_env }}.rhcloud.com
-
- # The Puppet log directory.
- # The default value is '$vardir/log'.
- logdir = /var/log/puppet
-
- # Where Puppet PID files are kept.
- # The default value is '$vardir/run'.
- rundir = /var/run/puppet
-
- # Where SSL certificates are kept.
- # The default value is '$confdir/ssl'.
- ssldir = $vardir/ssl
- manifest = $manifestdir/site.pp
- manifestdir = /var/lib/puppet/environments/pub/$environment/manifests
- environment = {{ oo_env_long }}
- modulepath = /var/lib/puppet/environments/pub/$environment/modules:/var/lib/puppet/environments/pri/$environment/modules:/var/lib/puppet/environments/pri/production/modules:$confdir/modules:/usr/share/puppet/modules
-
-[agent]
- # The file in which puppetd stores a list of the classes
- # associated with the retrieved configuratiion. Can be loaded in
- # the separate ``puppet`` executable using the ``--loadclasses``
- # option.
- # The default value is '$confdir/classes.txt'.
- classfile = $vardir/classes.txt
-
- # Where puppetd caches the local configuration. An
- # extension indicating the cache format is added automatically.
- # The default value is '$confdir/localconfig'.
- localconfig = $vardir/localconfig
- server = puppet.ops.rhcloud.com
- environment = {{ oo_env_long }}
- pluginsync = true
- graph = true
- configtimeout = 600
- report = true
- runinterval = 3600
- splay = true
diff --git a/roles/atomic_proxy/templates/sync/sync-proxy-configs.sh.j2 b/roles/atomic_proxy/templates/sync/sync-proxy-configs.sh.j2
deleted file mode 100755
index d9aa2d811..000000000
--- a/roles/atomic_proxy/templates/sync/sync-proxy-configs.sh.j2
+++ /dev/null
@@ -1,16 +0,0 @@
-#!/bin/bash
-
-VOL_DIR=/var/lib/docker/volumes/proxy
-SSH_CMD="ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null"
-
-mkdir -p ${VOL_DIR}/etc/haproxy/
-rsync -e "${SSH_CMD}" -va --progress root@proxy1.{{ oo_env }}.rhcloud.com:/etc/haproxy/ ${VOL_DIR}/etc/haproxy/
-
-mkdir -p ${VOL_DIR}/etc/httpd/
-rsync -e "${SSH_CMD}" -va --progress root@proxy1.{{ oo_env }}.rhcloud.com:/etc/httpd/ ${VOL_DIR}/etc/httpd/
-
-mkdir -p ${VOL_DIR}/etc/pki/tls/
-rsync -e "${SSH_CMD}" -va --progress root@proxy1.{{ oo_env }}.rhcloud.com:/etc/pki/tls/ ${VOL_DIR}/etc/pki/tls/
-
-# We need to disable the haproxy chroot
-sed -i -re 's/^(\s+)chroot/\1#chroot/' /var/lib/docker/volumes/proxy/etc/haproxy/haproxy.cfg
diff --git a/roles/atomic_proxy/templates/systemd/ctr-proxy-1.service.j2 b/roles/atomic_proxy/templates/systemd/ctr-proxy-1.service.j2
deleted file mode 100644
index 988a9f544..000000000
--- a/roles/atomic_proxy/templates/systemd/ctr-proxy-1.service.j2
+++ /dev/null
@@ -1,32 +0,0 @@
-[Unit]
-Description=Container proxy-1
-
-
-[Service]
-Type=simple
-TimeoutStartSec=5m
-Slice=container-small.slice
-
-ExecStartPre=-/usr/bin/docker rm "proxy-1"
-
-ExecStart=/usr/bin/docker run --rm --name "proxy-1" \
- --volumes-from proxy-shared-data-1 \
- -a stdout -a stderr -p 80:80 -p 443:443 -p 4999:4999 \
- "docker-registry.ops.rhcloud.com/oso-v2-proxy:{{ oo_env }}"
-
-ExecReload=-/usr/bin/docker stop "proxy-1"
-ExecReload=-/usr/bin/docker rm "proxy-1"
-ExecStop=-/usr/bin/docker stop "proxy-1"
-
-[Install]
-WantedBy=container.target
-
-# Container information
-X-ContainerId=proxy-1
-X-ContainerImage=docker-registry.ops.rhcloud.com/oso-v2-proxy:{{ oo_env }}
-X-ContainerUserId=
-X-ContainerRequestId=LwiWtYWaAvSavH6Ze53QJg
-X-ContainerType=simple
-X-PortMapping=80:80
-X-PortMapping=443:443
-X-PortMapping=4999:4999
diff --git a/roles/atomic_proxy/templates/systemd/ctr-proxy-monitoring-1.service.j2 b/roles/atomic_proxy/templates/systemd/ctr-proxy-monitoring-1.service.j2
deleted file mode 100644
index 975b0061b..000000000
--- a/roles/atomic_proxy/templates/systemd/ctr-proxy-monitoring-1.service.j2
+++ /dev/null
@@ -1,36 +0,0 @@
-[Unit]
-Description=Container proxy-monitoring-1
-
-
-[Service]
-Type=simple
-TimeoutStartSec=5m
-Slice=container-small.slice
-
-ExecStartPre=-/usr/bin/docker rm "proxy-monitoring-1"
-
-ExecStart=/usr/bin/docker run --rm --name "proxy-monitoring-1" \
- --volumes-from proxy-shared-data-1 \
- -a stdout -a stderr \
- -e "OO_ENV={{ oo_env }}" \
- -e "OO_CTR_TYPE=proxy" \
- -e "OO_ZABBIX_HOSTGROUPS={{ oo_zabbix_hostgroups | join(',') }}" \
- -e "OO_ZABBIX_TEMPLATES=Template OpenShift Proxy Ctr" \
- "docker-registry.ops.rhcloud.com/oso-v2-monitoring:{{ oo_env }}"
-
-ExecReload=-/usr/bin/docker stop "proxy-monitoring-1"
-ExecReload=-/usr/bin/docker rm "proxy-monitoring-1"
-ExecStop=-/usr/bin/docker stop "proxy-monitoring-1"
-
-[Install]
-WantedBy=container.target
-
-# Container information
-X-ContainerId=proxy-monitoring-1
-X-ContainerImage=docker-registry.ops.rhcloud.com/oso-v2-monitoring:{{ oo_env }}
-X-ContainerUserId=
-X-ContainerRequestId=LwiWtYWaAvSavH6Ze53QJg
-X-ContainerType=simple
-X-PortMapping=80:80
-X-PortMapping=443:443
-X-PortMapping=4999:4999
diff --git a/roles/atomic_proxy/templates/systemd/ctr-proxy-puppet-1.service.j2 b/roles/atomic_proxy/templates/systemd/ctr-proxy-puppet-1.service.j2
deleted file mode 100644
index c3f28f471..000000000
--- a/roles/atomic_proxy/templates/systemd/ctr-proxy-puppet-1.service.j2
+++ /dev/null
@@ -1,33 +0,0 @@
-[Unit]
-Description=Container proxy-puppet-1
-
-
-[Service]
-Type=simple
-TimeoutStartSec=5m
-Slice=container-small.slice
-
-
-ExecStartPre=-/usr/bin/docker rm "proxy-puppet-1"
-
-ExecStart=/usr/bin/docker run --rm --name "proxy-puppet-1" \
- --volumes-from proxy-shared-data-1 \
- -v /var/lib/docker/volumes/proxy_puppet/var/lib/puppet/ssl:/var/lib/puppet/ssl \
- -v /var/lib/docker/volumes/proxy_puppet/etc/puppet:/etc/puppet \
- -a stdout -a stderr \
- "docker-registry.ops.rhcloud.com/oso-v2-puppet:{{ oo_env }}"
-
-# Set links (requires container have a name)
-ExecReload=-/usr/bin/docker stop "proxy-puppet-1"
-ExecReload=-/usr/bin/docker rm "proxy-puppet-1"
-ExecStop=-/usr/bin/docker stop "proxy-puppet-1"
-
-[Install]
-WantedBy=container.target
-
-# Container information
-X-ContainerId=proxy-puppet-1
-X-ContainerImage=docker-registry.ops.rhcloud.com/oso-v2-puppet:{{ oo_env }}
-X-ContainerUserId=
-X-ContainerRequestId=Ky0lhw0onwoSDJR4GK6t3g
-X-ContainerType=simple
diff --git a/roles/atomic_proxy/vars/main.yml b/roles/atomic_proxy/vars/main.yml
deleted file mode 100644
index 1f90492fd..000000000
--- a/roles/atomic_proxy/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-oo_proxy_puppet_volume_dir: /var/lib/docker/volumes/proxy_puppet
diff --git a/roles/docker/files/enter-container.sh b/roles/docker/files/enter-container.sh
deleted file mode 100755
index 7cf5b8d83..000000000
--- a/roles/docker/files/enter-container.sh
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/bin/bash
-
-if [ $# -ne 1 ]
-then
- echo
- echo "Usage: $(basename $0) <container_name>"
- echo
- exit 1
-fi
-
-PID=$(docker inspect --format '{{.State.Pid}}' $1)
-
-nsenter --target $PID --mount --uts --ipc --net --pid
diff --git a/roles/docker/handlers/main.yml b/roles/docker/handlers/main.yml
new file mode 100644
index 000000000..eca7419c1
--- /dev/null
+++ b/roles/docker/handlers/main.yml
@@ -0,0 +1,4 @@
+---
+
+- name: restart docker
+ service: name=docker state=restarted
diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml
index ca700db17..96949230d 100644
--- a/roles/docker/tasks/main.yml
+++ b/roles/docker/tasks/main.yml
@@ -1,15 +1,8 @@
---
# tasks file for docker
- name: Install docker
- yum: pkg=docker-io
+ yum: pkg=docker
- name: enable and start the docker service
service: name=docker enabled=yes state=started
-- copy: src=enter-container.sh dest=/usr/local/bin/enter-container.sh mode=0755
-
-# From the origin rpm there exists instructions on how to
-# setup origin properly. The following steps come from there
-- name: Change root to be in the Docker group
- user: name=root groups=dockerroot append=yes
-
diff --git a/roles/docker_storage/README.md b/roles/docker_storage/README.md
new file mode 100644
index 000000000..0d8f31afc
--- /dev/null
+++ b/roles/docker_storage/README.md
@@ -0,0 +1,39 @@
+docker_storage
+=========
+
+Configure docker storage options (LVM data/metadata logical volumes and the
+DOCKER_STORAGE_OPTIONS line in /etc/sysconfig/docker-storage).
+
+Requirements
+------------
+
+None
+
+Role Variables
+--------------
+
+* dst_device: block device(s) used as physical volume(s) for the volume group
+* dst_vg: volume group in which the data and metadata volumes are created
+* dst_options: optional list of key/value pairs rendered as --storage-opt key=value
+
+Dependencies
+------------
+
+None
+
+Example Playbook
+----------------
+
+An example of using the role with variables passed in as parameters:
+
+    - hosts: servers
+      roles:
+      - role: docker_storage
+        dst_options:
+        - key: df.fs
+          value: xfs
+
+License
+-------
+
+ASL 2.0
+
+Author Information
+------------------
+
+OpenShift Operations, Red Hat, Inc.
diff --git a/playbooks/aws/os2-atomic-proxy/vars.yml b/roles/docker_storage/defaults/main.yml
index ed97d539c..ed97d539c 100644
--- a/playbooks/aws/os2-atomic-proxy/vars.yml
+++ b/roles/docker_storage/defaults/main.yml
diff --git a/roles/docker_storage/handlers/main.yml b/roles/docker_storage/handlers/main.yml
new file mode 100644
index 000000000..ed97d539c
--- /dev/null
+++ b/roles/docker_storage/handlers/main.yml
@@ -0,0 +1 @@
+---
diff --git a/roles/docker_storage/meta/main.yml b/roles/docker_storage/meta/main.yml
new file mode 100644
index 000000000..a5d51cd3a
--- /dev/null
+++ b/roles/docker_storage/meta/main.yml
@@ -0,0 +1,9 @@
+---
+galaxy_info:
+ author: Openshift
+ description: Setup docker_storage options
+ company: Red Hat, Inc
+ license: ASL 2.0
+ min_ansible_version: 1.2
+dependencies:
+- docker
diff --git a/roles/docker_storage/tasks/main.yml b/roles/docker_storage/tasks/main.yml
new file mode 100644
index 000000000..48a3fc208
--- /dev/null
+++ b/roles/docker_storage/tasks/main.yml
@@ -0,0 +1,37 @@
+---
+- lvg:
+ pvs: "{{ dst_device }}"
+ vg: "{{ dst_vg }}"
+ register: dst_lvg
+
+- lvol:
+ lv: data
+ vg: "{{ dst_vg }}"
+ size: 95%VG
+ register: dst_lvol_data
+
+- lvol:
+ lv: metadata
+ vg: "{{ dst_vg }}"
+ size: 5%VG
+ register: dst_lvol_metadata
+
+
+- name: Update docker_storage options
+ lineinfile:
+ dest: /etc/sysconfig/docker-storage
+ backrefs: yes
+ regexp: "^(DOCKER_STORAGE_OPTIONS=)"
+ line: '\1 --storage-opt {{ dst_options | oo_combine_key_value("=") | join(" --storage-opt ") }}'
+ when: dst_options is defined and dst_options | length > 0
+ register: dst_config
+
+
+- name: Reload systemd units
+ command: systemctl daemon-reload
+ notify:
+ - restart docker
+ when: dst_config | changed or
+ dst_lvg | changed or
+ dst_lvol_data | changed or
+ dst_lvol_metadata | changed
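
A sketch of the end result, assuming oo_combine_key_value turns the dst_options list into
key=value strings and using hypothetical option values (dm.fs=xfs, dm.basesize=20G): the
lineinfile backreference rewrites the storage options line roughly as shown below.

    $ grep ^DOCKER_STORAGE_OPTIONS /etc/sysconfig/docker-storage
    DOCKER_STORAGE_OPTIONS= --storage-opt dm.fs=xfs --storage-opt dm.basesize=20G
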
diff --git a/roles/docker_storage/vars/main.yml b/roles/docker_storage/vars/main.yml
new file mode 100644
index 000000000..ed97d539c
--- /dev/null
+++ b/roles/docker_storage/vars/main.yml
@@ -0,0 +1 @@
+---
diff --git a/roles/etcd/README.md b/roles/etcd/README.md
deleted file mode 100644
index 225dd44b9..000000000
--- a/roles/etcd/README.md
+++ /dev/null
@@ -1,38 +0,0 @@
-Role Name
-=========
-
-A brief description of the role goes here.
-
-Requirements
-------------
-
-Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
-
-Role Variables
---------------
-
-A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well.
-
-Dependencies
-------------
-
-A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.
-
-Example Playbook
-----------------
-
-Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
-
- - hosts: servers
- roles:
- - { role: username.rolename, x: 42 }
-
-License
--------
-
-BSD
-
-Author Information
-------------------
-
-An optional section for the role authors to include contact information, or a website (HTML is not allowed).
diff --git a/roles/etcd/handlers/main.yml b/roles/etcd/handlers/main.yml
deleted file mode 100644
index b897913f9..000000000
--- a/roles/etcd/handlers/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-- name: restart etcd
- service: name=etcd state=restarted
diff --git a/roles/etcd/meta/main.yml b/roles/etcd/meta/main.yml
deleted file mode 100644
index c5c362c60..000000000
--- a/roles/etcd/meta/main.yml
+++ /dev/null
@@ -1,124 +0,0 @@
----
-galaxy_info:
- author: your name
- description:
- company: your company (optional)
- # Some suggested licenses:
- # - BSD (default)
- # - MIT
- # - GPLv2
- # - GPLv3
- # - Apache
- # - CC-BY
- license: license (GPLv2, CC-BY, etc)
- min_ansible_version: 1.2
- #
- # Below are all platforms currently available. Just uncomment
- # the ones that apply to your role. If you don't see your
- # platform on this list, let us know and we'll get it added!
- #
- #platforms:
- #- name: EL
- # versions:
- # - all
- # - 5
- # - 6
- # - 7
- #- name: GenericUNIX
- # versions:
- # - all
- # - any
- #- name: Fedora
- # versions:
- # - all
- # - 16
- # - 17
- # - 18
- # - 19
- # - 20
- #- name: opensuse
- # versions:
- # - all
- # - 12.1
- # - 12.2
- # - 12.3
- # - 13.1
- # - 13.2
- #- name: Amazon
- # versions:
- # - all
- # - 2013.03
- # - 2013.09
- #- name: GenericBSD
- # versions:
- # - all
- # - any
- #- name: FreeBSD
- # versions:
- # - all
- # - 8.0
- # - 8.1
- # - 8.2
- # - 8.3
- # - 8.4
- # - 9.0
- # - 9.1
- # - 9.1
- # - 9.2
- #- name: Ubuntu
- # versions:
- # - all
- # - lucid
- # - maverick
- # - natty
- # - oneiric
- # - precise
- # - quantal
- # - raring
- # - saucy
- # - trusty
- #- name: SLES
- # versions:
- # - all
- # - 10SP3
- # - 10SP4
- # - 11
- # - 11SP1
- # - 11SP2
- # - 11SP3
- #- name: GenericLinux
- # versions:
- # - all
- # - any
- #- name: Debian
- # versions:
- # - all
- # - etch
- # - lenny
- # - squeeze
- # - wheezy
- #
- # Below are all categories currently available. Just as with
- # the platforms above, uncomment those that apply to your role.
- #
- #categories:
- #- cloud
- #- cloud:ec2
- #- cloud:gce
- #- cloud:rax
- #- clustering
- #- database
- #- database:nosql
- #- database:sql
- #- development
- #- monitoring
- #- networking
- #- packaging
- #- system
- #- web
-dependencies: []
- # List your role dependencies here, one per line. Only
- # dependencies available via galaxy should be listed here.
- # Be sure to remove the '[]' above if you add dependencies
- # to this list.
-
diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml
deleted file mode 100644
index 062d2e8a9..000000000
--- a/roles/etcd/tasks/main.yml
+++ /dev/null
@@ -1,20 +0,0 @@
----
-- name: Install etcd
- yum: pkg=etcd state=installed disable_gpg_check=yes
-
-- name: Install etcdctl
- yum: pkg=etcdctl state=installed disable_gpg_check=yes
-
-- name: Write etcd global config file
- template: src=etcd.conf.j2 dest=/etc/etcd/etcd.conf
- notify:
- - restart etcd
-
-- name: Open firewalld port for etcd
- firewalld: port=4001/tcp permanent=false state=enabled
-
-- name: Save firewalld port for etcd
- firewalld: port=4001/tcp permanent=true state=enabled
-
-- name: Enable etcd
- service: name=etcd enabled=yes state=started
diff --git a/roles/etcd/templates/etcd.conf.j2 b/roles/etcd/templates/etcd.conf.j2
deleted file mode 100644
index 1b43f6552..000000000
--- a/roles/etcd/templates/etcd.conf.j2
+++ /dev/null
@@ -1,34 +0,0 @@
-# This configuration file is written in [TOML](https://github.com/mojombo/toml)
-
-# addr = "127.0.0.1:4001"
-# bind_addr = "127.0.0.1:4001"
-# ca_file = ""
-# cert_file = ""
-# cors = []
-# cpu_profile_file = ""
-# data_dir = "."
-# discovery = "http://etcd.local:4001/v2/keys/_etcd/registry/examplecluster"
-# http_read_timeout = 10
-# http_write_timeout = 10
-# key_file = ""
-# peers = []
-# peers_file = ""
-# max_cluster_size = 9
-# max_result_buffer = 1024
-# max_retry_attempts = 3
-# name = "default-name"
-# snapshot = false
-# verbose = false
-# very_verbose = false
-
-# [peer]
-# addr = "127.0.0.1:7001"
-# bind_addr = "127.0.0.1:7001"
-# ca_file = ""
-# cert_file = ""
-# key_file = ""
-
-# [cluster]
-# active_size = 9
-# remove_delay = 1800.0
-# sync_interval = 5.0
diff --git a/roles/fluentd_master/tasks/main.yml b/roles/fluentd_master/tasks/main.yml
new file mode 100644
index 000000000..d828db52a
--- /dev/null
+++ b/roles/fluentd_master/tasks/main.yml
@@ -0,0 +1,47 @@
+---
+# TODO: Update fluentd install and configuration when packaging is complete
+- name: download and install td-agent
+ yum:
+ name: 'http://packages.treasuredata.com/2/redhat/7/x86_64/td-agent-2.2.0-0.x86_64.rpm'
+ state: present
+
+- name: Verify fluentd plugin installed
+ command: '/opt/td-agent/embedded/bin/gem query -i fluent-plugin-kubernetes'
+ register: _fluent_plugin_check
+ failed_when: false
+ changed_when: false
+
+- name: install Kubernetes fluentd plugin
+ command: '/opt/td-agent/embedded/bin/gem install fluent-plugin-kubernetes'
+ when: _fluent_plugin_check.rc == 1
+
+- name: Create directories
+ file:
+ path: "{{ item }}"
+ state: directory
+ group: 'td-agent'
+ owner: 'td-agent'
+ mode: 0755
+ with_items: ['/etc/td-agent/config.d']
+
+- name: Add include to td-agent configuration
+ lineinfile:
+ dest: '/etc/td-agent/td-agent.conf'
+ regexp: '^@include config.d'
+ line: '@include config.d/*.conf'
+ state: present
+
+- name: install Kubernetes fluentd configuration file
+ template:
+ src: kubernetes.conf.j2
+ dest: /etc/td-agent/config.d/kubernetes.conf
+ group: 'td-agent'
+ owner: 'td-agent'
+ mode: 0444
+
+- name: ensure td-agent is running
+ service:
+ name: 'td-agent'
+ state: started
+ enabled: yes
+
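
The plugin check works because the gem query exits nonzero when the gem is absent, which is
what the _fluent_plugin_check.rc == 1 condition keys off. The same guard as plain shell
(reusing the exact commands from the tasks):

    if ! /opt/td-agent/embedded/bin/gem query -i fluent-plugin-kubernetes >/dev/null; then
        /opt/td-agent/embedded/bin/gem install fluent-plugin-kubernetes
    fi
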
diff --git a/roles/fluentd_master/templates/kubernetes.conf.j2 b/roles/fluentd_master/templates/kubernetes.conf.j2
new file mode 100644
index 000000000..7b5c86062
--- /dev/null
+++ b/roles/fluentd_master/templates/kubernetes.conf.j2
@@ -0,0 +1,9 @@
+<match kubernetes.**>
+ type file
+ path /var/log/td-agent/containers.log
+ time_slice_format %Y%m%d
+ time_slice_wait 10m
+ time_format %Y%m%dT%H%M%S%z
+ compress gzip
+ utc
+</match>
diff --git a/roles/fluentd_node/tasks/main.yml b/roles/fluentd_node/tasks/main.yml
new file mode 100644
index 000000000..f9ef30b83
--- /dev/null
+++ b/roles/fluentd_node/tasks/main.yml
@@ -0,0 +1,55 @@
+---
+# TODO: Update fluentd install and configuration when packaging is complete
+- name: download and install td-agent
+ yum:
+ name: 'http://packages.treasuredata.com/2/redhat/7/x86_64/td-agent-2.2.0-0.x86_64.rpm'
+ state: present
+
+- name: Verify fluentd plugin installed
+ command: '/opt/td-agent/embedded/bin/gem query -i fluent-plugin-kubernetes'
+ register: _fluent_plugin_check
+ failed_when: false
+ changed_when: false
+
+- name: install Kubernetes fluentd plugin
+ command: '/opt/td-agent/embedded/bin/gem install fluent-plugin-kubernetes'
+ when: _fluent_plugin_check.rc == 1
+
+- name: Override td-agent configuration file
+ template:
+ src: td-agent.j2
+ dest: /etc/sysconfig/td-agent
+ group: 'td-agent'
+ owner: 'td-agent'
+ mode: 0444
+
+- name: Create directories
+ file:
+ path: "{{ item }}"
+ state: directory
+ group: 'td-agent'
+ owner: 'td-agent'
+ mode: 0755
+ with_items: ['/etc/td-agent/config.d', '/var/log/td-agent/tmp']
+
+- name: Add include to td-agent configuration
+ lineinfile:
+ dest: '/etc/td-agent/td-agent.conf'
+ regexp: '^@include config.d'
+ line: '@include config.d/*.conf'
+ state: present
+
+- name: install Kubernetes fluentd configuration file
+ template:
+ src: kubernetes.conf.j2
+ dest: /etc/td-agent/config.d/kubernetes.conf
+ group: 'td-agent'
+ owner: 'td-agent'
+ mode: 0444
+
+- name: ensure td-agent is running
+ service:
+ name: 'td-agent'
+ state: started
+ enabled: yes
+
diff --git a/roles/fluentd_node/templates/kubernetes.conf.j2 b/roles/fluentd_node/templates/kubernetes.conf.j2
new file mode 100644
index 000000000..5f1eecb20
--- /dev/null
+++ b/roles/fluentd_node/templates/kubernetes.conf.j2
@@ -0,0 +1,53 @@
+<source>
+ type tail
+ path /var/lib/docker/containers/*/*-json.log
+ pos_file /var/log/td-agent/tmp/fluentd-docker.pos
+ time_format %Y-%m-%dT%H:%M:%S
+ tag docker.*
+ format json
+ read_from_head true
+</source>
+
+<match docker.var.lib.docker.containers.*.*.log>
+ type kubernetes
+ container_id ${tag_parts[5]}
+ tag docker.${name}
+</match>
+
+<match kubernetes>
+ type copy
+
+ <store>
+ type forward
+ send_timeout 60s
+ recover_wait 10s
+ heartbeat_interval 1s
+ phi_threshold 16
+ hard_timeout 60s
+ log_level trace
+ require_ack_response true
+ heartbeat_type tcp
+
+ <server>
+ name {{groups['oo_first_master'][0]}}
+ host {{hostvars[groups['oo_first_master'][0]].openshift.common.hostname}}
+ port 24224
+ weight 60
+ </server>
+
+ <secondary>
+ type file
+ path /var/log/td-agent/forward-failed
+ </secondary>
+ </store>
+
+ <store>
+ type file
+ path /var/log/td-agent/containers.log
+ time_slice_format %Y%m%d
+ time_slice_wait 10m
+ time_format %Y%m%dT%H%M%S%z
+ compress gzip
+ utc
+ </store>
+</match>
diff --git a/roles/fluentd_node/templates/td-agent.j2 b/roles/fluentd_node/templates/td-agent.j2
new file mode 100644
index 000000000..7245e11ec
--- /dev/null
+++ b/roles/fluentd_node/templates/td-agent.j2
@@ -0,0 +1,2 @@
+DAEMON_ARGS=
+TD_AGENT_ARGS="/usr/sbin/td-agent --log /var/log/td-agent/td-agent.log --use-v1-config"
diff --git a/roles/kube_nfs_volumes/README.md b/roles/kube_nfs_volumes/README.md
new file mode 100644
index 000000000..56c69c286
--- /dev/null
+++ b/roles/kube_nfs_volumes/README.md
@@ -0,0 +1,111 @@
+# kube_nfs_volumes
+
+This role exports disks as a set of Kubernetes persistent volumes.
+It does so by partitioning the disks, creating an ext4 filesystem on each
+partition, mounting the partitions, exporting the mounts via NFS and adding
+these NFS shares as persistent volumes to an existing Kubernetes installation.
+
+All partitions on the given disks are used as persistent volumes, including
+already existing partitions! There should be no other data (such as an
+operating system) on the disks!
+
+## Requirements
+
+* Running Kubernetes with NFS persistent volume support (on a remote machine).
+
+* Works only on RHEL/Fedora-like distros.
+
+## Role Variables
+
+```
+# Options of NFS exports.
+nfs_export_options: "*(rw,no_root_squash,insecure,no_subtree_check)"
+
+# Directory where the created partitions should be mounted. They will be
+# mounted as <mount_dir>/sda1 etc.
+mount_dir: /exports
+
+# Comma-separated list of disks to partition.
+# This role always assumes that all partitions on these disks are used as
+# persistent volumes.
+disks: /dev/sdb,/dev/sdc
+
+# Whether to re-partition already partitioned disks.
+# Even when this is 'false' and the disks are not repartitioned, all existing
+# partitions on the disks are still exported via NFS as persistent volumes!
+force: false
+
+# Specification of the sizes of partitions to create. See library/partitionpool.py
+# for details.
+sizes: 100M
+
+# URL of Kubernetes API server, incl. port.
+kubernetes_url: https://10.245.1.2:6443
+
+# Token to use for authentication to the API server
+kubernetes_token: tJdce6Fn3cL1112YoIJ5m2exzAbzcPZX
+```
+
+## Dependencies
+
+None
+
+## Example Playbook
+
+With this playbook, `/dev/sdb` is partitioned into 100 MiB partitions, all of
+which are mounted under `/exports/sdb<N>` directories, and all of these
+directories are exported via NFS and added as persistent volumes to the
+Kubernetes cluster running at `https://10.245.1.2:6443`.
+
+ - hosts: servers
+ roles:
+ - role: kube_nfs_volumes
+ disks: "/dev/sdb"
+ sizes: 100M
+ kubernetes_url: https://10.245.1.2:6443
+ kubernetes_token: tJdce6Fn3cL1112YoIJ5m2exzAbzcPZX
+
+See library/partitionpool.py for details on how the `sizes` parameter can be
+used to create partitions of various sizes.
+
+## Full example
+Let's say there are two machines, 10.0.0.1 and 10.0.0.2, that we want to use as
+NFS servers for our Kubernetes cluster, which runs its public API at
+https://10.245.1.2:6443.
+
+Both servers have three 1 TB disks: /dev/sda for the system, and /dev/sdb and
+/dev/sdc to be partitioned. We want to split the data disks into 5, 10 and
+20 GiB partitions so that 10% of the total capacity is in 5 GiB partitions, 40%
+in 10 GiB and 50% in 20 GiB partitions.
+
+That means each data disk will have 20x 5 GiB, 40x 10 GiB and 25x 20 GiB
+partitions.
+
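+For a quick sanity check of those numbers, the weight-based allocation used by
+library/partitionpool.py can be approximated with a few lines of Python (a
+simplified sketch, not the module itself; it assumes 1000 GiB of usable space
+per data disk):
+
+```
+disk_gib = 1000                        # assumed usable capacity of one data disk
+specs = [(5, 10), (10, 40), (20, 50)]  # (partition size in GiB, weight)
+total_weight = float(sum(weight for _, weight in specs))
+for size, weight in specs:
+    count = int((weight / total_weight) * (disk_gib / float(size)))
+    print("%dx %d GiB partitions" % (count, size))
+# prints 20x 5 GiB, 40x 10 GiB and 25x 20 GiB partitions
+```
+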
+* Create an `inventory` file:
+ ```
+ [nfsservers]
+ 10.0.0.1
+ 10.0.0.2
+ ```
+
+* Create an ansible playbook, say `setupnfs.yaml`:
+ ```
+ - hosts: nfsservers
+ sudo: yes
+ roles:
+ - role: kube_nfs_volumes
+ disks: "/dev/sdb,/dev/sdc"
+ sizes: 5G:10,10G:40,20G:50
+ force: no
+ kubernetes_url: https://10.245.1.2:6443
+ kubernetes_token: tJdce6Fn3cL1112YoIJ5m2exzAbzcPZX
+ ```
+
+* Run the playbook:
+ ```
+  ansible-playbook -i inventory setupnfs.yaml
+ ```
+
+## License
+
+Apache 2.0
diff --git a/roles/kube_nfs_volumes/defaults/main.yml b/roles/kube_nfs_volumes/defaults/main.yml
new file mode 100644
index 000000000..e296492f9
--- /dev/null
+++ b/roles/kube_nfs_volumes/defaults/main.yml
@@ -0,0 +1,10 @@
+---
+# Options of NFS exports.
+nfs_export_options: "*(rw,no_root_squash,insecure,no_subtree_check)"
+
+# Directory where the created partitions should be mounted. They will be
+# mounted as <mount_dir>/sda1 etc.
+mount_dir: /exports
+
+# Force re-partitioning the disks
+force: false
diff --git a/roles/kube_nfs_volumes/handlers/main.yml b/roles/kube_nfs_volumes/handlers/main.yml
new file mode 100644
index 000000000..52f3ceffe
--- /dev/null
+++ b/roles/kube_nfs_volumes/handlers/main.yml
@@ -0,0 +1,3 @@
+---
+- name: restart nfs
+ service: name=nfs-server state=restarted
diff --git a/roles/kube_nfs_volumes/library/partitionpool.py b/roles/kube_nfs_volumes/library/partitionpool.py
new file mode 100644
index 000000000..1ac8eed4d
--- /dev/null
+++ b/roles/kube_nfs_volumes/library/partitionpool.py
@@ -0,0 +1,240 @@
+#!/usr/bin/python
+"""
+Ansible module for partitioning.
+"""
+
+# There is no pyparted on our Jenkins worker
+# pylint: disable=import-error
+import parted
+
+DOCUMENTATION = """
+---
+module: partitionpool
+short_description: Partition a disk into partitions.
+description:
+  - Creates partitions on a given disk based on partition sizes and their weights.
+    Unless the 'force' option is set to True, it ignores already partitioned disks.
+
+    When the disk is empty or 'force' is set to True, it always creates a new
+    GPT partition table on the disk. Then it creates a number of partitions,
+    based on their sizes and weights.
+
+    This module should be used when a system admin wants to split existing disk(s)
+    into pools of partitions of specific sizes. It is not intended as a generic
+    disk partitioning module!
+
+    Independently of the 'force' parameter value and the actual disk state, the
+    task always fills the 'partition_pool' fact with all partitions on the given
+    disks, together with their sizes (in bytes). E.g.:
+    partition_pool = [
+      { name: sda1, size: 1048576000 },
+      { name: sda2, size: 1048576000 },
+      { name: sdb1, size: 1048576000 },
+      ...
+    ]
+
+options:
+ disk:
+ description:
+ - Disk to partition.
+ size:
+ description:
+      - Sizes of partitions to create and their weights. Has the form:
+        <size1>[:<weight1>][,<size2>[:<weight2>][,...]]
+      - Any <size> can end with 'm' or 'M' for megabytes, 'g/G' for gigabytes
+        and 't/T' for terabytes. Megabytes are used when no unit is specified.
+      - If <weight> is missing, 1.0 is used.
+      - For each specified partition size <sizeX>, a number of partitions is
+        created so that they occupy the space represented by <weightX>,
+        proportionally to the other weights.
+
+      - Example 1: size=100G says that the whole disk is split into 100 GiB
+        partitions. On a 1 TiB disk, 10 partitions will be created.
+
+      - Example 2: size=100G:1,10G:1 says that the ratio of space occupied by 100 GiB
+        partitions and 10 GiB partitions is 1:1. Therefore, on a 1 TiB disk, 500 GiB
+        will be split into five 100 GiB partitions and 500 GiB will be split into
+        fifty 10 GiB partitions.
+      - size=100G:1,10G:1 = 5x 100 GiB and 50x 10 GiB partitions (on a 1 TiB disk).
+
+      - Example 3: size=200G:1,100G:2 says that the ratio of space occupied by 200 GiB
+        partitions and 100 GiB partitions is 1:2. Therefore, on a 1 TiB disk, 1/3
+        (300 GiB) should be occupied by 200 GiB partitions. Only one fits there,
+        so only one is created (we always round the number of partitions *down*).
+        The rest (800 GiB) is split into eight 100 GiB partitions, even though it's more
+        than 2/3 of the total space - free space is always allocated as much as possible.
+      - size=200G:1,100G:2 = 1x 200 GiB and 8x 100 GiB partitions (on a 1 TiB disk).
+
+      - Example 4: size=200G:1,100G:1,50G:1 says that the ratio of space occupied by
+        200 GiB, 100 GiB and 50 GiB partitions is 1:1:1. Therefore 1/3 of a 1 TiB disk
+        is dedicated to 200 GiB partitions. Only one fits there and only one is
+        created. The rest (800 GiB) is distributed according to the remaining weights:
+        100 GiB vs 50 GiB is 1:1, so we create four 100 GiB partitions (400 GiB in total)
+        and eight 50 GiB partitions (again, 400 GiB).
+      - size=200G:1,100G:1,50G:1 = 1x 200 GiB, 4x 100 GiB and 8x 50 GiB partitions
+        (on a 1 TiB disk).
+
+ force:
+ description:
+      - If True, it will always overwrite the partition table on the disk and create a new one.
+ - If False (default), it won't change existing partition tables.
+
+"""
+
+# It's not a class, it's more of a simple struct with almost no functionality.
+# pylint: disable=too-few-public-methods
+class PartitionSpec(object):
+ """ Simple class to represent required partitions."""
+ def __init__(self, size, weight):
+ """ Initialize the partition specifications."""
+ # Size of the partitions
+ self.size = size
+ # Relative weight of this request
+ self.weight = weight
+ # Number of partitions to create, will be calculated later
+ self.count = -1
+
+ def set_count(self, count):
+ """ Set count of parititions of this specification. """
+ self.count = count
+
+def assign_space(total_size, specs):
+ """
+ Satisfy all the PartitionSpecs according to their weight.
+ In other words, calculate spec.count of all the specs.
+ """
+ total_weight = 0.0
+ for spec in specs:
+ total_weight += float(spec.weight)
+
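+    # Give each spec a share of the remaining space proportional to its
+    # weight, rounded down to whole partitions, then shrink the remaining
+    # space and weight before moving on to the next spec.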
+ for spec in specs:
+ num_blocks = int((float(spec.weight) / total_weight) * (total_size / float(spec.size)))
+ spec.set_count(num_blocks)
+ total_size -= num_blocks * spec.size
+ total_weight -= spec.weight
+
+def partition(diskname, specs, force=False, check_mode=False):
+ """
+ Create requested partitions.
+    Returns the number of created partitions, or 0 when the disk was already partitioned.
+ """
+ count = 0
+
+ dev = parted.getDevice(diskname)
+ try:
+ disk = parted.newDisk(dev)
+ except parted.DiskException:
+ # unrecognizable format, treat as empty disk
+ disk = None
+
+ if disk and len(disk.partitions) > 0 and not force:
+ print "skipping", diskname
+ return 0
+
+ # create new partition table, wiping all existing data
+ disk = parted.freshDisk(dev, 'gpt')
+ # calculate nr. of partitions of each size
+ assign_space(dev.getSize(), specs)
+ last_megabyte = 1
+ for spec in specs:
+ for _ in range(spec.count):
+ # create the partition
+ start = parted.sizeToSectors(last_megabyte, "MiB", dev.sectorSize)
+ length = parted.sizeToSectors(spec.size, "MiB", dev.sectorSize)
+ geo = parted.Geometry(device=dev, start=start, length=length)
+ filesystem = parted.FileSystem(type='ext4', geometry=geo)
+ part = parted.Partition(
+ disk=disk,
+ type=parted.PARTITION_NORMAL,
+ fs=filesystem,
+ geometry=geo)
+ disk.addPartition(partition=part, constraint=dev.optimalAlignedConstraint)
+ last_megabyte += spec.size
+ count += 1
+ try:
+ if not check_mode:
+ disk.commit()
+ except parted.IOException:
+ # partitions have been written, but we have been unable to inform the
+ # kernel of the change, probably because they are in use.
+ # Ignore it and hope for the best...
+ pass
+ return count
+
+def parse_spec(text):
+ """ Parse string with partition specification. """
+ tokens = text.split(",")
+ specs = []
+ for token in tokens:
+ if not ":" in token:
+ token += ":1"
+
+ (sizespec, weight) = token.split(':')
+ weight = float(weight) # throws exception with reasonable error string
+
+ units = {"m": 1, "g": 1 << 10, "t": 1 << 20, "p": 1 << 30}
+ unit = units.get(sizespec[-1].lower(), None)
+ if not unit:
+ # there is no unit specifier, it must be just the number
+ size = float(sizespec)
+ unit = 1
+ else:
+ size = float(sizespec[:-1])
+ spec = PartitionSpec(int(size * unit), weight)
+ specs.append(spec)
+ return specs
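+
+# Illustrative example (values follow from the unit table above):
+#   parse_spec("100G:1,10G:2") returns two PartitionSpecs with sizes 102400 and
+#   10240 MiB and weights 1.0 and 2.0; a spec without an explicit weight, e.g.
+#   "5G", defaults to weight 1.0.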
+
+def get_partitions(diskpath):
+ """ Return array of partition names for given disk """
+ dev = parted.getDevice(diskpath)
+ disk = parted.newDisk(dev)
+ partitions = []
+ for part in disk.partitions:
+ (_, _, pname) = part.path.rsplit("/")
+ partitions.append({"name": pname, "size": part.getLength() * dev.sectorSize})
+
+ return partitions
+
+
+def main():
+ """ Ansible module main method. """
+ module = AnsibleModule(
+ argument_spec=dict(
+ disks=dict(required=True, type='str'),
+ force=dict(required=False, default="no", type='bool'),
+ sizes=dict(required=True, type='str')
+ ),
+ supports_check_mode=True,
+ )
+
+ disks = module.params['disks']
+ force = module.params['force']
+ if force is None:
+ force = False
+ sizes = module.params['sizes']
+
+ try:
+ specs = parse_spec(sizes)
+ except ValueError, ex:
+ err = "Error parsing sizes=" + sizes + ": " + str(ex)
+ module.fail_json(msg=err)
+
+ partitions = []
+ changed_count = 0
+ for disk in disks.split(","):
+ try:
+ changed_count += partition(disk, specs, force, module.check_mode)
+ except Exception, ex:
+ err = "Error creating partitions on " + disk + ": " + str(ex)
+            module.fail_json(msg=err)
+ partitions += get_partitions(disk)
+
+ module.exit_json(changed=(changed_count > 0), ansible_facts={"partition_pool": partitions})
+
+# ignore pylint errors related to the module_utils import
+# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
+# import module snippets
+from ansible.module_utils.basic import *
+main()
+
diff --git a/roles/kube_nfs_volumes/meta/main.yml b/roles/kube_nfs_volumes/meta/main.yml
new file mode 100644
index 000000000..eb71a7a1f
--- /dev/null
+++ b/roles/kube_nfs_volumes/meta/main.yml
@@ -0,0 +1,16 @@
+---
+galaxy_info:
+ author: Jan Safranek
+ description: Partition disks and use them as Kubernetes NFS physical volumes.
+ company: Red Hat, Inc.
+ license: license (Apache)
+ min_ansible_version: 1.4
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ - name: Fedora
+ versions:
+ - all
+ categories:
+ - cloud
diff --git a/roles/kube_nfs_volumes/tasks/main.yml b/roles/kube_nfs_volumes/tasks/main.yml
new file mode 100644
index 000000000..f4a506234
--- /dev/null
+++ b/roles/kube_nfs_volumes/tasks/main.yml
@@ -0,0 +1,25 @@
+---
+- name: Install pyparted (RedHat/Fedora)
+ yum: name=pyparted,python-httplib2 state=present
+
+- name: partition the drives
+ partitionpool: disks={{ disks }} force={{ force }} sizes={{ sizes }}
+
+- name: create filesystem
+ filesystem: fstype=ext4 dev=/dev/{{ item.name }}
+ with_items: partition_pool
+
+- name: mount
+ mount: name={{mount_dir}}/{{ item.name }} src=/dev/{{ item.name }} state=mounted fstype=ext4 passno=2
+ with_items: partition_pool
+
+- include: nfs.yml
+
+- name: export physical volumes
+ uri: url={{ kubernetes_url }}/api/v1beta3/persistentvolumes
+ method=POST
+ body='{{ lookup("template", "../templates/nfs.json.j2") }}'
+ body_format=json
+ status_code=201
+ HEADER_Authorization="Bearer {{ kubernetes_token }}"
+ with_items: partition_pool
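+
+# For reference, each iteration of the task above roughly corresponds to this
+# API call (an illustrative sketch, not part of the role):
+#   curl -X POST \
+#        -H "Authorization: Bearer <kubernetes_token>" \
+#        -H "Content-Type: application/json" \
+#        --data @rendered-nfs.json \
+#        <kubernetes_url>/api/v1beta3/persistentvolumes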
diff --git a/roles/kube_nfs_volumes/tasks/nfs.yml b/roles/kube_nfs_volumes/tasks/nfs.yml
new file mode 100644
index 000000000..559fcf17c
--- /dev/null
+++ b/roles/kube_nfs_volumes/tasks/nfs.yml
@@ -0,0 +1,16 @@
+---
+- name: Install NFS server on Fedora/Red Hat
+ yum: name=nfs-utils state=present
+
+- name: Start rpcbind on Fedora/Red Hat
+ service: name=rpcbind state=started enabled=yes
+
+- name: Start nfs on Fedora/Red Hat
+ service: name=nfs-server state=started enabled=yes
+
+- name: Export the directories
+ lineinfile: dest=/etc/exports
+ regexp="^{{ mount_dir }}/{{ item.name }} "
+ line="{{ mount_dir }}/{{ item.name }} {{nfs_export_options}}"
+ with_items: partition_pool
+ notify: restart nfs
diff --git a/roles/kube_nfs_volumes/templates/nfs.json.j2 b/roles/kube_nfs_volumes/templates/nfs.json.j2
new file mode 100644
index 000000000..b42886ef1
--- /dev/null
+++ b/roles/kube_nfs_volumes/templates/nfs.json.j2
@@ -0,0 +1,23 @@
+{
+ "kind": "PersistentVolume",
+ "apiVersion": "v1beta3",
+ "metadata": {
+ "name": "pv-{{ inventory_hostname | regex_replace("\.", "-") }}-{{ item.name }}",
+ "labels": {
+ "type": "nfs"
+ }
+ },
+ "spec": {
+ "capacity": {
+ "storage": "{{ item.size }}"
+ },
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "NFS": {
+ "Server": "{{ inventory_hostname }}",
+ "Path": "{{ mount_dir }}/{{ item.name }}",
+ "ReadOnly": false
+ }
+ }
+}
diff --git a/roles/openshift_common/tasks/main.yml b/roles/openshift_common/tasks/main.yml
index c55677c3f..f76dd84ed 100644
--- a/roles/openshift_common/tasks/main.yml
+++ b/roles/openshift_common/tasks/main.yml
@@ -10,7 +10,9 @@
public_hostname: "{{ openshift_public_hostname | default(None) }}"
public_ip: "{{ openshift_public_ip | default(None) }}"
use_openshift_sdn: "{{ openshift_use_openshift_sdn | default(None) }}"
+ sdn_network_plugin_name: "{{ os_sdn_network_plugin_name | default(None) }}"
deployment_type: "{{ openshift_deployment_type }}"
+
- name: Set hostname
hostname: name={{ openshift.common.hostname }}
diff --git a/roles/openshift_common/vars/main.yml b/roles/openshift_common/vars/main.yml
index 9f657a2c7..8e7d71154 100644
--- a/roles/openshift_common/vars/main.yml
+++ b/roles/openshift_common/vars/main.yml
@@ -6,6 +6,4 @@
# interfaces)
os_firewall_use_firewalld: False
-openshift_cert_parent_dir: /var/lib/openshift
-openshift_cert_relative_dir: openshift.local.certificates
-openshift_cert_dir: "{{ openshift_cert_parent_dir }}/{{ openshift_cert_relative_dir }}"
+openshift_data_dir: /var/lib/openshift
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index 9c2657ff2..e4d3bf26f 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -1,10 +1,6 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# vim: expandtab:tabstop=4:shiftwidth=4
-# disable pylint checks
-# temporarily disabled until items can be addressed:
-# fixme - until all TODO comments have been addressed
-# pylint:disable=fixme
"""Ansible module for retrieving and setting openshift related facts"""
DOCUMENTATION = '''
@@ -19,6 +15,7 @@ EXAMPLES = '''
import ConfigParser
import copy
+import os
def hostname_valid(hostname):
@@ -166,7 +163,6 @@ def normalize_gce_facts(metadata, facts):
facts['network']['interfaces'].append(int_info)
_, _, zone = metadata['instance']['zone'].rpartition('/')
facts['zone'] = zone
- facts['external_id'] = metadata['instance']['id']
# Default to no sdn for GCE deployments
facts['use_openshift_sdn'] = False
@@ -215,7 +211,6 @@ def normalize_aws_facts(metadata, facts):
int_info['network_id'] = None
facts['network']['interfaces'].append(int_info)
facts['zone'] = metadata['placement']['availability-zone']
- facts['external_id'] = metadata['instance-id']
# TODO: actually attempt to determine default local and public ips
# by using the ansible default ip fact and the ipv4-associations
@@ -247,7 +242,7 @@ def normalize_openstack_facts(metadata, facts):
# metadata api, should be updated if neutron exposes this.
facts['zone'] = metadata['availability_zone']
- facts['external_id'] = metadata['uuid']
+
facts['network']['ip'] = metadata['ec2_compat']['local-ipv4']
facts['network']['public_ip'] = metadata['ec2_compat']['public-ipv4']
@@ -288,6 +283,72 @@ def normalize_provider_facts(provider, metadata):
facts = normalize_openstack_facts(metadata, facts)
return facts
+def set_registry_url_if_unset(facts):
+ """ Set registry_url fact if not already present in facts dict
+
+ Args:
+ facts (dict): existing facts
+ Returns:
+            dict: the facts dict updated with the generated registry_url
+ facts if they were not already present
+ """
+ for role in ('master', 'node'):
+ if role in facts:
+ deployment_type = facts['common']['deployment_type']
+ if 'registry_url' not in facts[role]:
+ registry_url = "openshift/origin-${component}:${version}"
+ if deployment_type == 'enterprise':
+ registry_url = "openshift3_beta/ose-${component}:${version}"
+ elif deployment_type == 'online':
+ registry_url = ("docker-registry.ops.rhcloud.com/"
+ "openshift3_beta/ose-${component}:${version}")
+ facts[role]['registry_url'] = registry_url
+
+ return facts
+
+def set_fluentd_facts_if_unset(facts):
+ """ Set fluentd facts if not already present in facts dict
+ Args:
+ facts (dict): existing facts
+ Returns:
+ dict: the facts dict updated with the generated fluentd
+ facts if they were not already present
+
+ """
+ if 'common' in facts:
+ deployment_type = facts['common']['deployment_type']
+ if 'use_fluentd' not in facts['common']:
+ use_fluentd = True if deployment_type == 'online' else False
+ facts['common']['use_fluentd'] = use_fluentd
+ return facts
+
+def set_identity_providers_if_unset(facts):
+ """ Set identity_providers fact if not already present in facts dict
+
+ Args:
+ facts (dict): existing facts
+ Returns:
+ dict: the facts dict updated with the generated identity providers
+ facts if they were not already present
+ """
+ if 'master' in facts:
+ deployment_type = facts['common']['deployment_type']
+ if 'identity_providers' not in facts['master']:
+ identity_provider = dict(
+ name='allow_all', challenge=True, login=True,
+ kind='AllowAllPasswordIdentityProvider'
+ )
+ if deployment_type == 'enterprise':
+ identity_provider = dict(
+ name='deny_all', challenge=True, login=True,
+ kind='DenyAllPasswordIdentityProvider'
+ )
+
+ facts['master']['identity_providers'] = [identity_provider]
+
+ return facts
def set_url_facts_if_unset(facts):
""" Set url facts if not already present in facts dict
@@ -299,34 +360,77 @@ def set_url_facts_if_unset(facts):
were not already present
"""
if 'master' in facts:
- for (url_var, use_ssl, port, default) in [
- ('api_url',
- facts['master']['api_use_ssl'],
- facts['master']['api_port'],
- facts['common']['hostname']),
- ('public_api_url',
- facts['master']['api_use_ssl'],
- facts['master']['api_port'],
- facts['common']['public_hostname']),
- ('console_url',
- facts['master']['console_use_ssl'],
- facts['master']['console_port'],
- facts['common']['hostname']),
- ('public_console_url' 'console_use_ssl',
- facts['master']['console_use_ssl'],
- facts['master']['console_port'],
- facts['common']['public_hostname'])]:
- if url_var not in facts['master']:
- scheme = 'https' if use_ssl else 'http'
- netloc = default
- if ((scheme == 'https' and port != '443')
- or (scheme == 'http' and port != '80')):
- netloc = "%s:%s" % (netloc, port)
- facts['master'][url_var] = urlparse.urlunparse(
- (scheme, netloc, '', '', '', '')
- )
+ api_use_ssl = facts['master']['api_use_ssl']
+ api_port = facts['master']['api_port']
+ console_use_ssl = facts['master']['console_use_ssl']
+ console_port = facts['master']['console_port']
+ console_path = facts['master']['console_path']
+ etcd_use_ssl = facts['master']['etcd_use_ssl']
+        etcd_port = facts['master']['etcd_port']
+ hostname = facts['common']['hostname']
+ public_hostname = facts['common']['public_hostname']
+
+ if 'etcd_urls' not in facts['master']:
+ facts['master']['etcd_urls'] = [format_url(etcd_use_ssl, hostname,
+ etcd_port)]
+ if 'api_url' not in facts['master']:
+ facts['master']['api_url'] = format_url(api_use_ssl, hostname,
+ api_port)
+ if 'public_api_url' not in facts['master']:
+ facts['master']['public_api_url'] = format_url(api_use_ssl,
+ public_hostname,
+ api_port)
+ if 'console_url' not in facts['master']:
+ facts['master']['console_url'] = format_url(console_use_ssl,
+ hostname,
+ console_port,
+ console_path)
+ if 'public_console_url' not in facts['master']:
+ facts['master']['public_console_url'] = format_url(console_use_ssl,
+ public_hostname,
+ console_port,
+ console_path)
+ return facts
+
+def set_sdn_facts_if_unset(facts):
+ """ Set sdn facts if not already present in facts dict
+
+ Args:
+ facts (dict): existing facts
+ Returns:
+ dict: the facts dict updated with the generated sdn facts if they
+ were not already present
+ """
+ if 'common' in facts:
+ if 'sdn_network_plugin_name' not in facts['common']:
+ use_sdn = facts['common']['use_openshift_sdn']
+ plugin = 'redhat/openshift-ovs-subnet' if use_sdn else ''
+ facts['common']['sdn_network_plugin_name'] = plugin
+
+ if 'master' in facts:
+ if 'sdn_cluster_network_cidr' not in facts['master']:
+ facts['master']['sdn_cluster_network_cidr'] = '10.1.0.0/16'
+ if 'sdn_host_subnet_length' not in facts['master']:
+ facts['master']['sdn_host_subnet_length'] = '8'
+
return facts
+def format_url(use_ssl, hostname, port, path=''):
+ """ Format url based on ssl flag, hostname, port and path
+
+ Args:
+ use_ssl (bool): is ssl enabled
+ hostname (str): hostname
+ port (str): port
+ path (str): url path
+ Returns:
+ str: The generated url string
+ """
+ scheme = 'https' if use_ssl else 'http'
+ netloc = hostname
+ if (use_ssl and port != '443') or (not use_ssl and port != '80'):
+ netloc += ":%s" % port
+ return urlparse.urlunparse((scheme, netloc, path, '', '', ''))
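+
+# Illustrative example (values assumed): format_url(True, 'master.example.com',
+# '8443', '/console') returns 'https://master.example.com:8443/console'; with
+# port '443' the ':443' suffix is omitted.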
def get_current_config(facts):
""" Get current openshift config
@@ -390,7 +494,7 @@ def get_current_config(facts):
return current_config
-def apply_provider_facts(facts, provider_facts, roles):
+def apply_provider_facts(facts, provider_facts):
""" Apply provider facts to supplied facts dict
Args:
@@ -418,11 +522,6 @@ def apply_provider_facts(facts, provider_facts, roles):
facts['common'][ip_var]
)
- if 'node' in roles:
- ext_id = provider_facts.get('external_id')
- if ext_id:
- facts['node']['external_id'] = ext_id
-
facts['provider'] = provider_facts
return facts
@@ -556,10 +655,14 @@ class OpenShiftFacts(object):
defaults = self.get_defaults(roles)
provider_facts = self.init_provider_facts()
- facts = apply_provider_facts(defaults, provider_facts, roles)
+ facts = apply_provider_facts(defaults, provider_facts)
facts = merge_facts(facts, local_facts)
facts['current_config'] = get_current_config(facts)
facts = set_url_facts_if_unset(facts)
+ facts = set_fluentd_facts_if_unset(facts)
+ facts = set_identity_providers_if_unset(facts)
+ facts = set_registry_url_if_unset(facts)
+ facts = set_sdn_facts_if_unset(facts)
return dict(openshift=facts)
def get_defaults(self, roles):
@@ -573,31 +676,36 @@ class OpenShiftFacts(object):
"""
defaults = dict()
- common = dict(use_openshift_sdn=True)
ip_addr = self.system_facts['default_ipv4']['address']
- common['ip'] = ip_addr
- common['public_ip'] = ip_addr
-
exit_code, output, _ = module.run_command(['hostname', '-f'])
hostname_f = output.strip() if exit_code == 0 else ''
hostname_values = [hostname_f, self.system_facts['nodename'],
self.system_facts['fqdn']]
hostname = choose_hostname(hostname_values)
- common['hostname'] = hostname
- common['public_hostname'] = hostname
+ common = dict(use_openshift_sdn=True, ip=ip_addr, public_ip=ip_addr,
+ deployment_type='origin', hostname=hostname,
+ public_hostname=hostname)
+ common['client_binary'] = 'oc' if os.path.isfile('/usr/bin/oc') else 'osc'
+ common['admin_binary'] = 'oadm' if os.path.isfile('/usr/bin/oadm') else 'osadm'
defaults['common'] = common
if 'master' in roles:
master = dict(api_use_ssl=True, api_port='8443',
console_use_ssl=True, console_path='/console',
- console_port='8443', etcd_use_ssl=False,
- etcd_port='4001', portal_net='172.30.17.0/24')
+ console_port='8443', etcd_use_ssl=True,
+ etcd_port='4001', portal_net='172.30.0.0/16',
+ embedded_etcd=True, embedded_kube=True,
+ embedded_dns=True, dns_port='53',
+ bind_addr='0.0.0.0', session_max_seconds=3600,
+ session_name='ssn', session_secrets_file='',
+ access_token_max_seconds=86400,
+ auth_token_max_seconds=500,
+ oauth_grant_method='auto')
defaults['master'] = master
if 'node' in roles:
- node = dict(external_id=common['hostname'], pod_cidr='',
- labels={}, annotations={})
+ node = dict(pod_cidr='', labels={}, annotations={})
node['resources_cpu'] = self.system_facts['processor_cores']
node['resources_memory'] = int(
int(self.system_facts['memtotal_mb']) * 1024 * 1024 * 0.75
diff --git a/roles/openshift_master/README.md b/roles/openshift_master/README.md
index 9f9d0a613..3178e318c 100644
--- a/roles/openshift_master/README.md
+++ b/roles/openshift_master/README.md
@@ -17,7 +17,7 @@ From this role:
|-------------------------------------|-----------------------|--------------------------------------------------|
| openshift_master_debug_level | openshift_debug_level | Verbosity of the debug logs for openshift-master |
| openshift_node_ips | [] | List of the openshift node ip addresses to pre-register when openshift-master starts up |
-| openshift_registry_url | UNDEF | Default docker registry to use |
+| oreg_url | UNDEF | Default docker registry to use |
| openshift_master_api_port | UNDEF | |
| openshift_master_console_port | UNDEF | |
| openshift_master_api_url | UNDEF | |
diff --git a/roles/openshift_master/defaults/main.yml b/roles/openshift_master/defaults/main.yml
index 56cf43531..11195e83e 100644
--- a/roles/openshift_master/defaults/main.yml
+++ b/roles/openshift_master/defaults/main.yml
@@ -11,6 +11,10 @@ os_firewall_allow:
port: 53/tcp
- service: OpenShift dns udp
port: 53/udp
+- service: Fluentd td-agent tcp
+ port: 24224/tcp
+- service: Fluentd td-agent udp
+ port: 24224/udp
os_firewall_deny:
- service: OpenShift api http
port: 8080/tcp
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index f9e6199a5..23f8b4649 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -1,10 +1,16 @@
---
-# TODO: actually have api_port, api_use_ssl, console_port, console_use_ssl,
-# etcd_use_ssl actually change the master config.
+# TODO: add validation for openshift_master_identity_providers
+# TODO: add ability to configure certificates given either a local file to
+# point to or certificate contents, set in default cert locations.
+
+- assert:
+ that:
+ - openshift_master_oauth_grant_method in openshift_master_valid_grant_methods
+ when: openshift_master_oauth_grant_method is defined
- name: Set master OpenShift facts
openshift_facts:
- role: 'master'
+ role: master
local_facts:
debug_level: "{{ openshift_master_debug_level | default(openshift.common.debug_level) }}"
api_port: "{{ openshift_master_api_port | default(None) }}"
@@ -18,66 +24,104 @@
public_console_url: "{{ openshift_master_public_console_url | default(None) }}"
etcd_port: "{{ openshift_master_etcd_port | default(None) }}"
etcd_use_ssl: "{{ openshift_master_etcd_use_ssl | default(None) }}"
+ etcd_urls: "{{ openshift_master_etcd_urls | default(None) }}"
+ embedded_etcd: "{{ openshift_master_embedded_etcd | default(None) }}"
+ embedded_kube: "{{ openshift_master_embedded_kube | default(None) }}"
+ embedded_dns: "{{ openshift_master_embedded_dns | default(None) }}"
+ dns_port: "{{ openshift_master_dns_port | default(None) }}"
+ bind_addr: "{{ openshift_master_bind_addr | default(None) }}"
portal_net: "{{ openshift_master_portal_net | default(None) }}"
+ session_max_seconds: "{{ openshift_master_session_max_seconds | default(None) }}"
+ session_name: "{{ openshift_master_session_name | default(None) }}"
+ session_secrets_file: "{{ openshift_master_session_secrets_file | default(None) }}"
+ access_token_max_seconds: "{{ openshift_master_access_token_max_seconds | default(None) }}"
+ auth_token_max_seconds: "{{ openshift_master_auth_token_max_seconds | default(None) }}"
+ identity_providers: "{{ openshift_master_identity_providers | default(None) }}"
+ registry_url: "{{ oreg_url | default(None) }}"
+ oauth_grant_method: "{{ openshift_master_oauth_grant_method | default(None) }}"
+ sdn_cluster_network_cidr: "{{ osm_cluster_network_cidr | default(None) }}"
+ sdn_host_subnet_length: "{{ osm_host_subnet_length | default(None) }}"
# TODO: These values need to be configurable
- name: Set dns OpenShift facts
openshift_facts:
- role: 'dns'
+ role: dns
local_facts:
ip: "{{ openshift.common.ip }}"
- domain: local
+ domain: cluster.local
+ when: openshift.master.embedded_dns
- name: Install OpenShift Master package
- yum: pkg=openshift-master state=installed
+ yum: pkg=openshift-master state=present
register: install_result
- name: Reload systemd units
command: systemctl daemon-reload
when: install_result | changed
-- name: Create certificate parent directory if it doesn't exist
- file:
- path: "{{ openshift_cert_parent_dir }}"
- state: directory
-
- name: Create config parent directory if it doesn't exist
file:
- path: "{{ openshift_master_config | dirname }}"
+ path: "{{ openshift_master_config_dir }}"
state: directory
-# TODO: should probably use a template lookup for this
-# TODO: should allow for setting --etcd, --kubernetes options
-# TODO: recreate config if values change
-- name: Use enterprise default for openshift_registry_url if not set
- set_fact:
- openshift_registry_url: "openshift3_beta/ose-${component}:${version}"
- when: openshift.common.deployment_type == 'enterprise' and openshift_registry_url is not defined
-
-- name: Use online default for openshift_registry_url if not set
- set_fact:
- openshift_registry_url: "docker-registry.ops.rhcloud.com/openshift3_beta/ose-${component}:${version}"
- when: openshift.common.deployment_type == 'online' and openshift_registry_url is not defined
+- name: Create the master certificates if they do not already exist
+ command: >
+ {{ openshift.common.admin_binary }} create-master-certs
+ --hostnames={{ openshift.common.hostname }},{{ openshift.common.public_hostname }}
+ --master={{ openshift.master.api_url }}
+ --public-master={{ openshift.master.public_api_url }}
+ --cert-dir={{ openshift_master_config_dir }} --overwrite=false
+ args:
+ creates: "{{ openshift_master_config_dir }}/master.server.key"
-- name: Create master config
+- name: Create the policy file if it does not already exist
command: >
- /usr/bin/openshift start master --write-config
- --config={{ openshift_master_config }}
- --portal-net={{ openshift.master.portal_net }}
- --master={{ openshift.master.api_url }}
- --public-master={{ openshift.master.public_api_url }}
- --listen={{ 'https' if openshift.master.api_use_ssl else 'http' }}://0.0.0.0:{{ openshift.master.api_port }}
- {{ ('--images=' ~ openshift_registry_url) if (openshift_registry_url | default('', true) != '') else '' }}
- {{ ('--nodes=' ~ openshift_node_ips | join(',')) if (openshift_node_ips | default('', true) != '') else '' }}
+ {{ openshift.common.admin_binary }} create-bootstrap-policy-file
+ --filename={{ openshift_master_policy }}
args:
- chdir: "{{ openshift_cert_parent_dir }}"
- creates: "{{ openshift_master_config }}"
+ creates: "{{ openshift_master_policy }}"
+ notify:
+ - restart openshift-master
+
+- name: Create the scheduler config
+ template:
+ dest: "{{ openshift_master_scheduler_conf }}"
+ src: scheduler.json.j2
+ notify:
+ - restart openshift-master
+
+- name: Install httpd-tools if needed
+ yum: pkg=httpd-tools state=present
+ when: item.kind == 'HTPasswdPasswordIdentityProvider'
+ with_items: openshift.master.identity_providers
+
+- name: Create the htpasswd file if needed
+ copy:
+ dest: "{{ item.filename }}"
+ content: ""
+ mode: 0600
+ force: no
+ when: item.kind == 'HTPasswdPasswordIdentityProvider'
+ with_items: openshift.master.identity_providers
+
+# TODO: add the validate parameter when there is a validation command to run
+- name: Create master config
+ template:
+ dest: "{{ openshift_master_config_file }}"
+ src: master.yaml.v1.j2
+ notify:
+ - restart openshift-master
- name: Configure OpenShift settings
lineinfile:
dest: /etc/sysconfig/openshift-master
- regexp: '^OPTIONS='
- line: "OPTIONS=\"--config={{ openshift_master_config }} --loglevel={{ openshift.master.debug_level }}\""
+ regexp: "{{ item.regex }}"
+ line: "{{ item.line }}"
+ with_items:
+ - regex: '^OPTIONS='
+ line: "OPTIONS=--loglevel={{ openshift.master.debug_level }}"
+ - regex: '^CONFIG_FILE='
+ line: "CONFIG_FILE={{ openshift_master_config_file }}"
notify:
- restart openshift-master
@@ -97,15 +141,15 @@
# TODO: Update this file if the contents of the source file are not present in
# the dest file, will need to make sure to ignore things that could be added
-- name: Create the OpenShift client config(s)
- command: cp {{ openshift_cert_dir }}/openshift-client/.kubeconfig ~{{ item }}/.config/openshift/.config
+- name: Copy the OpenShift admin client config(s)
+ command: cp {{ openshift_master_config_dir }}/admin.kubeconfig ~{{ item }}/.config/openshift/.config
args:
creates: ~{{ item }}/.config/openshift/.config
with_items:
- root
- "{{ ansible_ssh_user }}"
-- name: Update the permissions on the OpenShift client config(s)
+- name: Update the permissions on the OpenShift admin client config(s)
file:
path: "~{{ item }}/.config/openshift/.config"
state: file
diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2
new file mode 100644
index 000000000..1c2d37b63
--- /dev/null
+++ b/roles/openshift_master/templates/master.yaml.v1.j2
@@ -0,0 +1,98 @@
+apiVersion: v1
+assetConfig:
+ logoutURL: ""
+ masterPublicURL: {{ openshift.master.public_api_url }}
+ publicURL: {{ openshift.master.public_console_url }}/
+ servingInfo:
+ bindAddress: {{ openshift.master.bind_addr }}:{{ openshift.master.console_port }}
+ certFile: master.server.crt
+ clientCA: ""
+ keyFile: master.server.key
+corsAllowedOrigins:
+{# TODO: add support for user specified corsAllowedOrigins #}
+{% for origin in ['127.0.0.1', 'localhost', openshift.common.hostname, openshift.common.ip, openshift.common.public_hostname, openshift.common.public_ip] %}
+ - {{ origin }}
+{% endfor %}
+{% if openshift.master.embedded_dns %}
+dnsConfig:
+ bindAddress: {{ openshift.master.bind_addr }}:{{ openshift.master.dns_port }}
+{% endif %}
+etcdClientInfo:
+ ca: ca.crt
+ certFile: master.etcd-client.crt
+ keyFile: master.etcd-client.key
+ urls:
+{% for etcd_url in openshift.master.etcd_urls %}
+ - {{ etcd_url }}
+{% endfor %}
+{% if openshift.master.embedded_etcd %}
+etcdConfig:
+ address: {{ openshift.common.hostname }}:{{ openshift.master.etcd_port }}
+ peerAddress: {{ openshift.common.hostname }}:7001
+ peerServingInfo:
+ bindAddress: {{ openshift.master.bind_addr }}:7001
+ certFile: etcd.server.crt
+ clientCA: ca.crt
+ keyFile: etcd.server.key
+ servingInfo:
+ bindAddress: {{ openshift.master.bind_addr }}:{{ openshift.master.etcd_port }}
+ certFile: etcd.server.crt
+ clientCA: ca.crt
+ keyFile: etcd.server.key
+ storageDirectory: {{ openshift_data_dir }}/openshift.local.etcd
+{% endif %}
+etcdStorageConfig:
+ kubernetesStoragePrefix: kubernetes.io
+ kubernetesStorageVersion: v1beta3
+  openShiftStoragePrefix: openshift.io
+ openShiftStorageVersion: v1beta3
+imageConfig:
+ format: {{ openshift.master.registry_url }}
+ latest: false
+kind: MasterConfig
+kubeletClientInfo:
+{# TODO: allow user specified kubelet port #}
+ ca: ca.crt
+ certFile: master.kubelet-client.crt
+ keyFile: master.kubelet-client.key
+ port: 10250
+{% if openshift.master.embedded_kube %}
+kubernetesMasterConfig:
+{# TODO: support overriding masterCount #}
+ masterCount: 1
+ masterIP: ""
+ schedulerConfigFile: {{ openshift_master_scheduler_conf }}
+ servicesSubnet: {{ openshift.master.portal_net }}
+ staticNodeNames: {{ openshift_node_ips | default([], true) }}
+{% endif %}
+masterClients:
+{# TODO: allow user to set externalKubernetesKubeConfig #}
+ deployerKubeConfig: openshift-deployer.kubeconfig
+ externalKubernetesKubeConfig: ""
+ openshiftLoopbackKubeConfig: openshift-client.kubeconfig
+masterPublicURL: {{ openshift.master.public_api_url }}
+networkConfig:
+ clusterNetworkCIDR: {{ openshift.master.sdn_cluster_network_cidr }}
+ hostSubnetLength: {{ openshift.master.sdn_host_subnet_length }}
+ networkPluginName: {{ openshift.common.sdn_network_plugin_name }}
+{% include 'v1_partials/oauthConfig.j2' %}
+policyConfig:
+ bootstrapPolicyFile: {{ openshift_master_policy }}
+ openshiftSharedResourcesNamespace: openshift
+{# TODO: Allow users to override projectConfig items #}
+projectConfig:
+ defaultNodeSelector: ""
+ projectRequestMessage: ""
+ projectRequestTemplate: ""
+serviceAccountConfig:
+ managedNames:
+ - default
+ - builder
+ privateKeyFile: serviceaccounts.private.key
+ publicKeyFiles:
+ - serviceaccounts.public.key
+servingInfo:
+ bindAddress: {{ openshift.master.bind_addr }}:{{ openshift.master.api_port }}
+ certFile: master.server.crt
+ clientCA: ca.crt
+ keyFile: master.server.key
diff --git a/roles/openshift_master/templates/scheduler.json.j2 b/roles/openshift_master/templates/scheduler.json.j2
new file mode 100644
index 000000000..833e7f3e1
--- /dev/null
+++ b/roles/openshift_master/templates/scheduler.json.j2
@@ -0,0 +1,12 @@
+{
+ "predicates": [
+ {"name": "PodFitsResources"},
+ {"name": "PodFitsPorts"},
+ {"name": "NoDiskConflict"},
+ {"name": "Region", "argument": {"serviceAffinity" : {"labels" : ["region"]}}}
+ ],"priorities": [
+ {"name": "LeastRequestedPriority", "weight": 1},
+ {"name": "ServiceSpreadingPriority", "weight": 1},
+ {"name": "Zone", "weight" : 2, "argument": {"serviceAntiAffinity" : {"label": "zone"}}}
+ ]
+}
diff --git a/roles/openshift_master/templates/v1_partials/oauthConfig.j2 b/roles/openshift_master/templates/v1_partials/oauthConfig.j2
new file mode 100644
index 000000000..f6fd88c65
--- /dev/null
+++ b/roles/openshift_master/templates/v1_partials/oauthConfig.j2
@@ -0,0 +1,78 @@
+{% macro identity_provider_config(identity_provider) %}
+ apiVersion: v1
+ kind: {{ identity_provider.kind }}
+{% if identity_provider.kind == 'HTPasswdPasswordIdentityProvider' %}
+ file: {{ identity_provider.filename }}
+{% elif identity_provider.kind == 'BasicAuthPasswordIdentityProvider' %}
+ url: {{ identity_provider.url }}
+{% for key in ('ca', 'certFile', 'keyFile') %}
+{% if key in identity_provider %}
+      {{ key }}: {{ identity_provider[key] }}
+{% endif %}
+{% endfor %}
+{% elif identity_provider.kind == 'RequestHeaderIdentityProvider' %}
+ headers: {{ identity_provider.headers }}
+{% if 'clientCA' in identity_provider %}
+ clientCA: {{ identity_provider.clientCA }}
+{% endif %}
+{% elif identity_provider.kind == 'GitHubIdentityProvider' %}
+ clientID: {{ identity_provider.clientID }}
+ clientSecret: {{ identity_provider.clientSecret }}
+{% elif identity_provider.kind == 'GoogleIdentityProvider' %}
+ clientID: {{ identity_provider.clientID }}
+ clientSecret: {{ identity_provider.clientSecret }}
+{% if 'hostedDomain' in identity_provider %}
+ hostedDomain: {{ identity_provider.hostedDomain }}
+{% endif %}
+{% elif identity_provider.kind == 'OpenIDIdentityProvider' %}
+ clientID: {{ identity_provider.clientID }}
+ clientSecret: {{ identity_provider.clientSecret }}
+ claims:
+      id: {{ identity_provider.claims.id }}
+{% for claim_key in ('preferredUsername', 'name', 'email') %}
+{% if claim_key in identity_provider.claims %}
+ {{ claim_key }}: {{ identity_provider.claims[claim_key] }}
+{% endif %}
+{% endfor %}
+ urls:
+ authorize: {{ identity_provider.urls.authorize }}
+ token: {{ identity_provider.urls.token }}
+{% if 'userInfo' in identity_provider.urls %}
+      userInfo: {{ identity_provider.urls.userInfo }}
+{% endif %}
+{% if 'extraScopes' in identity_provider %}
+ extraScopes:
+{% for scope in identity_provider.extraScopes %}
+ - {{ scope }}
+{% endfor %}
+{% endif %}
+{% if 'extraAuthorizeParameters' in identity_provider %}
+ extraAuthorizeParameters:
+{% for param_key, param_value in identity_provider.extraAuthorizeParameters.iteritems() %}
+ {{ param_key }}: {{ param_value }}
+{% endfor %}
+{% endif %}
+{% endif %}
+{% endmacro %}
+oauthConfig:
+ assetPublicURL: {{ openshift.master.public_console_url }}/
+ grantConfig:
+ method: {{ openshift.master.oauth_grant_method }}
+ identityProviders:
+{% for identity_provider in openshift.master.identity_providers %}
+ - name: {{ identity_provider.name }}
+ challenge: {{ identity_provider.challenge }}
+ login: {{ identity_provider.login }}
+ provider:
+{{ identity_provider_config(identity_provider) }}
+{%- endfor %}
+ masterPublicURL: {{ openshift.master.public_api_url }}
+ masterURL: {{ openshift.master.api_url }}
+ sessionConfig:
+ sessionMaxAgeSeconds: {{ openshift.master.session_max_seconds }}
+ sessionName: {{ openshift.master.session_name }}
+ sessionSecretsFile: {{ openshift.master.session_secrets_file }}
+ tokenConfig:
+ accessTokenMaxAgeSeconds: {{ openshift.master.access_token_max_seconds }}
+ authorizeTokenMaxAgeSeconds: {{ openshift.master.auth_token_max_seconds }}
+{# Comment to preserve newline after authorizeTokenMaxAgeSeconds #}
diff --git a/roles/openshift_master/vars/main.yml b/roles/openshift_master/vars/main.yml
index c52d957ac..f6f69966a 100644
--- a/roles/openshift_master/vars/main.yml
+++ b/roles/openshift_master/vars/main.yml
@@ -1,5 +1,10 @@
---
-openshift_master_config: /etc/openshift/master.yaml
-openshift_master_ca_dir: "{{ openshift_cert_dir }}/ca"
-openshift_master_ca_cert: "{{ openshift_master_ca_dir }}/cert.crt"
-openshift_master_ca_key: "{{ openshift_master_ca_dir }}/key.key"
+openshift_master_config_dir: /etc/openshift/master
+openshift_master_config_file: "{{ openshift_master_config_dir }}/master-config.yaml"
+openshift_master_scheduler_conf: "{{ openshift_master_config_dir }}/scheduler.json"
+openshift_master_policy: "{{ openshift_master_config_dir }}/policy.json"
+
+openshift_master_valid_grant_methods:
+- auto
+- prompt
+- deny
diff --git a/roles/openshift_node/README.md b/roles/openshift_node/README.md
index 83359f164..c3c17b848 100644
--- a/roles/openshift_node/README.md
+++ b/roles/openshift_node/README.md
@@ -17,7 +17,7 @@ From this role:
| Name | Default value | |
|------------------------------------------|-----------------------|----------------------------------------|
| openshift_node_debug_level | openshift_debug_level | Verbosity of the debug logs for openshift-node |
-| openshift_registry_url | UNDEF (Optional) | Default docker registry to use |
+| oreg_url | UNDEF (Optional) | Default docker registry to use |
From openshift_common:
| Name | Default Value | |
diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml
index df7ec41b6..be51195f2 100644
--- a/roles/openshift_node/defaults/main.yml
+++ b/roles/openshift_node/defaults/main.yml
@@ -2,3 +2,7 @@
os_firewall_allow:
- service: OpenShift kubelet
port: 10250/tcp
+- service: http
+ port: 80/tcp
+- service: https
+ port: 443/tcp
diff --git a/roles/openshift_node/handlers/main.yml b/roles/openshift_node/handlers/main.yml
index ca2992637..953a1421b 100644
--- a/roles/openshift_node/handlers/main.yml
+++ b/roles/openshift_node/handlers/main.yml
@@ -1,4 +1,3 @@
---
- name: restart openshift-node
service: name=openshift-node state=restarted
- when: not openshift.common.use_openshift_sdn|bool
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index 3d56bdd67..c5202650f 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -1,50 +1,63 @@
---
# TODO: allow for overriding default ports where possible
-# TODO: trigger the external service when restart is needed
- name: Set node OpenShift facts
openshift_facts:
- role: 'node'
+ role: "{{ item.role }}"
+ local_facts: "{{ item.local_facts }}"
+ with_items:
+ - role: common
+ local_facts:
+ hostname: "{{ openshift_hostname | default(none) }}"
+ public_hostname: "{{ openshift_public_hostname | default(none) }}"
+ deployment_type: "{{ openshift_deployment_type }}"
+ - role: node
local_facts:
+ resources_cpu: "{{ openshift_node_resources_cpu | default(none) }}"
+ resources_memory: "{{ openshift_node_resources_memory | default(none) }}"
+ pod_cidr: "{{ openshift_node_pod_cidr | default(none) }}"
+ labels: "{{ openshift_node_labels | default(none) }}"
+ annotations: "{{ openshift_node_annotations | default(none) }}"
+ registry_url: "{{ oreg_url | default(none) }}"
debug_level: "{{ openshift_node_debug_level | default(openshift.common.debug_level) }}"
-- name: Test if node certs and config exist
- stat: path={{ item }}
- failed_when: not result.stat.exists
- register: result
- with_items:
- - "{{ openshift_node_cert_dir }}"
- - "{{ openshift_node_cert_dir }}/ca.crt"
- - "{{ openshift_node_cert_dir }}/client.crt"
- - "{{ openshift_node_cert_dir }}/client.key"
- - "{{ openshift_node_cert_dir }}/.kubeconfig"
- - "{{ openshift_node_cert_dir }}/node-config.yaml"
- - "{{ openshift_node_cert_dir }}/server.crt"
- - "{{ openshift_node_cert_dir }}/server.key"
-
- name: Install OpenShift Node package
- yum: pkg=openshift-node state=installed
- register: install_result
+ yum: pkg=openshift-node state=present
+ register: node_install_result
+
+- name: Install openshift-sdn-ovs
+ yum: pkg=openshift-sdn-ovs state=present
+ register: sdn_install_result
+ when: openshift.common.use_openshift_sdn
- name: Reload systemd units
command: systemctl daemon-reload
- when: install_result | changed
+ when: (node_install_result | changed or (openshift.common.use_openshift_sdn
+ and sdn_install_result | changed))
+
+# TODO: add the validate parameter when there is a validation command to run
+- name: Create the Node config
+ template:
+ dest: "{{ openshift_node_config_file }}"
+ src: node.yaml.v1.j2
+ notify:
+ - restart openshift-node
-# --create-certs=false is a temporary workaround until
-# https://github.com/openshift/origin/pull/1361 is merged upstream and it is
-# the default for nodes
- name: Configure OpenShift Node settings
lineinfile:
dest: /etc/sysconfig/openshift-node
- regexp: '^OPTIONS='
- line: "OPTIONS=\"--loglevel={{ openshift.node.debug_level }} --config={{ openshift_node_cert_dir }}/node-config.yaml\""
+ regexp: "{{ item.regex }}"
+ line: "{{ item.line }}"
+ with_items:
+ - regex: '^OPTIONS='
+ line: "OPTIONS=--loglevel={{ openshift.node.debug_level }}"
+ - regex: '^CONFIG_FILE='
+ line: "CONFIG_FILE={{ openshift_node_config_file }}"
notify:
- restart openshift-node
+- name: Allow NFS access for VMs
+ seboolean: name=virt_use_nfs state=yes persistent=yes
+
- name: Start and enable openshift-node
service: name=openshift-node enabled=yes state=started
- when: not openshift.common.use_openshift_sdn|bool
-
-- name: Disable openshift-node if openshift-node is managed externally
- service: name=openshift-node enabled=false
- when: openshift.common.use_openshift_sdn|bool
diff --git a/roles/openshift_node/templates/node.yaml.v1.j2 b/roles/openshift_node/templates/node.yaml.v1.j2
new file mode 100644
index 000000000..cab75cd49
--- /dev/null
+++ b/roles/openshift_node/templates/node.yaml.v1.j2
@@ -0,0 +1,18 @@
+allowDisabledDocker: false
+apiVersion: v1
+dnsDomain: {{ hostvars[openshift_first_master].openshift.dns.domain }}
+dnsIP: {{ hostvars[openshift_first_master].openshift.dns.ip }}
+imageConfig:
+ format: {{ openshift.node.registry_url }}
+ latest: false
+kind: NodeConfig
+masterKubeConfig: node.kubeconfig
+networkPluginName: {{ openshift.common.sdn_network_plugin_name }}
+nodeName: {{ openshift.common.hostname }}
+podManifestConfig: null
+servingInfo:
+ bindAddress: 0.0.0.0:10250
+ certFile: server.crt
+ clientCA: ca.crt
+ keyFile: server.key
+volumeDirectory: {{ openshift_data_dir }}/openshift.local.volumes
diff --git a/roles/openshift_node/vars/main.yml b/roles/openshift_node/vars/main.yml
index c6be83139..cf47f8354 100644
--- a/roles/openshift_node/vars/main.yml
+++ b/roles/openshift_node/vars/main.yml
@@ -1,2 +1,3 @@
---
-openshift_node_cert_dir: /etc/openshift/node
+openshift_node_config_dir: /etc/openshift/node
+openshift_node_config_file: "{{ openshift_node_config_dir }}/node-config.yaml"
diff --git a/roles/openshift_register_nodes/defaults/main.yml b/roles/openshift_register_nodes/defaults/main.yml
deleted file mode 100644
index a0befab44..000000000
--- a/roles/openshift_register_nodes/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-openshift_kube_api_version: v1beta1
diff --git a/roles/openshift_register_nodes/library/kubernetes_register_node.py b/roles/openshift_register_nodes/library/kubernetes_register_node.py
index afa9eb27d..a8c38627b 100755
--- a/roles/openshift_register_nodes/library/kubernetes_register_node.py
+++ b/roles/openshift_register_nodes/library/kubernetes_register_node.py
@@ -3,15 +3,13 @@
# vim: expandtab:tabstop=4:shiftwidth=4
#
# disable pylint checks
-# temporarily disabled until items can be addressed:
-# fixme - until all TODO comments have been addressed
# permanently disabled unless someone wants to refactor the object model:
# too-few-public-methods
# no-self-use
# too-many-arguments
# too-many-locals
# too-many-branches
-# pylint:disable=fixme, too-many-arguments, no-self-use
+# pylint:disable=too-many-arguments, no-self-use
# pylint:disable=too-many-locals, too-many-branches, too-few-public-methods
"""Ansible module to register a kubernetes node to the cluster"""
@@ -41,24 +39,6 @@ options:
- IP Address to associate with the node when registering.
Available in the following API versions: v1beta1.
required: false
- hostnames:
- default: []
- description:
- - Valid hostnames for this node. Available in the following API
- versions: v1beta3.
- required: false
- external_ips:
- default: []
- description:
- - External IP Addresses for this node. Available in the following API
- versions: v1beta3.
- required: false
- internal_ips:
- default: []
- description:
- - Internal IP Addresses for this node. Available in the following API
- versions: v1beta3.
- required: false
cpu:
default: null
description:
@@ -87,17 +67,6 @@ EXAMPLES = '''
hostIP: 192.168.1.1
cpu: 1
memory: 500000000
-
-# Node registration using the v1beta3 API, setting an alternate hostname,
-# internalIP, externalIP and assigning 3.5 CPU cores and 1 TiB of Memory
-- openshift_register_node:
- name: ose3.node.example.com
- api_version: v1beta3
- external_ips: ['192.168.1.5']
- internal_ips: ['10.0.0.5']
- hostnames: ['ose2.node.internal.local']
- cpu: 3.5
- memory: 1Ti
'''
@@ -313,57 +282,11 @@ class NodeSpec(object):
"""
return Util.remove_empty_elements(self.spec)
-class NodeStatus(object):
- """ Kubernetes Node Status
-
- Attributes:
- status (dict): A dictionary representing the node status
-
- Args:
- version (str): kubernetes api version
- externalIPs (list, optional): externalIPs for the node
- internalIPs (list, optional): internalIPs for the node
- hostnames (list, optional): hostnames for the node
- """
- def add_addresses(self, address_type, addresses):
- """ Adds addresses of the specified type
-
- Args:
- address_type (str): address type
- addresses (list): addresses to add
- """
- address_list = []
- for address in addresses:
- address_list.append(dict(type=address_type, address=address))
- return address_list
-
- def __init__(self, version, externalIPs=None, internalIPs=None,
- hostnames=None):
- if version == 'v1beta3':
- addresses = []
- if externalIPs is not None:
- addresses += self.add_addresses('ExternalIP', externalIPs)
- if internalIPs is not None:
- addresses += self.add_addresses('InternalIP', internalIPs)
- if hostnames is not None:
- addresses += self.add_addresses('Hostname', hostnames)
-
- self.status = dict(addresses=addresses)
-
- def get_status(self):
- """ Get the dict representing the node status
-
- Returns:
- dict: representation of the node status with any empty elements
- removed
- """
- return Util.remove_empty_elements(self.status)
-
class Node(object):
""" Kubernetes Node
Attributes:
- status (dict): A dictionary representing the node
+ node (dict): A dictionary representing the node
Args:
module (AnsibleModule):
@@ -371,9 +294,6 @@ class Node(object):
version (str, optional): kubernetes api version
node_name (str, optional): name for node
hostIP (str, optional): node host ip
- hostnames (list, optional): hostnames for the node
- externalIPs (list, optional): externalIPs for the node
- internalIPs (list, optional): internalIPs for the node
cpu (str, optional): cpu resources for the node
memory (str, optional): memory resources for the node
labels (list, optional): labels for the node
@@ -382,8 +302,7 @@ class Node(object):
externalID (str, optional): external id of the node
"""
def __init__(self, module, client_opts, version='v1beta1', node_name=None,
- hostIP=None, hostnames=None, externalIPs=None,
- internalIPs=None, cpu=None, memory=None, labels=None,
+ hostIP=None, cpu=None, memory=None, labels=None,
annotations=None, podCIDR=None, externalID=None):
self.module = module
self.client_opts = client_opts
@@ -405,9 +324,7 @@ class Node(object):
apiVersion=version,
metadata=metadata,
spec=NodeSpec(version, cpu, memory, podCIDR,
- externalID),
- status=NodeStatus(version, externalIPs,
- internalIPs, hostnames))
+ externalID))
def get_name(self):
""" Get the name for the node
@@ -418,7 +335,7 @@ class Node(object):
if self.node['apiVersion'] == 'v1beta1':
return self.node['id']
elif self.node['apiVersion'] == 'v1beta3':
- return self.node['name']
+ return self.node['metadata']['name']
def get_node(self):
""" Get the dict representing the node
@@ -432,7 +349,6 @@ class Node(object):
node['resources'] = self.node['resources'].get_resources()
elif self.node['apiVersion'] == 'v1beta3':
node['spec'] = self.node['spec'].get_spec()
- node['status'] = self.node['status'].get_status()
return Util.remove_empty_elements(node)
def exists(self):
@@ -473,52 +389,15 @@ class Node(object):
else:
return True
-def main():
- """ main """
- module = AnsibleModule(
- argument_spec=dict(
- name=dict(required=True, type='str'),
- host_ip=dict(type='str'),
- hostnames=dict(type='list', default=[]),
- external_ips=dict(type='list', default=[]),
- internal_ips=dict(type='list', default=[]),
- api_version=dict(type='str', default='v1beta1',
- choices=['v1beta1', 'v1beta3']),
- cpu=dict(type='str'),
- memory=dict(type='str'),
- # TODO: needs documented
- labels=dict(type='dict', default={}),
- # TODO: needs documented
- annotations=dict(type='dict', default={}),
- # TODO: needs documented
- pod_cidr=dict(type='str'),
- # TODO: needs documented
- external_id=dict(type='str'),
- # TODO: needs documented
- client_config=dict(type='str'),
- # TODO: needs documented
- client_cluster=dict(type='str', default='master'),
- # TODO: needs documented
- client_context=dict(type='str', default='default'),
- # TODO: needs documented
- client_namespace=dict(type='str', default='default'),
- # TODO: needs documented
- client_user=dict(type='str', default='system:openshift-client'),
- # TODO: needs documented
- kubectl_cmd=dict(type='list', default=['kubectl']),
- # TODO: needs documented
- kubeconfig_flag=dict(type='str'),
- # TODO: needs documented
- default_client_config=dict(type='str')
- ),
- mutually_exclusive=[
- ['host_ip', 'external_ips'],
- ['host_ip', 'internal_ips'],
- ['host_ip', 'hostnames'],
- ],
- supports_check_mode=True
- )
+def generate_client_opts(module):
+ """ Generates the client options
+ Args:
+ module(AnsibleModule)
+
+ Returns:
+ str: client options
+ """
client_config = '~/.kube/.kubeconfig'
if 'default_client_config' in module.params:
client_config = module.params['default_client_config']
@@ -533,8 +412,7 @@ def main():
kubeconfig_flag = '--kubeconfig'
if 'kubeconfig_flag' in module.params:
kubeconfig_flag = module.params['kubeconfig_flag']
- client_opts.append(kubeconfig_flag + '=' +
- os.path.expanduser(module.params['client_config']))
+ client_opts.append(kubeconfig_flag + '=' + os.path.expanduser(module.params['client_config']))
try:
config = ClientConfig(client_opts, module)
@@ -547,51 +425,85 @@ def main():
if client_context != config.current_context():
client_opts.append("--context=%s" % client_context)
else:
- module.fail_json(msg="Context %s not found in client config" %
- client_context)
+ module.fail_json(msg="Context %s not found in client config" % client_context)
client_user = module.params['client_user']
if config.has_user(client_user):
if client_user != config.get_user_for_context(client_context):
client_opts.append("--user=%s" % client_user)
else:
- module.fail_json(msg="User %s not found in client config" %
- client_user)
+ module.fail_json(msg="User %s not found in client config" % client_user)
client_cluster = module.params['client_cluster']
if config.has_cluster(client_cluster):
if client_cluster != config.get_cluster_for_context(client_context):
client_opts.append("--cluster=%s" % client_cluster)
else:
- module.fail_json(msg="Cluster %s not found in client config" %
- client_cluster)
+ module.fail_json(msg="Cluster %s not found in client config" % client_cluster)
client_namespace = module.params['client_namespace']
if client_namespace != config.get_namespace_for_context(client_context):
client_opts.append("--namespace=%s" % client_namespace)
- node = Node(module, client_opts, module.params['api_version'],
- module.params['name'], module.params['host_ip'],
- module.params['hostnames'], module.params['external_ips'],
- module.params['internal_ips'], module.params['cpu'],
- module.params['memory'], module.params['labels'],
- module.params['annotations'], module.params['pod_cidr'],
- module.params['external_id'])
+ return client_opts
+
+
+def main():
+ """ main """
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True, type='str'),
+ host_ip=dict(type='str'),
+ api_version=dict(type='str', default='v1beta1',
+ choices=['v1beta1', 'v1beta3']),
+ cpu=dict(type='str'),
+ memory=dict(type='str'),
+ # TODO: needs documented
+ labels=dict(type='dict', default={}),
+ # TODO: needs documented
+ annotations=dict(type='dict', default={}),
+ # TODO: needs documented
+ pod_cidr=dict(type='str'),
+ # TODO: needs documented
+ client_config=dict(type='str'),
+ # TODO: needs documented
+ client_cluster=dict(type='str', default='master'),
+ # TODO: needs documented
+ client_context=dict(type='str', default='default'),
+ # TODO: needs documented
+ client_namespace=dict(type='str', default='default'),
+ # TODO: needs documented
+ client_user=dict(type='str', default='system:admin'),
+ # TODO: needs documented
+ kubectl_cmd=dict(type='list', default=['kubectl']),
+ # TODO: needs documented
+ kubeconfig_flag=dict(type='str'),
+ # TODO: needs documented
+ default_client_config=dict(type='str')
+ ),
+ supports_check_mode=True
+ )
+
+ labels = module.params['labels']
+ kube_hostname_label = 'kubernetes.io/hostname'
+ if kube_hostname_label not in labels:
+ labels[kube_hostname_label] = module.params['name']
+
+ node = Node(module, generate_client_opts(module),
+ module.params['api_version'], module.params['name'],
+ module.params['host_ip'], module.params['cpu'],
+ module.params['memory'], labels, module.params['annotations'],
+ module.params['pod_cidr'])
- # TODO: attempt to support changing node settings where possible and/or
- # modifying node resources
if node.exists():
module.exit_json(changed=False, node=node.get_node())
elif module.check_mode:
module.exit_json(changed=True, node=node.get_node())
+ elif node.create():
+ module.exit_json(changed=True, msg="Node created successfully",
+ node=node.get_node())
else:
- if node.create():
- module.exit_json(changed=True,
- msg="Node created successfully",
- node=node.get_node())
- else:
- module.fail_json(msg="Unknown error creating node",
- node=node.get_node())
+ module.fail_json(msg="Unknown error creating node", node=node.get_node())
# ignore pylint errors related to the module_utils import
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
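
The refactored main() above now guarantees a kubernetes.io/hostname label before the Node object is built. A minimal, self-contained Python sketch of that defaulting step (the params dict and its values are hypothetical stand-ins for module.params):

    # Sketch only: mirrors the label-defaulting logic added to main() above.
    # 'params' stands in for module.params; the values are hypothetical.
    params = {'name': 'ose3.node.example.com', 'labels': {'region': 'infra'}}

    labels = params['labels']
    kube_hostname_label = 'kubernetes.io/hostname'
    if kube_hostname_label not in labels:
        labels[kube_hostname_label] = params['name']

    # labels -> {'region': 'infra', 'kubernetes.io/hostname': 'ose3.node.example.com'}
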
diff --git a/roles/openshift_register_nodes/tasks/main.yml b/roles/openshift_register_nodes/tasks/main.yml
index d4d72d126..11097a7cf 100644
--- a/roles/openshift_register_nodes/tasks/main.yml
+++ b/roles/openshift_register_nodes/tasks/main.yml
@@ -1,45 +1,42 @@
---
-# TODO: support new create-config command to generate node certs and config
-# TODO: recreate master/node configs if settings that affect the configs
-# change (hostname, public_hostname, ip, public_ip, etc)
+- name: Create openshift_generated_configs_dir if it doesn't exist
+ file:
+ path: "{{ openshift_generated_configs_dir }}"
+ state: directory
-
-# TODO: use a template lookup here
-# TODO: create a failed_when condition
-- name: Use enterprise default for openshift_registry_url if not set
- set_fact:
- openshift_registry_url: "openshift3_beta/ose-${component}:${version}"
- when: openshift.common.deployment_type == 'enterprise' and openshift_registry_url is not defined
-
-- name: Use online default for openshift_registry_url if not set
- set_fact:
- openshift_registry_url: "docker-registry.ops.rhcloud.com/openshift3_beta/ose-${component}:${version}"
- when: openshift.common.deployment_type == 'online' and openshift_registry_url is not defined
-
-- name: Create node config
+- name: Generate the node client config
command: >
- /usr/bin/openshift admin create-node-config
- --node-dir={{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}
- --node={{ item.openshift.common.hostname }}
- --hostnames={{ [item.openshift.common.hostname, item.openshift.common.public_hostname]|unique|join(",") }}
- --dns-domain={{ openshift.dns.domain }}
- --dns-ip={{ openshift.dns.ip }}
+ {{ openshift.common.admin_binary }} create-api-client-config
+ --certificate-authority={{ openshift_master_ca_cert }}
+ --client-dir={{ openshift_generated_configs_dir }}/node-{{ item.openshift.common.hostname }}
+ --groups=system:nodes
--master={{ openshift.master.api_url }}
+ --signer-cert={{ openshift_master_ca_cert }}
--signer-key={{ openshift_master_ca_key }}
+ --signer-serial={{ openshift_master_ca_serial }}
+ --user=system:node-{{ item.openshift.common.hostname }}
+ args:
+ chdir: "{{ openshift_generated_configs_dir }}"
+ creates: "{{ openshift_generated_configs_dir }}/node-{{ item.openshift.common.hostname }}"
+ with_items: nodes_needing_certs
+
+- name: Generate the node server certificate
+ delegate_to: "{{ openshift_first_master }}"
+ command: >
+ {{ openshift.common.admin_binary }} create-server-cert
+ --cert=server.crt --key=server.key --overwrite=true
+ --hostnames={{ [item.openshift.common.hostname, item.openshift.common.public_hostname]|unique|join(",") }}
--signer-cert={{ openshift_master_ca_cert }}
- --certificate-authority={{ openshift_master_ca_cert }}
- --signer-serial={{ openshift_master_ca_dir }}/serial.txt
- --node-client-certificate-authority={{ openshift_master_ca_cert }}
- {{ ('--images=' ~ openshift_registry_url) if openshift_registry_url is defined else '' }}
- --listen=https://0.0.0.0:10250
+ --signer-key={{ openshift_master_ca_key }}
+ --signer-serial={{ openshift_master_ca_serial }}
args:
- chdir: "{{ openshift_cert_parent_dir }}"
- creates: "{{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}"
- with_items: openshift_nodes
+ chdir: "{{ openshift_generated_configs_dir }}/node-{{ item.openshift.common.hostname }}"
+ creates: "{{ openshift_generated_configs_dir }}/node-{{ item.openshift.common.hostname }}/server.crt"
+ with_items: nodes_needing_certs
- name: Register unregistered nodes
kubernetes_register_node:
- kubectl_cmd: ['osc']
+ kubectl_cmd: "{{ [openshift.common.client_binary] }}"
default_client_config: '~/.config/openshift/.config'
name: "{{ item.openshift.common.hostname }}"
api_version: "{{ openshift_kube_api_version }}"
@@ -49,8 +46,5 @@
host_ip: "{{ item.openshift.common.ip }}"
labels: "{{ item.openshift.node.labels | default({}) }}"
annotations: "{{ item.openshift.node.annotations | default({}) }}"
- external_id: "{{ item.openshift.node.external_id }}"
- # TODO: support customizing other attributes such as: client_config,
- # client_cluster, client_context, client_user
with_items: openshift_nodes
register: register_result
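
The --hostnames value in the server-certificate task above is built with the Jinja2 unique and join filters; a rough Python equivalent (the hostnames here are hypothetical) shows how duplicate names collapse to a single entry:

    # Rough equivalent of: [hostname, public_hostname] | unique | join(",")
    hostname = 'node1.example.com'          # hypothetical
    public_hostname = 'node1.example.com'   # hypothetical: same as hostname
    names = []
    for name in (hostname, public_hostname):
        if name not in names:               # 'unique' keeps the first occurrence
            names.append(name)
    print(','.join(names))                  # -> node1.example.com
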
diff --git a/roles/openshift_register_nodes/vars/main.yml b/roles/openshift_register_nodes/vars/main.yml
index bd497f08f..3801b8427 100644
--- a/roles/openshift_register_nodes/vars/main.yml
+++ b/roles/openshift_register_nodes/vars/main.yml
@@ -1,7 +1,8 @@
---
-openshift_cert_parent_dir: /var/lib/openshift
-openshift_cert_relative_dir: openshift.local.certificates
-openshift_cert_dir: "{{ openshift_cert_parent_dir }}/{{ openshift_cert_relative_dir }}"
-openshift_master_ca_dir: "{{ openshift_cert_dir }}/ca"
-openshift_master_ca_cert: "{{ openshift_master_ca_dir }}/cert.crt"
-openshift_master_ca_key: "{{ openshift_master_ca_dir }}/key.key"
+openshift_node_config_dir: /etc/openshift/node
+openshift_master_config_dir: /etc/openshift/master
+openshift_generated_configs_dir: /etc/openshift/generated-configs
+openshift_master_ca_cert: "{{ openshift_master_config_dir }}/ca.crt"
+openshift_master_ca_key: "{{ openshift_master_config_dir }}/ca.key"
+openshift_master_ca_serial: "{{ openshift_master_config_dir }}/ca.serial.txt"
+openshift_kube_api_version: v1beta3
diff --git a/roles/openshift_registry/README.md b/roles/openshift_registry/README.md
new file mode 100644
index 000000000..202c818b8
--- /dev/null
+++ b/roles/openshift_registry/README.md
@@ -0,0 +1,42 @@
+OpenShift Container Docker Registry
+===================================
+
+OpenShift Docker Registry service installation
+
+Requirements
+------------
+
+Running OpenShift cluster
+
+Role Variables
+--------------
+
+From this role:
+| Name | Default value | |
+|--------------------|-------------------------------------------------------|---------------------|
+| | | |
+
+From openshift_common:
+| Name | Default value | |
+|-----------------------|---------------|--------------------------------------|
+| openshift_debug_level | 0 | Global openshift debug log verbosity |
+
+
+Dependencies
+------------
+
+Example Playbook
+----------------
+
+TODO
+
+License
+-------
+
+Apache License, Version 2.0
+
+Author Information
+------------------
+
+Red Hat openshift@redhat.com
+
diff --git a/roles/openshift_registry/handlers/main.yml b/roles/openshift_registry/handlers/main.yml
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/roles/openshift_registry/handlers/main.yml
diff --git a/roles/openshift_sdn_node/meta/main.yml b/roles/openshift_registry/meta/main.yml
index ffe10f836..93b6797d1 100644
--- a/roles/openshift_sdn_node/meta/main.yml
+++ b/roles/openshift_registry/meta/main.yml
@@ -1,7 +1,7 @@
---
galaxy_info:
- author: Jason DeTiberus
- description: OpenShift SDN Node
+ author: OpenShift Red Hat
+ description: OpenShift Embedded Docker Registry
company: Red Hat, Inc.
license: Apache License, Version 2.0
min_ansible_version: 1.7
@@ -11,5 +11,3 @@ galaxy_info:
- 7
categories:
- cloud
-dependencies:
-- { role: openshift_common }
diff --git a/roles/openshift_registry/tasks/main.yml b/roles/openshift_registry/tasks/main.yml
new file mode 100644
index 000000000..29387d7d5
--- /dev/null
+++ b/roles/openshift_registry/tasks/main.yml
@@ -0,0 +1,11 @@
+---
+- set_fact: _oreg_images="--images={{ oreg_url|quote }}"
+ when: oreg_url is defined
+
+- name: Deploy OpenShift Registry
+ command: >
+ {{ openshift.common.admin_binary }} registry
+ --create
+ --credentials={{ openshift_master_config_dir }}/openshift-registry.kubeconfig {{ _oreg_images|default() }}
+ register: _oreg_results
+ changed_when: "'service exists' not in _oreg_results.stdout"
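
The changed_when expression above is what keeps this deploy task idempotent: a re-run only reports a change when the command output does not say the service already exists. A trivial sketch with hypothetical command output:

    # Sketch only: 'stdout' is hypothetical output from the registry command.
    stdout = 'Docker registry "docker-registry" service exists'
    changed = 'service exists' not in stdout
    print(changed)  # -> False, so the task is reported as unchanged
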
diff --git a/roles/openshift_registry/vars/main.yml b/roles/openshift_registry/vars/main.yml
new file mode 100644
index 000000000..9fb501e85
--- /dev/null
+++ b/roles/openshift_registry/vars/main.yml
@@ -0,0 +1,3 @@
+---
+openshift_master_config_dir: /etc/openshift/master
+
diff --git a/roles/openshift_repos/files/online/repos/enterprise-v3.repo b/roles/openshift_repos/files/online/repos/enterprise-v3.repo
index d324c142a..69c480f0a 100644
--- a/roles/openshift_repos/files/online/repos/enterprise-v3.repo
+++ b/roles/openshift_repos/files/online/repos/enterprise-v3.repo
@@ -1,10 +1,10 @@
[enterprise-v3]
-name=OpenShift Enterprise Beta3
-baseurl=https://gce-mirror1.ops.rhcloud.com/libra/libra-7-ose-beta3/
- https://mirror.ops.rhcloud.com/libra/libra-7-ose-beta3/
+name=OpenShift Enterprise Beta4
+baseurl=https://mirror.ops.rhcloud.com/libra/libra-7-ose-beta4/
+ https://gce-mirror1.ops.rhcloud.com/libra/libra-7-ose-beta4/
enabled=1
gpgcheck=0
failovermethod=priority
sslverify=False
sslclientcert=/var/lib/yum/client-cert.pem
-sslclientkey=/var/lib/yum/client-key.pem \ No newline at end of file
+sslclientkey=/var/lib/yum/client-key.pem
diff --git a/roles/openshift_sdn_master/README.md b/roles/openshift_router/README.md
index d0dcf6d11..6d8ee25c6 100644
--- a/roles/openshift_sdn_master/README.md
+++ b/roles/openshift_router/README.md
@@ -1,20 +1,20 @@
-OpenShift SDN Master
-====================
+OpenShift Container Router
+==========================
-OpenShift SDN Master service installation
+OpenShift Router service installation
Requirements
------------
-A host with the openshift_master role applied
+Running OpenShift cluster
Role Variables
--------------
From this role:
-| Name | Default value | |
-|----------------------------------|-----------------------|--------------------------------------------------|
-| openshift_sdn_master_debug_level | openshift_debug_level | Verbosity of the debug logs for openshift-master |
+| Name | Default value | |
+|--------------------|-------------------------------------------------------|---------------------|
+| | | |
From openshift_common:
| Name | Default value | |
@@ -24,7 +24,6 @@ From openshift_common:
Dependencies
------------
-
Example Playbook
----------------
@@ -38,4 +37,5 @@ Apache License, Version 2.0
Author Information
------------------
-TODO
+Red Hat openshift@redhat.com
+
diff --git a/roles/openshift_router/handlers/main.yml b/roles/openshift_router/handlers/main.yml
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/roles/openshift_router/handlers/main.yml
diff --git a/roles/openshift_sdn_master/meta/main.yml b/roles/openshift_router/meta/main.yml
index 5de32cc13..0471e5e14 100644
--- a/roles/openshift_sdn_master/meta/main.yml
+++ b/roles/openshift_router/meta/main.yml
@@ -1,7 +1,7 @@
---
galaxy_info:
- author: Jason DeTiberus
- description: OpenShift SDN Master
+ author: OpenShift Red Hat
+ description: OpenShift Embedded Router
company: Red Hat, Inc.
license: Apache License, Version 2.0
min_ansible_version: 1.7
@@ -11,5 +11,3 @@ galaxy_info:
- 7
categories:
- cloud
-dependencies:
-- { role: openshift_common }
diff --git a/roles/openshift_router/tasks/main.yml b/roles/openshift_router/tasks/main.yml
new file mode 100644
index 000000000..929177262
--- /dev/null
+++ b/roles/openshift_router/tasks/main.yml
@@ -0,0 +1,11 @@
+---
+- set_fact: _ortr_images="--images={{ oreg_url|quote }}"
+ when: oreg_url is defined
+
+- name: Deploy OpenShift Router
+ command: >
+ {{ openshift.common.admin_binary }} router
+ --create
+ --credentials={{ openshift_master_config_dir }}/openshift-router.kubeconfig {{ _ortr_images|default() }}
+ register: _ortr_results
+ changed_when: "'service exists' not in _ortr_results.stdout"
diff --git a/roles/openshift_router/vars/main.yml b/roles/openshift_router/vars/main.yml
new file mode 100644
index 000000000..9fb501e85
--- /dev/null
+++ b/roles/openshift_router/vars/main.yml
@@ -0,0 +1,3 @@
+---
+openshift_master_config_dir: /etc/openshift/master
+
diff --git a/roles/openshift_sdn_master/handlers/main.yml b/roles/openshift_sdn_master/handlers/main.yml
deleted file mode 100644
index cd645f2c5..000000000
--- a/roles/openshift_sdn_master/handlers/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-- name: restart openshift-sdn-master
- service: name=openshift-sdn-master state=restarted
diff --git a/roles/openshift_sdn_master/tasks/main.yml b/roles/openshift_sdn_master/tasks/main.yml
deleted file mode 100644
index 77e7a80ba..000000000
--- a/roles/openshift_sdn_master/tasks/main.yml
+++ /dev/null
@@ -1,37 +0,0 @@
----
-# TODO: add task to set the sdn subnet if openshift-sdn-master hasn't been
-# started yet
-
-- name: Set master sdn OpenShift facts
- openshift_facts:
- role: 'master_sdn'
- local_facts:
- debug_level: "{{ openshift_master_sdn_debug_level | default(openshift.common.debug_level) }}"
-
-- name: Install openshift-sdn-master
- yum:
- pkg: openshift-sdn-master
- state: installed
- register: install_result
-
-- name: Reload systemd units
- command: systemctl daemon-reload
- when: install_result | changed
-
-# TODO: we should probably generate certs specifically for sdn
-- name: Configure openshift-sdn-master settings
- lineinfile:
- dest: /etc/sysconfig/openshift-sdn-master
- regexp: '^OPTIONS='
- line: "OPTIONS=\"-v={{ openshift.master_sdn.debug_level }} -etcd-endpoints={{ openshift_sdn_master_url}}
- -etcd-cafile={{ openshift_cert_dir }}/ca/ca.crt
- -etcd-certfile={{ openshift_cert_dir }}/openshift-client/cert.crt
- -etcd-keyfile={{ openshift_cert_dir }}/openshift-client/key.key\""
- notify:
- - restart openshift-sdn-master
-
-- name: Enable openshift-sdn-master
- service:
- name: openshift-sdn-master
- enabled: yes
- state: started
diff --git a/roles/openshift_sdn_node/README.md b/roles/openshift_sdn_node/README.md
deleted file mode 100644
index e6b6a9503..000000000
--- a/roles/openshift_sdn_node/README.md
+++ /dev/null
@@ -1,44 +0,0 @@
-OpenShift SDN Node
-==================
-
-OpenShift SDN Node service installation
-
-Requirements
-------------
-
-A host with the openshift_node role applied
-
-Role Variables
---------------
-
-From this role:
-| Name | Default value | |
-|--------------------------------|-----------------------|--------------------------------------------------|
-| openshift_sdn_node_debug_level | openshift_debug_level | Verbosity of the debug logs for openshift-master |
-
-
-From openshift_common:
-| Name | Default value | |
-|-------------------------------|---------------------|----------------------------------------|
-| openshift_debug_level | 0 | Global openshift debug log verbosity |
-| openshift_public_ip | UNDEF (Required) | Public IP address to use for this host |
-| openshift_hostname | UNDEF (Required) | hostname to use for this instance |
-
-Dependencies
-------------
-
-
-Example Playbook
-----------------
-
-TODO
-
-License
--------
-
-Apache License, Version 2.0
-
-Author Information
-------------------
-
-TODO
diff --git a/roles/openshift_sdn_node/handlers/main.yml b/roles/openshift_sdn_node/handlers/main.yml
deleted file mode 100644
index 402d82149..000000000
--- a/roles/openshift_sdn_node/handlers/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-- name: restart openshift-sdn-node
- service: name=openshift-sdn-node state=restarted
diff --git a/roles/openshift_sdn_node/tasks/main.yml b/roles/openshift_sdn_node/tasks/main.yml
deleted file mode 100644
index 37a30d019..000000000
--- a/roles/openshift_sdn_node/tasks/main.yml
+++ /dev/null
@@ -1,60 +0,0 @@
----
-- name: Set node sdn OpenShift facts
- openshift_facts:
- role: 'node_sdn'
- local_facts:
- debug_level: "{{ openshift_node_sdn_debug_level | default(openshift.common.debug_level) }}"
-
-- name: Install openshift-sdn-node
- yum:
- pkg: openshift-sdn-node
- state: installed
- register: install_result
-
-- name: Reload systemd units
- command: systemctl daemon-reload
- when: install_result | changed
-
-# TODO: we are specifying -hostname= for OPTIONS as a workaround for
-# openshift-sdn-node not properly detecting the hostname.
-# TODO: we should probably generate certs specifically for sdn
-- name: Configure openshift-sdn-node settings
- lineinfile:
- dest: /etc/sysconfig/openshift-sdn-node
- regexp: "{{ item.regex }}"
- line: "{{ item.line }}"
- backrefs: yes
- with_items:
- - regex: '^(OPTIONS=)'
- line: '\1"-v={{ openshift.node_sdn.debug_level }} -hostname={{ openshift.common.hostname }}
- -etcd-cafile={{ openshift_node_cert_dir }}/ca.crt
- -etcd-certfile={{ openshift_node_cert_dir }}/client.crt
- -etcd-keyfile={{ openshift_node_cert_dir }}/client.key\"'
- - regex: '^(MASTER_URL=)'
- line: '\1"{{ openshift_sdn_master_url }}"'
- - regex: '^(MINION_IP=)'
- line: '\1"{{ openshift.common.ip }}"'
- notify: restart openshift-sdn-node
-
-- name: Ensure we aren't setting DOCKER_OPTIONS in /etc/sysconfig/openshift-sdn-node
- lineinfile:
- dest: /etc/sysconfig/openshift-sdn-node
- regexp: '^DOCKER_OPTIONS='
- state: absent
- notify: restart openshift-sdn-node
-
-# TODO lock down the insecure-registry config to a more sane value than
-# 0.0.0.0/0
-- name: Configure docker insecure-registry setting
- lineinfile:
- dest: /etc/sysconfig/docker
- regexp: INSECURE_REGISTRY=
- line: INSECURE_REGISTRY='--insecure-registry=0.0.0.0/0'
- notify: restart openshift-sdn-node
-
-
-- name: Start and enable openshift-sdn-node
- service:
- name: openshift-sdn-node
- enabled: yes
- state: started
diff --git a/roles/os_zabbix/library/zbxapi.py b/roles/os_zabbix/library/zbxapi.py
index f4f52909b..b5fa5ee2b 100755
--- a/roles/os_zabbix/library/zbxapi.py
+++ b/roles/os_zabbix/library/zbxapi.py
@@ -1,4 +1,8 @@
#!/usr/bin/env python
+# vim: expandtab:tabstop=4:shiftwidth=4
+'''
+ ZabbixAPI ansible module
+'''
# Copyright 2015 Red Hat Inc.
#
@@ -17,11 +21,22 @@
# Purpose: An ansible module to communicate with zabbix.
#
+# pylint: disable=line-too-long
+# Disabling line length for readability
+
import json
import httplib2
import sys
import os
import re
+import copy
+
+class ZabbixAPIError(Exception):
+ '''
+ ZabbixAPIError
+ Exists to propagate errors up from the api
+ '''
+ pass
class ZabbixAPI(object):
'''
@@ -69,23 +84,26 @@ class ZabbixAPI(object):
'Usermedia': ['get'],
}
- def __init__(self, data={}):
- self.server = data['server'] or None
- self.username = data['user'] or None
- self.password = data['password'] or None
- if any(map(lambda value: value == None, [self.server, self.username, self.password])):
+ def __init__(self, data=None):
+ if not data:
+ data = {}
+ self.server = data.get('server', None)
+ self.username = data.get('user', None)
+ self.password = data.get('password', None)
+ if any([value == None for value in [self.server, self.username, self.password]]):
print 'Please specify zabbix server url, username, and password.'
sys.exit(1)
- self.verbose = data.has_key('verbose')
+ self.verbose = data.get('verbose', False)
self.use_ssl = data.has_key('use_ssl')
self.auth = None
- for class_name, method_names in self.classes.items():
- #obj = getattr(self, class_name)(self)
- #obj.__dict__
- setattr(self, class_name.lower(), getattr(self, class_name)(self))
+ for cname, _ in self.classes.items():
+ setattr(self, cname.lower(), getattr(self, cname)(self))
+ # pylint: disable=no-member
+ # This method does not exist until the metaprogramming executed
+ # This is permanently disabled.
results = self.user.login(user=self.username, password=self.password)
if results[0]['status'] == '200':
@@ -98,48 +116,40 @@ class ZabbixAPI(object):
print "Error in call to zabbix. Http status: {0}.".format(results[0]['status'])
sys.exit(1)
- def perform(self, method, params):
+ def perform(self, method, rpc_params):
'''
This method calls your zabbix server.
It requires the following parameters in order for a proper request to be processed:
-
- jsonrpc - the version of the JSON-RPC protocol used by the API; the Zabbix API implements JSON-RPC version 2.0;
+ jsonrpc - the version of the JSON-RPC protocol used by the API;
+ the Zabbix API implements JSON-RPC version 2.0;
method - the API method being called;
- params - parameters that will be passed to the API method;
+ rpc_params - parameters that will be passed to the API method;
id - an arbitrary identifier of the request;
auth - a user authentication token; since we don't have one yet, it's set to null.
'''
http_method = "POST"
- if params.has_key("http_method"):
- http_method = params['http_method']
-
jsonrpc = "2.0"
- if params.has_key('jsonrpc'):
- jsonrpc = params['jsonrpc']
-
rid = 1
- if params.has_key('id'):
- rid = params['id']
http = None
if self.use_ssl:
http = httplib2.Http()
else:
- http = httplib2.Http( disable_ssl_certificate_validation=True,)
+ http = httplib2.Http(disable_ssl_certificate_validation=True,)
- headers = params.get('headers', {})
+ headers = {}
headers["Content-type"] = "application/json"
body = {
"jsonrpc": jsonrpc,
"method": method,
- "params": params,
+ "params": rpc_params.get('params', {}),
"id": rid,
'auth': self.auth,
}
- if method in ['user.login','api.version']:
+ if method in ['user.login', 'api.version']:
del body['auth']
body = json.dumps(body)
@@ -150,48 +160,70 @@ class ZabbixAPI(object):
print headers
httplib2.debuglevel = 1
- response, results = http.request(self.server, http_method, body, headers)
+ response, content = http.request(self.server, http_method, body, headers)
+
+ if response['status'] not in ['200', '201']:
+ raise ZabbixAPIError('Error calling zabbix. Zabbix returned %s' % response['status'])
if self.verbose:
print response
- print results
+ print content
try:
- results = json.loads(results)
- except ValueError as e:
- results = {"error": e.message}
+ content = json.loads(content)
+ except ValueError as err:
+ content = {"error": err.message}
- return response, results
+ return response, content
- '''
- This bit of metaprogramming is where the ZabbixAPI subclasses are created.
- For each of ZabbixAPI.classes we create a class from the key and methods
- from the ZabbixAPI.classes values. We pass a reference to ZabbixAPI class
- to each subclass in order for each to be able to call the perform method.
- '''
@staticmethod
- def meta(class_name, method_names):
- # This meta method allows a class to add methods to it.
- def meta_method(Class, method_name):
+ def meta(cname, method_names):
+ '''
+ This bit of metaprogramming is where the ZabbixAPI subclasses are created.
+ For each of ZabbixAPI.classes we create a class from the key and methods
+ from the ZabbixAPI.classes values. We pass a reference to ZabbixAPI class
+ to each subclass in order for each to be able to call the perform method.
+ '''
+ def meta_method(_class, method_name):
+ '''
+ This meta method allows a class to add methods to it.
+ '''
# This template method is a stub method for each of the subclass
# methods.
- def template_method(self, **params):
- return self.parent.perform(class_name.lower()+"."+method_name, params)
- template_method.__doc__ = "https://www.zabbix.com/documentation/2.4/manual/api/reference/%s/%s" % (class_name.lower(), method_name)
+ def template_method(self, params=None, **rpc_params):
+ '''
+ This template method is a stub method for each of the subclass methods.
+ '''
+ if params:
+ rpc_params['params'] = params
+ else:
+ rpc_params['params'] = copy.deepcopy(rpc_params)
+
+ return self.parent.perform(cname.lower()+"."+method_name, rpc_params)
+
+ template_method.__doc__ = \
+ "https://www.zabbix.com/documentation/2.4/manual/api/reference/%s/%s" % \
+ (cname.lower(), method_name)
template_method.__name__ = method_name
# this is where the template method is placed inside of the subclass
# e.g. setattr(User, "create", stub_method)
- setattr(Class, template_method.__name__, template_method)
+ setattr(_class, template_method.__name__, template_method)
# This class call instantiates a subclass. e.g. User
- Class=type(class_name, (object,), { '__doc__': "https://www.zabbix.com/documentation/2.4/manual/api/reference/%s" % class_name.lower() })
- # This init method gets placed inside of the Class
- # to allow it to be instantiated. A reference to the parent class(ZabbixAPI)
- # is passed in to allow each class access to the perform method.
+ _class = type(cname,
+ (object,),
+ {'__doc__': \
+ "https://www.zabbix.com/documentation/2.4/manual/api/reference/%s" % cname.lower()})
def __init__(self, parent):
+ '''
+ This init method gets placed inside of the _class
+ to allow it to be instantiated. A reference to the parent class(ZabbixAPI)
+ is passed in to allow each class access to the perform method.
+ '''
self.parent = parent
+
# This attaches the init to the subclass. e.g. Create
- setattr(Class, __init__.__name__, __init__)
+ setattr(_class, __init__.__name__, __init__)
# For each of our ZabbixAPI.classes dict values
# Create a method and attach it to our subclass.
# e.g. 'User': ['delete', 'get', 'updatemedia', 'updateprofile',
@@ -200,25 +232,54 @@ class ZabbixAPI(object):
# User.delete
# User.get
for method_name in method_names:
- meta_method(Class, method_name)
+ meta_method(_class, method_name)
# Return our subclass with all methods attached
- return Class
+ return _class
# Attach all ZabbixAPI.classes to ZabbixAPI class through metaprogramming
-for class_name, method_names in ZabbixAPI.classes.items():
- setattr(ZabbixAPI, class_name, ZabbixAPI.meta(class_name, method_names))
+for _class_name, _method_names in ZabbixAPI.classes.items():
+ setattr(ZabbixAPI, _class_name, ZabbixAPI.meta(_class_name, _method_names))
+
+def exists(content, key='result'):
+ ''' Check if key exists in content or the size of content[key] > 0
+ '''
+ if not content.has_key(key):
+ return False
+
+ if not content[key]:
+ return False
+
+ return True
+
+def diff_content(from_zabbix, from_user):
+ ''' Compare passed in object to results returned from zabbix
+ '''
+ terms = ['search', 'output', 'groups', 'select', 'expand']
+ regex = '(' + '|'.join(terms) + ')'
+ retval = {}
+ for key, value in from_user.items():
+ if re.findall(regex, key):
+ continue
+
+ if from_zabbix[key] != str(value):
+ retval[key] = str(value)
+
+ return retval
def main():
+ '''
+ This main method runs the ZabbixAPI Ansible Module
+ '''
module = AnsibleModule(
- argument_spec = dict(
+ argument_spec=dict(
server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'),
user=dict(default=None, type='str'),
password=dict(default=None, type='str'),
zbx_class=dict(choices=ZabbixAPI.classes.keys()),
- action=dict(default=None, type='str'),
params=dict(),
debug=dict(default=False, type='bool'),
+ state=dict(default='present', type='str'),
),
#supports_check_mode=True
)
@@ -227,47 +288,83 @@ def main():
if not user:
user = os.environ['ZABBIX_USER']
- pw = module.params.get('password', None)
- if not pw:
- pw = os.environ['ZABBIX_PASSWORD']
+ passwd = module.params.get('password', None)
+ if not passwd:
+ passwd = os.environ['ZABBIX_PASSWORD']
- server = module.params['server']
- if module.params['debug']:
- options['debug'] = True
api_data = {
'user': user,
- 'password': pw,
- 'server': server,
+ 'password': passwd,
+ 'server': module.params['server'],
+ 'verbose': module.params['debug']
}
- if not user or not pw or not server:
- module.fail_json('Please specify the user, password, and the zabbix server.')
+ if not user or not passwd or not module.params['server']:
+ module.fail_json(msg='Please specify the user, password, and the zabbix server.')
zapi = ZabbixAPI(api_data)
zbx_class = module.params.get('zbx_class')
- action = module.params.get('action')
- params = module.params.get('params', {})
-
+ rpc_params = module.params.get('params', {})
+ state = module.params.get('state')
# Get the instance we are trying to call
zbx_class_inst = zapi.__getattribute__(zbx_class.lower())
- # Get the instance's method we are trying to call
- zbx_action_method = zapi.__getattribute__(zbx_class.capitalize()).__dict__[action]
- # Make the call with the incoming params
- results = zbx_action_method(zbx_class_inst, **params)
-
- # Results Section
- changed_state = False
- status = results[0]['status']
- if status not in ['200', '201']:
- #changed_state = False
- module.fail_json(msg="Http response: [%s] - Error: %s" % (str(results[0]), results[1]))
- module.exit_json(**{'results': results[1]['result']})
+ # perform get
+ # Get the instance's method we are trying to call
+ zbx_action_method = zapi.__getattribute__(zbx_class.capitalize()).__dict__['get']
+ _, content = zbx_action_method(zbx_class_inst, rpc_params)
+
+ if state == 'list':
+ module.exit_json(changed=False, results=content['result'], state="list")
+
+ if state == 'absent':
+ if not exists(content):
+ module.exit_json(changed=False, state="absent")
+ # If we are coming from a query, we need to pass in the correct rpc_params for delete.
+ # specifically the zabbix class name + 'id'
+ # if rpc_params is a list then we need to pass it. (list of ids to delete)
+ idname = zbx_class.lower() + "id"
+ if not isinstance(rpc_params, list) and content['result'][0].has_key(idname):
+ rpc_params = [content['result'][0][idname]]
+
+ zbx_action_method = zapi.__getattribute__(zbx_class.capitalize()).__dict__['delete']
+ _, content = zbx_action_method(zbx_class_inst, rpc_params)
+ module.exit_json(changed=True, results=content['result'], state="absent")
+
+ if state == 'present':
+ # It's not there, create it!
+ if not exists(content):
+ zbx_action_method = zapi.__getattribute__(zbx_class.capitalize()).__dict__['create']
+ _, content = zbx_action_method(zbx_class_inst, rpc_params)
+ module.exit_json(changed=True, results=content['result'], state='present')
+
+ # It's there and the same, do nothing!
+ diff_params = diff_content(content['result'][0], rpc_params)
+ if not diff_params:
+ module.exit_json(changed=False, results=content['result'], state="present")
+
+ # Add the id to update with
+ idname = zbx_class.lower() + "id"
+ diff_params[idname] = content['result'][0][idname]
+
+
+ ## It's there and not the same, update it!
+ zbx_action_method = zapi.__getattribute__(zbx_class.capitalize()).__dict__['update']
+ _, content = zbx_action_method(zbx_class_inst, diff_params)
+ module.exit_json(changed=True, results=content, state="present")
+
+ module.exit_json(failed=True,
+ changed=False,
+ results='Unknown state passed. %s' % state,
+ state="unknown")
+
+# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
+# import module snippets. These are required
from ansible.module_utils.basic import *
main()
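
To illustrate the state handling added above: exists() gates on a non-empty 'result', and diff_content() skips query-only keys and compares the remaining values as strings, so only real differences trigger an update. A self-contained sketch with hypothetical Zabbix data (the helper bodies mirror the module's functions):

    import re

    def exists(content, key='result'):
        # Mirrors the module's exists(): key present and non-empty.
        return bool(content.get(key))

    def diff_content(from_zabbix, from_user):
        # Mirrors the module's diff_content(): skip query-only keys,
        # compare everything else as strings.
        terms = ['search', 'output', 'groups', 'select', 'expand']
        regex = '(' + '|'.join(terms) + ')'
        retval = {}
        for key, value in from_user.items():
            if re.findall(regex, key):
                continue
            if from_zabbix[key] != str(value):
                retval[key] = str(value)
        return retval

    # Hypothetical 'get' response and user-supplied params.
    content = {'result': [{'hostid': '10105', 'host': 'web01', 'status': '0'}]}
    rpc_params = {'host': 'web01', 'status': 1, 'search': {'host': 'web01'}}

    if exists(content):
        update = diff_content(content['result'][0], rpc_params)
        if update:
            update['hostid'] = content['result'][0]['hostid']
        print(update)  # -> {'status': '1', 'hostid': '10105'}
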
diff --git a/roles/pods/meta/main.yml b/roles/pods/meta/main.yml
index c5c362c60..bddf14bb2 100644
--- a/roles/pods/meta/main.yml
+++ b/roles/pods/meta/main.yml
@@ -1,7 +1,7 @@
---
galaxy_info:
author: your name
- description:
+ description:
company: your company (optional)
# Some suggested licenses:
# - BSD (default)
@@ -14,7 +14,7 @@ galaxy_info:
min_ansible_version: 1.2
#
# Below are all platforms currently available. Just uncomment
- # the ones that apply to your role. If you don't see your
+ # the ones that apply to your role. If you don't see your
# platform on this list, let us know and we'll get it added!
#
#platforms:
@@ -121,4 +121,4 @@ dependencies: []
# dependencies available via galaxy should be listed here.
# Be sure to remove the '[]' above if you add dependencies
# to this list.
-
+