Diffstat:
-rw-r--r--  README.md | 8
-rw-r--r--  README_AWS.md | 5
-rw-r--r--  bin/zsh_functions/_ossh | 49
-rw-r--r--  inventory/aws/ec2.ini | 7
-rwxr-xr-x  inventory/aws/ec2.py | 312
-rw-r--r--  lib/aws_command.rb | 4
-rw-r--r--  lib/aws_helper.rb | 2
-rw-r--r--  lib/gce_command.rb | 18
-rw-r--r--  lib/gce_helper.rb | 32
-rw-r--r--  playbooks/aws/ansible-tower/config.yml | 22
l---------  playbooks/aws/ansible-tower/filter_plugins | 1
-rw-r--r--  playbooks/aws/ansible-tower/launch.yml | 78
l---------  playbooks/aws/ansible-tower/roles | 1
-rw-r--r--  playbooks/aws/ansible-tower/user_data.txt | 6
-rw-r--r--  playbooks/aws/ansible-tower/vars.ops.yml | 9
-rw-r--r--  playbooks/aws/ansible-tower/vars.yml | 1
-rw-r--r--  playbooks/aws/openshift-master/config.yml | 20
l---------  playbooks/aws/openshift-master/filter_plugins | 1
l---------  playbooks/aws/openshift-master/roles | 1
-rw-r--r--  playbooks/aws/openshift-master/vars.yml | 2
-rw-r--r--  playbooks/aws/openshift-node/config.yml | 21
l---------  playbooks/aws/openshift-node/filter_plugins | 1
l---------  playbooks/aws/openshift-node/roles | 1
-rw-r--r--  playbooks/aws/openshift-node/vars.yml | 2
-rw-r--r--  playbooks/aws/os2-atomic-proxy/config.yml | 4
l---------  playbooks/aws/os2-atomic-proxy/filter_plugins | 1
l---------  playbooks/aws/os2-atomic-proxy/roles | 1
-rw-r--r--  playbooks/gce/openshift-master/config.yml | 21
l---------  playbooks/gce/openshift-master/filter_plugins | 1
l---------  playbooks/gce/openshift-master/roles | 1
-rw-r--r--  playbooks/gce/openshift-master/vars.yml | 2
-rw-r--r--  playbooks/gce/openshift-node/config.yml | 28
l---------  playbooks/gce/openshift-node/filter_plugins | 1
-rw-r--r--  playbooks/gce/openshift-node/launch.yml | 10
l---------  playbooks/gce/openshift-node/roles | 1
-rw-r--r--  playbooks/gce/openshift-node/vars.yml | 2
-rw-r--r--  roles/ansible/tasks/main.yaml | 7
-rw-r--r--  roles/ansible_tower/tasks/main.yaml | 27
-rw-r--r--  roles/atomic_base/defaults/main.yml | 2
-rw-r--r--  roles/atomic_base/handlers/main.yml | 2
-rw-r--r--  roles/atomic_proxy/defaults/main.yml | 2
-rw-r--r--  roles/base_os/tasks/main.yaml | 31
-rw-r--r--  roles/docker/defaults/main.yml | 2
-rw-r--r--  roles/docker/handlers/main.yml | 2
-rw-r--r--  roles/docker/tasks/main.yml | 9
-rw-r--r--  roles/docker/vars/main.yml | 2
-rw-r--r--  roles/etcd/defaults/main.yml | 2
-rw-r--r--  roles/etcd/vars/main.yml | 2
-rw-r--r--  roles/kubernetes_apiserver/README.md | 38
-rw-r--r--  roles/kubernetes_apiserver/defaults/main.yml | 2
-rw-r--r--  roles/kubernetes_apiserver/handlers/main.yml | 4
-rw-r--r--  roles/kubernetes_apiserver/meta/main.yml | 124
-rw-r--r--  roles/kubernetes_apiserver/tasks/main.yml | 25
-rw-r--r--  roles/kubernetes_apiserver/vars/main.yml | 2
-rw-r--r--  roles/kubernetes_controller_manager/README.md | 38
-rw-r--r--  roles/kubernetes_controller_manager/defaults/main.yml | 2
-rw-r--r--  roles/kubernetes_controller_manager/handlers/main.yml | 5
-rw-r--r--  roles/kubernetes_controller_manager/meta/main.yml | 124
-rw-r--r--  roles/kubernetes_controller_manager/tasks/main.yml | 7
-rw-r--r--  roles/kubernetes_controller_manager/vars/main.yml | 2
-rw-r--r--  roles/kubernetes_kubelet/README.md | 38
-rw-r--r--  roles/kubernetes_kubelet/defaults/main.yml | 2
-rw-r--r--  roles/kubernetes_kubelet/files/kubelet.service | 10
-rw-r--r--  roles/kubernetes_kubelet/handlers/main.yml | 4
-rw-r--r--  roles/kubernetes_kubelet/meta/main.yml | 124
-rw-r--r--  roles/kubernetes_kubelet/tasks/main.yml | 31
-rw-r--r--  roles/kubernetes_kubelet/templates/cadvisor.manifest | 33
-rw-r--r--  roles/kubernetes_kubelet/templates/kubelet | 3
-rw-r--r--  roles/kubernetes_kubelet/vars/main.yml | 2
-rw-r--r--  roles/kubernetes_proxy/README.md | 38
-rw-r--r--  roles/kubernetes_proxy/defaults/main.yml | 2
-rw-r--r--  roles/kubernetes_proxy/handlers/main.yml | 4
-rw-r--r--  roles/kubernetes_proxy/meta/main.yml | 124
-rw-r--r--  roles/kubernetes_proxy/tasks/main.yml | 17
-rw-r--r--  roles/kubernetes_proxy/vars/main.yml | 2
-rw-r--r--  roles/openshift_common/README.md | 42
-rw-r--r--  roles/openshift_common/defaults/main.yml | 8
-rw-r--r--  roles/openshift_common/meta/main.yml | 15
-rw-r--r--  roles/openshift_common/tasks/main.yml | 21
-rw-r--r--  roles/openshift_common/tasks/set_facts.yml | 9
-rw-r--r--  roles/openshift_common/vars/main.yml | 6
-rw-r--r--  roles/openshift_master/README.md | 38
-rw-r--r--  roles/openshift_master/defaults/main.yml | 16
-rw-r--r--  roles/openshift_master/handlers/main.yml | 2
-rw-r--r--  roles/openshift_master/meta/main.yml | 135
-rw-r--r--  roles/openshift_master/tasks/main.yml | 73
-rw-r--r--  roles/openshift_master/vars/main.yml | 2
-rw-r--r--  roles/openshift_node/README.md | 41
-rw-r--r--  roles/openshift_node/defaults/main.yml | 6
-rw-r--r--  roles/openshift_node/handlers/main.yml | 2
-rw-r--r--  roles/openshift_node/library/openshift_register_node.py | 205
-rw-r--r--  roles/openshift_node/meta/main.yml | 135
-rw-r--r--  roles/openshift_node/tasks/main.yml | 78
-rw-r--r--  roles/openshift_node/vars/main.yml | 2
-rw-r--r--  roles/openshift_sdn_master/README.md | 41
-rw-r--r--  roles/openshift_sdn_master/defaults/main.yml | 2
-rw-r--r--  roles/openshift_sdn_master/handlers/main.yml | 3
-rw-r--r--  roles/openshift_sdn_master/meta/main.yml | 14
-rw-r--r--  roles/openshift_sdn_master/tasks/main.yml | 26
-rw-r--r--  roles/openshift_sdn_node/README.md | 51
-rw-r--r--  roles/openshift_sdn_node/defaults/main.yml | 2
-rw-r--r--  roles/openshift_sdn_node/handlers/main.yml | 3
-rw-r--r--  roles/openshift_sdn_node/meta/main.yml | 14
-rw-r--r--  roles/openshift_sdn_node/tasks/main.yml | 41
-rw-r--r--  roles/os_env_extras/files/irbrc (renamed from roles/base_os/files/irbrc) | 0
-rw-r--r--  roles/os_env_extras/files/vimrc (renamed from roles/base_os/files/vimrc) | 0
-rw-r--r--  roles/os_env_extras/tasks/main.yaml | 17
-rw-r--r--  roles/os_firewall/README.md | 66
-rw-r--r--  roles/os_firewall/defaults/main.yml | 2
-rw-r--r--  roles/os_firewall/library/os_firewall_manage_iptables.py | 254
-rw-r--r--  roles/os_firewall/meta/main.yml | 13
-rw-r--r--  roles/os_firewall/tasks/firewall/firewalld.yml | 75
-rw-r--r--  roles/os_firewall/tasks/firewall/iptables.yml | 60
-rw-r--r--  roles/os_firewall/tasks/main.yml | 6
-rw-r--r--  roles/os_ipv6_disable/tasks/main.yaml | 11
-rw-r--r--  roles/pods/defaults/main.yml | 2
-rw-r--r--  roles/pods/handlers/main.yml | 2
-rw-r--r--  roles/pods/vars/main.yml | 2
-rw-r--r--  roles/repos/defaults/main.yaml | 5
-rw-r--r--  roles/repos/files/online/RPM-GPG-KEY-redhat-beta | 61
-rw-r--r--  roles/repos/files/online/RPM-GPG-KEY-redhat-release (renamed from roles/repos/files/RPM-GPG-KEY-redhat-release) | 0
-rw-r--r--  roles/repos/files/online/epel7-kubernetes.repo (renamed from roles/repos/files/epel7-kubernetes.repo) | 0
-rw-r--r--  roles/repos/files/online/epel7-openshift.repo (renamed from roles/repos/files/epel7-openshift.repo) | 0
-rw-r--r--  roles/repos/files/online/oso-rhui-rhel-7-extras.repo (renamed from roles/repos/files/oso-rhui-rhel-7-extras.repo) | 0
-rw-r--r--  roles/repos/files/online/oso-rhui-rhel-7-server.repo (renamed from roles/repos/files/oso-rhui-rhel-7-server.repo) | 4
-rw-r--r--  roles/repos/files/online/rhel-7-libra-candidate.repo (renamed from roles/repos/files/rhel-7-libra-candidate.repo) | 0
-rw-r--r--  roles/repos/tasks/main.yaml | 47
-rw-r--r--  roles/repos/templates/yum_repo.j2 | 15
-rw-r--r--  roles/repos/vars/main.yml | 2
129 files changed, 1930 insertions, 1321 deletions
diff --git a/README.md b/README.md
index 2aa0186f7..a4b708fd9 100644
--- a/README.md
+++ b/README.md
@@ -10,7 +10,13 @@ Setup
```
yum install -y ansible rubygem-thor rubygem-parseconfig util-linux
```
-
+ - OSX:
+ ```
+ # Install ansible and python 2
+ brew install ansible python
+ # Required ruby gems
+ gem install thor parseconfig
+ ```
- Setup for a specific cloud:
- [AWS](README_AWS.md)
- [GCE](README_GCE.md)
diff --git a/README_AWS.md b/README_AWS.md
index c0f2bce75..fa1ec61ce 100644
--- a/README_AWS.md
+++ b/README_AWS.md
@@ -38,9 +38,14 @@ Alternatively, you can configure your ssh-agent to hold the credentials to conne
Install Dependencies
--------------------
1. Ansible requires python-boto for aws operations:
+RHEL/CentOS/Fedora:
```
yum install -y ansible python-boto
```
+OSX:
+```
+ pip install -U boto
+```
Test The Setup
diff --git a/bin/zsh_functions/_ossh b/bin/zsh_functions/_ossh
new file mode 100644
index 000000000..7c6cb7b0b
--- /dev/null
+++ b/bin/zsh_functions/_ossh
@@ -0,0 +1,49 @@
+#compdef ossh oscp
+
+_ossh_known_hosts(){
+ if [[ -f ~/.ansible/tmp/multi_ec2_inventory.cache ]]; then
+ print $(/usr/bin/python -c 'import json,os; z = json.loads(open("%s"%os.path.expanduser("~/.ansible/tmp/multi_ec2_inventory.cache")).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items()])')
+ fi
+}
+
+_ossh(){
+ local curcontext="$curcontext" state line
+ typeset -A opt_args
+
+ common_arguments=(
+ '(- *)'{-h,--help}'[show help]' \
+ {-v,--verbose}'[enable verbose]' \
+ {-d,--debug}'[debug mode]' \
+ {-l,--login_name}+'[login name]:login_name' \
+ {-c,--command}+'[command to run on remote host]:command' \
+ {-o,--ssh_opts}+'[SSH Options to pass to SSH]:ssh options' \
+ {-e,--env}+'[environment to use]:environment:->env' \
+ '--list[list out hosts]' \
+ ':OP Hosts:->oo_hosts'
+ )
+
+ case "$service" in
+ ossh)
+ _arguments -C -s \
+ "$common_arguments[@]" \
+ ;;
+
+ oscp)
+ _arguments -C -s \
+ "$common_arguments[@]" \
+ {-r,--recurse}'[Recursive copy]' \
+ ':file:_files'
+ ;;
+ esac
+
+ case "$state" in
+ oo_hosts)
+ _values 'oo_hosts' $(_ossh_known_hosts)
+ ;;
+ env)
+ _values 'environment' ops int stg prod
+ ;;
+ esac
+}
+
+_ossh "$@"
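Editor's note: the `python -c` one-liner in `_ossh_known_hosts` is dense; here is a readable Python sketch of the same cache lookup, assuming the cache layout the one-liner itself relies on (`{"_meta": {"hostvars": {dns: {...}}}}`).

```
# Readable sketch of the one-liner in _ossh_known_hosts: read the cached
# multi_ec2 inventory and print "<Name tag>.<environment tag>" per host.
import json
import os

cache = os.path.expanduser("~/.ansible/tmp/multi_ec2_inventory.cache")

with open(cache) as f:
    data = json.load(f)

for dns, host in data["_meta"]["hostvars"].items():
    print "%s.%s" % (host["ec2_tag_Name"], host["ec2_tag_environment"])
```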
diff --git a/inventory/aws/ec2.ini b/inventory/aws/ec2.ini
index 8a0c3ad45..eaab0a410 100644
--- a/inventory/aws/ec2.ini
+++ b/inventory/aws/ec2.ini
@@ -53,3 +53,10 @@ cache_path = ~/.ansible/tmp
# seconds, a new API call will be made, and the cache file will be updated.
# To disable the cache, set this value to 0
cache_max_age = 300
+
+# These two settings allow flexible ansible host naming based on a format
+# string and a comma-separated list of ec2 tags. The tags used must be
+# present for all instances, or the code will fail. This overrides both
+# destination_variable and vpc_destination_variable.
+# destination_format = {0}.{1}.rhcloud.com
+# destination_format_tags = Name,environment
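Editor's note: a minimal sketch of how these two settings combine (it mirrors the `add_instance()` change below); the tag values are illustrative.

```
# Each tag named in destination_format_tags is looked up on the instance
# and substituted positionally into destination_format.
destination_format = '{0}.{1}.rhcloud.com'
destination_format_tags = ['Name', 'environment']

instance_tags = {'Name': 'ex-srv', 'environment': 'prod'}  # example values

dest = destination_format.format(
    *[instance_tags.get(tag, 'nil') for tag in destination_format_tags])
print dest  # ex-srv.prod.rhcloud.com
```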
diff --git a/inventory/aws/ec2.py b/inventory/aws/ec2.py
index 9a99d91ea..f231ff4c2 100755
--- a/inventory/aws/ec2.py
+++ b/inventory/aws/ec2.py
@@ -123,6 +123,7 @@ from boto import ec2
from boto import rds
from boto import route53
import ConfigParser
+from collections import defaultdict
try:
import json
@@ -215,6 +216,14 @@ class Ec2Inventory(object):
self.destination_variable = config.get('ec2', 'destination_variable')
self.vpc_destination_variable = config.get('ec2', 'vpc_destination_variable')
+ if config.has_option('ec2', 'destination_format') and \
+ config.has_option('ec2', 'destination_format_tags'):
+ self.destination_format = config.get('ec2', 'destination_format')
+ self.destination_format_tags = config.get('ec2', 'destination_format_tags').split(',')
+ else:
+ self.destination_format = None
+ self.destination_format_tags = None
+
# Route53
self.route53_enabled = config.getboolean('ec2', 'route53')
self.route53_excluded_zones = []
@@ -222,6 +231,21 @@ class Ec2Inventory(object):
self.route53_excluded_zones.extend(
config.get('ec2', 'route53_excluded_zones', '').split(','))
+ # Include RDS instances?
+ self.rds_enabled = True
+ if config.has_option('ec2', 'rds'):
+ self.rds_enabled = config.getboolean('ec2', 'rds')
+
+ # Return all EC2 and RDS instances (if RDS is enabled)
+ if config.has_option('ec2', 'all_instances'):
+ self.all_instances = config.getboolean('ec2', 'all_instances')
+ else:
+ self.all_instances = False
+ if config.has_option('ec2', 'all_rds_instances') and self.rds_enabled:
+ self.all_rds_instances = config.getboolean('ec2', 'all_rds_instances')
+ else:
+ self.all_rds_instances = False
+
# Cache related
cache_dir = os.path.expanduser(config.get('ec2', 'cache_path'))
if not os.path.exists(cache_dir):
@@ -230,8 +254,66 @@ class Ec2Inventory(object):
self.cache_path_cache = cache_dir + "/ansible-ec2.cache"
self.cache_path_index = cache_dir + "/ansible-ec2.index"
self.cache_max_age = config.getint('ec2', 'cache_max_age')
-
+ # Configure nested groups instead of flat namespace.
+ if config.has_option('ec2', 'nested_groups'):
+ self.nested_groups = config.getboolean('ec2', 'nested_groups')
+ else:
+ self.nested_groups = False
+
+ # Configure which groups should be created.
+ group_by_options = [
+ 'group_by_instance_id',
+ 'group_by_region',
+ 'group_by_availability_zone',
+ 'group_by_ami_id',
+ 'group_by_instance_type',
+ 'group_by_key_pair',
+ 'group_by_vpc_id',
+ 'group_by_security_group',
+ 'group_by_tag_keys',
+ 'group_by_tag_none',
+ 'group_by_route53_names',
+ 'group_by_rds_engine',
+ 'group_by_rds_parameter_group',
+ ]
+ for option in group_by_options:
+ if config.has_option('ec2', option):
+ setattr(self, option, config.getboolean('ec2', option))
+ else:
+ setattr(self, option, True)
+
+ # Do we need to just include hosts that match a pattern?
+ try:
+ pattern_include = config.get('ec2', 'pattern_include')
+ if pattern_include and len(pattern_include) > 0:
+ self.pattern_include = re.compile(pattern_include)
+ else:
+ self.pattern_include = None
+ except ConfigParser.NoOptionError, e:
+ self.pattern_include = None
+
+ # Do we need to exclude hosts that match a pattern?
+ try:
+ pattern_exclude = config.get('ec2', 'pattern_exclude')
+ if pattern_exclude and len(pattern_exclude) > 0:
+ self.pattern_exclude = re.compile(pattern_exclude)
+ else:
+ self.pattern_exclude = None
+ except ConfigParser.NoOptionError, e:
+ self.pattern_exclude = None
+
+ # Instance filters (see boto and EC2 API docs). Ignore invalid filters.
+ self.ec2_instance_filters = defaultdict(list)
+ if config.has_option('ec2', 'instance_filters'):
+ for instance_filter in config.get('ec2', 'instance_filters', '').split(','):
+ instance_filter = instance_filter.strip()
+ if not instance_filter or '=' not in instance_filter:
+ continue
+ filter_key, filter_value = [x.strip() for x in instance_filter.split('=', 1)]
+ if not filter_key:
+ continue
+ self.ec2_instance_filters[filter_key].append(filter_value)
def parse_cli_args(self):
''' Command line argument processing '''
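Editor's note: a self-contained sketch of the `instance_filters` parsing above, with an assumed example config value, showing the `defaultdict` shape it produces.

```
from collections import defaultdict

# Example value: instance_filters = tag:env=prod,tag:env=stg,instance-state-name=running
raw = 'tag:env=prod,tag:env=stg,instance-state-name=running'

ec2_instance_filters = defaultdict(list)
for instance_filter in raw.split(','):
    instance_filter = instance_filter.strip()
    if not instance_filter or '=' not in instance_filter:
        continue
    filter_key, filter_value = [x.strip() for x in instance_filter.split('=', 1)]
    if filter_key:
        ec2_instance_filters[filter_key].append(filter_value)

# => {'tag:env': ['prod', 'stg'], 'instance-state-name': ['running']}
# get_instances_by_region() then issues one get_all_instances() call per key.
print dict(ec2_instance_filters)
```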
@@ -254,7 +336,8 @@ class Ec2Inventory(object):
for region in self.regions:
self.get_instances_by_region(region)
- self.get_rds_instances_by_region(region)
+ if self.rds_enabled:
+ self.get_rds_instances_by_region(region)
self.write_to_cache(self.inventory, self.cache_path_cache)
self.write_to_cache(self.index, self.cache_path_index)
@@ -275,12 +358,18 @@ class Ec2Inventory(object):
if conn is None:
print("region name: %s likely not supported, or AWS is down. connection to region failed." % region)
sys.exit(1)
-
- reservations = conn.get_all_instances()
+
+ reservations = []
+ if self.ec2_instance_filters:
+ for filter_key, filter_values in self.ec2_instance_filters.iteritems():
+ reservations.extend(conn.get_all_instances(filters = { filter_key : filter_values }))
+ else:
+ reservations = conn.get_all_instances()
+
for reservation in reservations:
for instance in reservation.instances:
self.add_instance(instance, region)
-
+
except boto.exception.BotoServerError, e:
if not self.eucalyptus:
print "Looks like AWS is down again:"
@@ -288,7 +377,7 @@ class Ec2Inventory(object):
sys.exit(1)
def get_rds_instances_by_region(self, region):
- ''' Makes an AWS API call to the list of RDS instances in a particular
+ ''' Makes an AWS API call to get the list of RDS instances in a particular
region '''
try:
@@ -321,64 +410,124 @@ class Ec2Inventory(object):
for instance in reservation.instances:
return instance
-
def add_instance(self, instance, region):
''' Adds an instance to the inventory and index, as long as it is
addressable '''
- # Only want running instances
- if instance.state != 'running':
+ # Only want running instances unless all_instances is True
+ if not self.all_instances and instance.state != 'running':
return
# Select the best destination address
- if instance.subnet_id:
- dest = getattr(instance, self.vpc_destination_variable)
+ if self.destination_format and self.destination_format_tags:
+ dest = self.destination_format.format(*[ getattr(instance, 'tags').get(tag, 'nil') for tag in self.destination_format_tags ])
+ elif instance.subnet_id:
+ dest = getattr(instance, self.vpc_destination_variable, None)
+ if dest is None:
+ dest = getattr(instance, 'tags').get(self.vpc_destination_variable, None)
else:
- dest = getattr(instance, self.destination_variable)
+ dest = getattr(instance, self.destination_variable, None)
+ if dest is None:
+ dest = getattr(instance, 'tags').get(self.destination_variable, None)
if not dest:
# Skip instances we cannot address (e.g. private VPC subnet)
return
+ # if we only want to include hosts that match a pattern, skip those that don't
+ if self.pattern_include and not self.pattern_include.match(dest):
+ return
+
+ # if we need to exclude hosts that match a pattern, skip those
+ if self.pattern_exclude and self.pattern_exclude.match(dest):
+ return
+
# Add to index
self.index[dest] = [region, instance.id]
# Inventory: Group by instance ID (always a group of 1)
- self.inventory[instance.id] = [dest]
+ if self.group_by_instance_id:
+ self.inventory[instance.id] = [dest]
+ if self.nested_groups:
+ self.push_group(self.inventory, 'instances', instance.id)
# Inventory: Group by region
- self.push(self.inventory, region, dest)
+ if self.group_by_region:
+ self.push(self.inventory, region, dest)
+ if self.nested_groups:
+ self.push_group(self.inventory, 'regions', region)
# Inventory: Group by availability zone
- self.push(self.inventory, instance.placement, dest)
+ if self.group_by_availability_zone:
+ self.push(self.inventory, instance.placement, dest)
+ if self.nested_groups:
+ if self.group_by_region:
+ self.push_group(self.inventory, region, instance.placement)
+ self.push_group(self.inventory, 'zones', instance.placement)
+
+ # Inventory: Group by Amazon Machine Image (AMI) ID
+ if self.group_by_ami_id:
+ ami_id = self.to_safe(instance.image_id)
+ self.push(self.inventory, ami_id, dest)
+ if self.nested_groups:
+ self.push_group(self.inventory, 'images', ami_id)
# Inventory: Group by instance type
- self.push(self.inventory, self.to_safe('type_' + instance.instance_type), dest)
+ if self.group_by_instance_type:
+ type_name = self.to_safe('type_' + instance.instance_type)
+ self.push(self.inventory, type_name, dest)
+ if self.nested_groups:
+ self.push_group(self.inventory, 'types', type_name)
# Inventory: Group by key pair
- if instance.key_name:
- self.push(self.inventory, self.to_safe('key_' + instance.key_name), dest)
-
+ if self.group_by_key_pair and instance.key_name:
+ key_name = self.to_safe('key_' + instance.key_name)
+ self.push(self.inventory, key_name, dest)
+ if self.nested_groups:
+ self.push_group(self.inventory, 'keys', key_name)
+
+ # Inventory: Group by VPC
+ if self.group_by_vpc_id and instance.vpc_id:
+ vpc_id_name = self.to_safe('vpc_id_' + instance.vpc_id)
+ self.push(self.inventory, vpc_id_name, dest)
+ if self.nested_groups:
+ self.push_group(self.inventory, 'vpcs', vpc_id_name)
+
# Inventory: Group by security group
- try:
- for group in instance.groups:
- key = self.to_safe("security_group_" + group.name)
- self.push(self.inventory, key, dest)
- except AttributeError:
- print 'Package boto seems a bit older.'
- print 'Please upgrade boto >= 2.3.0.'
- sys.exit(1)
+ if self.group_by_security_group:
+ try:
+ for group in instance.groups:
+ key = self.to_safe("security_group_" + group.name)
+ self.push(self.inventory, key, dest)
+ if self.nested_groups:
+ self.push_group(self.inventory, 'security_groups', key)
+ except AttributeError:
+ print 'Package boto seems a bit older.'
+ print 'Please upgrade boto >= 2.3.0.'
+ sys.exit(1)
# Inventory: Group by tag keys
- for k, v in instance.tags.iteritems():
- key = self.to_safe("tag_" + k + "=" + v)
- self.push(self.inventory, key, dest)
+ if self.group_by_tag_keys:
+ for k, v in instance.tags.iteritems():
+ key = self.to_safe("tag_" + k + "=" + v)
+ self.push(self.inventory, key, dest)
+ if self.nested_groups:
+ self.push_group(self.inventory, 'tags', self.to_safe("tag_" + k))
+ self.push_group(self.inventory, self.to_safe("tag_" + k), key)
# Inventory: Group by Route53 domain names if enabled
- if self.route53_enabled:
+ if self.route53_enabled and self.group_by_route53_names:
route53_names = self.get_instance_route53_names(instance)
for name in route53_names:
self.push(self.inventory, name, dest)
+ if self.nested_groups:
+ self.push_group(self.inventory, 'route53', name)
+
+ # Global Tag: instances without tags
+ if self.group_by_tag_none and len(instance.tags) == 0:
+ self.push(self.inventory, 'tag_none', dest)
+ if self.nested_groups:
+ self.push_group(self.inventory, 'tags', 'tag_none')
# Global Tag: tag all EC2 instances
self.push(self.inventory, 'ec2', dest)
@@ -390,15 +539,11 @@ class Ec2Inventory(object):
''' Adds an RDS instance to the inventory and index, as long as it is
addressable '''
- # Only want available instances
- if instance.status != 'available':
+ # Only want available instances unless all_rds_instances is True
+ if not self.all_rds_instances and instance.status != 'available':
return
# Select the best destination address
- #if instance.subnet_id:
- #dest = getattr(instance, self.vpc_destination_variable)
- #else:
- #dest = getattr(instance, self.destination_variable)
dest = instance.endpoint[0]
if not dest:
@@ -409,36 +554,70 @@ class Ec2Inventory(object):
self.index[dest] = [region, instance.id]
# Inventory: Group by instance ID (always a group of 1)
- self.inventory[instance.id] = [dest]
+ if self.group_by_instance_id:
+ self.inventory[instance.id] = [dest]
+ if self.nested_groups:
+ self.push_group(self.inventory, 'instances', instance.id)
# Inventory: Group by region
- self.push(self.inventory, region, dest)
+ if self.group_by_region:
+ self.push(self.inventory, region, dest)
+ if self.nested_groups:
+ self.push_group(self.inventory, 'regions', region)
# Inventory: Group by availability zone
- self.push(self.inventory, instance.availability_zone, dest)
-
+ if self.group_by_availability_zone:
+ self.push(self.inventory, instance.availability_zone, dest)
+ if self.nested_groups:
+ if self.group_by_region:
+ self.push_group(self.inventory, region, instance.availability_zone)
+ self.push_group(self.inventory, 'zones', instance.availability_zone)
+
# Inventory: Group by instance type
- self.push(self.inventory, self.to_safe('type_' + instance.instance_class), dest)
-
+ if self.group_by_instance_type:
+ type_name = self.to_safe('type_' + instance.instance_class)
+ self.push(self.inventory, type_name, dest)
+ if self.nested_groups:
+ self.push_group(self.inventory, 'types', type_name)
+
+ # Inventory: Group by VPC
+ if self.group_by_vpc_id and instance.subnet_group and instance.subnet_group.vpc_id:
+ vpc_id_name = self.to_safe('vpc_id_' + instance.subnet_group.vpc_id)
+ self.push(self.inventory, vpc_id_name, dest)
+ if self.nested_groups:
+ self.push_group(self.inventory, 'vpcs', vpc_id_name)
+
# Inventory: Group by security group
- try:
- if instance.security_group:
- key = self.to_safe("security_group_" + instance.security_group.name)
- self.push(self.inventory, key, dest)
- except AttributeError:
- print 'Package boto seems a bit older.'
- print 'Please upgrade boto >= 2.3.0.'
- sys.exit(1)
+ if self.group_by_security_group:
+ try:
+ if instance.security_group:
+ key = self.to_safe("security_group_" + instance.security_group.name)
+ self.push(self.inventory, key, dest)
+ if self.nested_groups:
+ self.push_group(self.inventory, 'security_groups', key)
+
+ except AttributeError:
+ print 'Package boto seems a bit older.'
+ print 'Please upgrade boto >= 2.3.0.'
+ sys.exit(1)
# Inventory: Group by engine
- self.push(self.inventory, self.to_safe("rds_" + instance.engine), dest)
+ if self.group_by_rds_engine:
+ self.push(self.inventory, self.to_safe("rds_" + instance.engine), dest)
+ if self.nested_groups:
+ self.push_group(self.inventory, 'rds_engines', self.to_safe("rds_" + instance.engine))
# Inventory: Group by parameter group
- self.push(self.inventory, self.to_safe("rds_parameter_group_" + instance.parameter_group.name), dest)
+ if self.group_by_rds_parameter_group:
+ self.push(self.inventory, self.to_safe("rds_parameter_group_" + instance.parameter_group.name), dest)
+ if self.nested_groups:
+ self.push_group(self.inventory, 'rds_parameter_groups', self.to_safe("rds_parameter_group_" + instance.parameter_group.name))
# Global Tag: all RDS instances
self.push(self.inventory, 'rds', dest)
+ self.inventory["_meta"]["hostvars"][dest] = self.get_host_info_dict_from_instance(instance)
+
def get_route53_records(self):
''' Get and store the map of resource records to domain names that
@@ -522,8 +701,8 @@ class Ec2Inventory(object):
for group in value:
group_ids.append(group.id)
group_names.append(group.name)
- instance_vars["ec2_security_group_ids"] = ','.join(group_ids)
- instance_vars["ec2_security_group_names"] = ','.join(group_names)
+ instance_vars["ec2_security_group_ids"] = ','.join([str(i) for i in group_ids])
+ instance_vars["ec2_security_group_names"] = ','.join([str(i) for i in group_names])
else:
pass
# TODO Product codes if someone finds them useful
@@ -544,7 +723,7 @@ class Ec2Inventory(object):
# try updating the cache
self.do_api_calls_update_cache()
if not self.args.host in self.index:
- # host migh not exist anymore
+ # host might not exist anymore
return self.json_format_dict({}, True)
(region, instance_id) = self.index[self.args.host]
@@ -553,14 +732,23 @@ class Ec2Inventory(object):
return self.json_format_dict(self.get_host_info_dict_from_instance(instance), True)
def push(self, my_dict, key, element):
- ''' Pushed an element onto an array that may not have been defined in
+ ''' Push an element onto an array that may not have been defined in
the dict '''
-
- if key in my_dict:
- my_dict[key].append(element);
+ group_info = my_dict.setdefault(key, [])
+ if isinstance(group_info, dict):
+ host_list = group_info.setdefault('hosts', [])
+ host_list.append(element)
else:
- my_dict[key] = [element]
-
+ group_info.append(element)
+
+ def push_group(self, my_dict, key, element):
+ ''' Push a group as a child of another group. '''
+ parent_group = my_dict.setdefault(key, {})
+ if not isinstance(parent_group, dict):
+ parent_group = my_dict[key] = {'hosts': parent_group}
+ child_groups = parent_group.setdefault('children', [])
+ if element not in child_groups:
+ child_groups.append(element)
def get_inventory_from_cache(self):
''' Reads the inventory from the cache file and returns it as a JSON
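Editor's note: the reworked `push()`/`push_group()` pair is what makes `nested_groups` work; a short sketch of the structure they build (group and host names illustrative).

```
# When nested_groups is enabled, plain groups stay lists of hosts while
# parent groups become dicts with a 'children' (and optionally 'hosts') key.
inventory = {}

def push(my_dict, key, element):
    group_info = my_dict.setdefault(key, [])
    if isinstance(group_info, dict):
        group_info.setdefault('hosts', []).append(element)
    else:
        group_info.append(element)

def push_group(my_dict, key, element):
    parent_group = my_dict.setdefault(key, {})
    if not isinstance(parent_group, dict):
        parent_group = my_dict[key] = {'hosts': parent_group}
    child_groups = parent_group.setdefault('children', [])
    if element not in child_groups:
        child_groups.append(element)

push(inventory, 'us-east-1', 'ex-srv.prod.rhcloud.com')
push_group(inventory, 'regions', 'us-east-1')
# inventory == {'us-east-1': ['ex-srv.prod.rhcloud.com'],
#               'regions': {'children': ['us-east-1']}}
```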
diff --git a/lib/aws_command.rb b/lib/aws_command.rb
index 1205fd5f7..267513f37 100644
--- a/lib/aws_command.rb
+++ b/lib/aws_command.rb
@@ -7,7 +7,7 @@ module OpenShift
module Ops
class AwsCommand < Thor
# WARNING: we do not currently support environments with hyphens in the name
- SUPPORTED_ENVS = %w(prod stg int tint kint test jhonce amint tdint lint)
+ SUPPORTED_ENVS = %w(prod stg int ops twiest gshipley kint test jhonce amint tdint lint jdetiber)
option :type, :required => true, :enum => LaunchHelper.get_aws_host_types,
:desc => 'The host type of the new instances.'
@@ -114,7 +114,7 @@ module OpenShift
desc "ssh", "Ssh to an instance"
def ssh(*ssh_ops, host)
- if host =~ /^([\w\d_.-]+)@([\w\d-_.]+)/
+ if host =~ /^([\w\d_.\-]+)@([\w\d\-_.]+)/
user = $1
host = $2
end
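Editor's note: the escaped hyphens matter because, mid-class, an unescaped `-` is range syntax rather than a literal. A Python sketch of the pitfall (the Ruby character classes behave analogously):

```
import re

# Mid-class, an unescaped hyphen denotes a range: [a-c] matches 'a'..'c',
# not the hyphen itself; escaping it makes it a literal.
assert re.findall(r'[a-c]+', 'ab-cd') == ['ab', 'c']    # '-' not matched
assert re.findall(r'[a\-c]+', 'ab-cd') == ['a', '-c']   # literal '-'

# With the escape in place, user@host parsing accepts hyphenated hostnames:
m = re.match(r'^([\w\d_.\-]+)@([\w\d\-_.]+)$', 'root@ex-srv.prod.rhcloud.com')
assert m.group(1) == 'root' and m.group(2) == 'ex-srv.prod.rhcloud.com'
```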
diff --git a/lib/aws_helper.rb b/lib/aws_helper.rb
index 2e90ba148..4da5d0925 100644
--- a/lib/aws_helper.rb
+++ b/lib/aws_helper.rb
@@ -19,7 +19,7 @@ module OpenShift
retval = []
hosts['_meta']['hostvars'].each do |host, info|
retval << OpenStruct.new({
- :name => info['ec2_tag_Name'],
+ :name => info['ec2_tag_Name'] || 'UNSET',
:env => info['ec2_tag_environment'] || 'UNSET',
:public_ip => info['ec2_ip_address'],
:public_dns => info['ec2_public_dns_name'],
diff --git a/lib/gce_command.rb b/lib/gce_command.rb
index b0a84d27b..214cc1c05 100644
--- a/lib/gce_command.rb
+++ b/lib/gce_command.rb
@@ -10,7 +10,7 @@ module OpenShift
module Ops
class GceCommand < Thor
# WARNING: we do not currently support environments with hyphens in the name
- SUPPORTED_ENVS = %w(prod stg int tint kint test jhonce amint tdint lint)
+ SUPPORTED_ENVS = %w(prod stg int twiest gshipley kint test jhonce amint tdint lint jdetiber)
option :type, :required => true, :enum => LaunchHelper.get_gce_host_types,
:desc => 'The host type of the new instances.'
@@ -120,14 +120,20 @@ module OpenShift
ah.run_playbook("playbooks/gce/#{host_type}/terminate.yml")
end
+ option :env, :required => false, :aliases => '-e', :enum => SUPPORTED_ENVS,
+ :desc => 'The environment to list.'
desc "list", "Lists instances."
def list()
hosts = GceHelper.get_hosts()
+ hosts.delete_if { |h| h.env != options[:env] } unless options[:env].nil?
+
+ fmt_str = "%34s %5s %8s %17s %7s"
+
puts
- puts "Instances"
- puts "---------"
- hosts.each { |k| puts " #{k.name}" }
+ puts fmt_str % ['Name','Env', 'State', 'IP Address', 'Created By']
+ puts fmt_str % ['----','---', '-----', '----------', '----------']
+ hosts.each { |h| puts fmt_str % [h.name, h.env, h.state, h.public_ip, h.created_by ] }
puts
end
@@ -137,7 +143,7 @@ module OpenShift
:desc => 'A relative path where files are written to.'
desc "scp_from", "scp files from an instance"
def scp_from(*ssh_ops, host)
- if host =~ /^([\w\d_.-]+)@([\w\d-_.]+)$/
+ if host =~ /^([\w\d_.\-]+)@([\w\d\-_.]+)$/
user = $1
host = $2
end
@@ -169,7 +175,7 @@ module OpenShift
desc "ssh", "Ssh to an instance"
def ssh(*ssh_ops, host)
- if host =~ /^([\w\d_.-]+)@([\w\d-_.]+)/
+ if host =~ /^([\w\d_.\-]+)@([\w\d\-_.]+)/
user = $1
host = $2
end
diff --git a/lib/gce_helper.rb b/lib/gce_helper.rb
index 2ff716ce1..19fa00020 100644
--- a/lib/gce_helper.rb
+++ b/lib/gce_helper.rb
@@ -5,23 +5,41 @@ module OpenShift
class GceHelper
MYDIR = File.expand_path(File.dirname(__FILE__))
- def self.get_hosts()
+ def self.get_list()
cmd = "#{MYDIR}/../inventory/gce/gce.py --list"
hosts = %x[#{cmd} 2>&1]
raise "Error: failed to list hosts\n#{hosts}" unless $?.exitstatus == 0
- # invert the hash so that it's key is the host, and values is an array of metadata
- data = {}
- JSON.parse(hosts).each do |key,value|
- value.each { |h| (data[h] ||= []) << key }
+ return JSON.parse(hosts)
+ end
+
+ def self.get_tag(tags, selector)
+ tags.each do |tag|
+ return $1 if tag =~ selector
end
- # For now, we only care about the name. In the future, we may want the other metadata included.
+ return nil
+ end
+
+ def self.get_hosts()
+ hosts = get_list()
+
retval = []
- data.keys.sort.each { |k| retval << OpenStruct.new({ :name => k }) }
+ hosts['_meta']['hostvars'].each do |host, info|
+ retval << OpenStruct.new({
+ :name => info['gce_name'],
+ :env => get_tag(info['gce_tags'], /^env-(\w+)$/) || 'UNSET',
+ :public_ip => info['gce_public_ip'],
+ :state => info['gce_status'],
+ :created_by => get_tag(info['gce_tags'], /^created-by-(\w+)$/) || 'UNSET',
+ })
+ end
+
+ retval.sort_by! { |h| [h.env, h.state, h.name] }
return retval
+
end
def self.get_host_details(host)
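Editor's note: `get_tag` scans a host's GCE tags for the first one matching a selector and returns the captured value; a Python sketch under the same assumptions (tag names like `env-prod` and `created-by-twiest` are illustrative).

```
import re

def get_tag(tags, selector):
    # Return the first capture group from the first tag matching selector.
    for tag in tags:
        m = re.match(selector, tag)
        if m:
            return m.group(1)
    return None

tags = ['env-prod', 'created-by-twiest', 'openshift-node']
print get_tag(tags, r'^env-(\w+)$')         # prod
print get_tag(tags, r'^created-by-(\w+)$')  # twiest
print get_tag(tags, r'^missing-(\w+)$')     # None -> caller falls back to UNSET
```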
diff --git a/playbooks/aws/ansible-tower/config.yml b/playbooks/aws/ansible-tower/config.yml
new file mode 100644
index 000000000..efd1b9911
--- /dev/null
+++ b/playbooks/aws/ansible-tower/config.yml
@@ -0,0 +1,22 @@
+---
+- name: "populate oo_hosts_to_config host group if needed"
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - name: Evaluate oo_host_group_exp if it's set
+ add_host: "name={{ item }} groups=oo_hosts_to_config"
+ with_items: "{{ oo_host_group_exp | default(['']) }}"
+ when: oo_host_group_exp is defined
+
+- name: "Configure instances"
+ hosts: oo_hosts_to_config
+ connection: ssh
+ user: root
+ vars_files:
+ - vars.yml
+ - "vars.{{ oo_env }}.yml"
+ roles:
+ - os_ipv6_disable
+ - ansible
+ - ansible_tower
+ - os_env_extras
diff --git a/playbooks/aws/ansible-tower/filter_plugins b/playbooks/aws/ansible-tower/filter_plugins
new file mode 120000
index 000000000..99a95e4ca
--- /dev/null
+++ b/playbooks/aws/ansible-tower/filter_plugins
@@ -0,0 +1 @@
+../../../filter_plugins
\ No newline at end of file
diff --git a/playbooks/aws/ansible-tower/launch.yml b/playbooks/aws/ansible-tower/launch.yml
new file mode 100644
index 000000000..4c29fa833
--- /dev/null
+++ b/playbooks/aws/ansible-tower/launch.yml
@@ -0,0 +1,78 @@
+---
+- name: Launch instance(s)
+ hosts: localhost
+ connection: local
+ gather_facts: no
+
+ vars:
+ inst_region: us-east-1
+ rhel7_ami: ami-a24e30ca
+ user_data_file: user_data.txt
+
+ vars_files:
+ - vars.yml
+ - "vars.{{ oo_env }}.yml"
+
+ tasks:
+ - name: Launch instances in VPC
+ ec2:
+ state: present
+ region: "{{ inst_region }}"
+ keypair: mmcgrath_libra
+ group_id: "{{ oo_security_group_ids }}"
+ instance_type: c4.xlarge
+ image: "{{ rhel7_ami }}"
+ count: "{{ oo_new_inst_names | oo_len }}"
+ user_data: "{{ lookup('file', user_data_file) }}"
+ wait: yes
+ assign_public_ip: "{{ oo_assign_public_ip }}"
+ vpc_subnet_id: "{{ oo_vpc_subnet_id }}"
+ register: ec2
+
+ - name: Add Name and environment tags to instances
+ ec2_tag: "resource={{ item.1.id }} region={{ inst_region }} state=present"
+ with_together:
+ - oo_new_inst_names
+ - ec2.instances
+ args:
+ tags:
+ Name: "{{ item.0 }}"
+
+ - name: Add other tags to instances
+ ec2_tag: "resource={{ item.id }} region={{ inst_region }} state=present"
+ with_items: ec2.instances
+ args:
+ tags: "{{ oo_new_inst_tags }}"
+
+ - name: Add new instances public IPs to oo_hosts_to_config
+ add_host: "hostname={{ item.0 }} ansible_ssh_host={{ item.1.public_ip }} groupname=oo_hosts_to_config"
+ with_together:
+ - oo_new_inst_names
+ - ec2.instances
+
+ - debug: var=ec2
+
+ - name: Wait for ssh
+ wait_for: "port=22 host={{ item.public_ip }}"
+ with_items: ec2.instances
+
+ - name: Wait for root user setup
+ command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null root@{{ item.public_ip }} echo root user is setup"
+ register: result
+ until: result.rc == 0
+ retries: 20
+ delay: 10
+ with_items: ec2.instances
+
+- name: Initial setup
+ hosts: oo_hosts_to_config
+ user: root
+ gather_facts: true
+
+ tasks:
+
+ - name: Yum update
+ yum: name=* state=latest
+
+# Apply the configs, separate so that just the configs can be run by themselves
+- include: config.yml
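Editor's note: the `with_together` loops in the launch play pair the two lists positionally, like Python's `zip()`: `item.0` is the new instance name, `item.1` the matching launched instance. A sketch with illustrative values:

```
# with_together semantics: pair oo_new_inst_names with ec2.instances by index.
oo_new_inst_names = ['tower1.ops', 'tower2.ops']
ec2_instances = [{'id': 'i-0aaa', 'public_ip': '203.0.113.10'},
                 {'id': 'i-0bbb', 'public_ip': '203.0.113.11'}]

for name, inst in zip(oo_new_inst_names, ec2_instances):
    # ec2_tag: resource={{ item.1.id }} ...  tags: Name: {{ item.0 }}
    print 'tagging %s with Name=%s' % (inst['id'], name)
```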
diff --git a/playbooks/aws/ansible-tower/roles b/playbooks/aws/ansible-tower/roles
new file mode 120000
index 000000000..20c4c58cf
--- /dev/null
+++ b/playbooks/aws/ansible-tower/roles
@@ -0,0 +1 @@
+../../../roles
\ No newline at end of file
diff --git a/playbooks/aws/ansible-tower/user_data.txt b/playbooks/aws/ansible-tower/user_data.txt
new file mode 100644
index 000000000..643d17c32
--- /dev/null
+++ b/playbooks/aws/ansible-tower/user_data.txt
@@ -0,0 +1,6 @@
+#cloud-config
+disable_root: 0
+
+system_info:
+ default_user:
+ name: root
diff --git a/playbooks/aws/ansible-tower/vars.ops.yml b/playbooks/aws/ansible-tower/vars.ops.yml
new file mode 100644
index 000000000..feb5d786a
--- /dev/null
+++ b/playbooks/aws/ansible-tower/vars.ops.yml
@@ -0,0 +1,9 @@
+---
+oo_env_long: operations
+oo_zabbix_hostgroups: ['OPS Environment']
+oo_vpc_subnet_id: subnet-4f0bdd38 # USE OPS
+oo_assign_public_ip: yes
+oo_security_group_ids:
+ - sg-02c2f267 # Libra (vpc)
+ - sg-7fc4f41a # ops (vpc)
+ - sg-4dc26829 # ops_tower (vpc)
diff --git a/playbooks/aws/ansible-tower/vars.yml b/playbooks/aws/ansible-tower/vars.yml
new file mode 100644
index 000000000..ed97d539c
--- /dev/null
+++ b/playbooks/aws/ansible-tower/vars.yml
@@ -0,0 +1 @@
+---
diff --git a/playbooks/aws/openshift-master/config.yml b/playbooks/aws/openshift-master/config.yml
index 8a5873189..c88828912 100644
--- a/playbooks/aws/openshift-master/config.yml
+++ b/playbooks/aws/openshift-master/config.yml
@@ -1,3 +1,4 @@
+---
- name: "populate oo_hosts_to_config host group if needed"
hosts: localhost
gather_facts: no
@@ -16,11 +17,11 @@
hosts: localhost
gather_facts: no
tasks:
- - name: Setting oo_node_ips fact on localhost
+ - name: Setting openshift_node_ips fact on localhost
set_fact:
- oo_node_ips: "{{ hostvars
+ openshift_node_ips: "{{ hostvars
| oo_select_keys(groups['tag_env-host-type_' + oo_env + '-openshift-node'])
- | oo_collect(attribute='ansible_eth0.ipv4.address') }}"
+ | oo_collect(attribute='ansible_default_ipv4.address') }}"
when: groups['tag_env-host-type_' + oo_env + '-openshift-node'] is defined
- name: "Configure instances"
@@ -30,11 +31,12 @@
vars_files:
- vars.yml
roles:
- - ../../../roles/base_os
- - ../../../roles/repos
+ - repos
- {
- role: ../../../roles/openshift_master,
- oo_node_ips: "{{ hostvars['localhost'].oo_node_ips | default(['']) }}",
- oo_bind_ip: "{{ hostvars[inventory_hostname].ansible_eth0.ipv4.address | default(['']) }}"
+ role: openshift_master,
+ openshift_node_ips: "{{ hostvars['localhost'].openshift_node_ips | default(['']) }}",
+ openshift_env: "{{ oo_env }}"
+ # TODO: openshift_public_ip: set to aws instance public ip
}
- - ../../../roles/pods
+ - pods
+ - os_env_extras
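Editor's note: the `set_fact` pipeline above relies on the repo's custom `oo_select_keys` and `oo_collect` filters (provided by the symlinked `filter_plugins`). A rough Python equivalent of what it computes, assuming `oo_select_keys` picks hostvars entries by hostname and `oo_collect` pulls one attribute from each:

```
hostvars = {
    'node1': {'ansible_default_ipv4': {'address': '10.0.0.11'}},
    'node2': {'ansible_default_ipv4': {'address': '10.0.0.12'}},
    'master1': {'ansible_default_ipv4': {'address': '10.0.0.1'}},
}
group = ['node1', 'node2']  # tag_env-host-type_<env>-openshift-node

selected = [hostvars[h] for h in group]                     # oo_select_keys
openshift_node_ips = [h['ansible_default_ipv4']['address']  # oo_collect
                      for h in selected]
print openshift_node_ips  # ['10.0.0.11', '10.0.0.12']
```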
diff --git a/playbooks/aws/openshift-master/filter_plugins b/playbooks/aws/openshift-master/filter_plugins
new file mode 120000
index 000000000..99a95e4ca
--- /dev/null
+++ b/playbooks/aws/openshift-master/filter_plugins
@@ -0,0 +1 @@
+../../../filter_plugins
\ No newline at end of file
diff --git a/playbooks/aws/openshift-master/roles b/playbooks/aws/openshift-master/roles
new file mode 120000
index 000000000..20c4c58cf
--- /dev/null
+++ b/playbooks/aws/openshift-master/roles
@@ -0,0 +1 @@
+../../../roles
\ No newline at end of file
diff --git a/playbooks/aws/openshift-master/vars.yml b/playbooks/aws/openshift-master/vars.yml
index e69de29bb..fb5f4ea42 100644
--- a/playbooks/aws/openshift-master/vars.yml
+++ b/playbooks/aws/openshift-master/vars.yml
@@ -0,0 +1,2 @@
+---
+openshift_debug_level: 4
diff --git a/playbooks/aws/openshift-node/config.yml b/playbooks/aws/openshift-node/config.yml
index 2170f14a3..129464e1f 100644
--- a/playbooks/aws/openshift-node/config.yml
+++ b/playbooks/aws/openshift-node/config.yml
@@ -1,3 +1,4 @@
+---
- name: "populate oo_hosts_to_config host group if needed"
hosts: localhost
gather_facts: no
@@ -16,11 +17,11 @@
hosts: localhost
gather_facts: no
tasks:
- - name: Setting oo_master_ips fact on localhost
+ - name: Setting openshift_master_ips fact on localhost
set_fact:
- oo_master_ips: "{{ hostvars
+ openshift_master_ips: "{{ hostvars
| oo_select_keys(groups['tag_env-host-type_' + oo_env + '-openshift-master'])
- | oo_collect(attribute='ansible_eth0.ipv4.address') }}"
+ | oo_collect(attribute='ansible_default_ipv4.address') }}"
when: groups['tag_env-host-type_' + oo_env + '-openshift-master'] is defined
- name: "Configure instances"
@@ -30,11 +31,13 @@
vars_files:
- vars.yml
roles:
- - ../../../roles/base_os
- - ../../../roles/repos
- - ../../../roles/docker
+ - repos
+ - docker
- {
- role: ../../../roles/openshift_node,
- oo_master_ips: "{{ hostvars['localhost'].oo_master_ips | default(['']) }}",
- oo_bind_ip: "{{ hostvars[inventory_hostname].ansible_eth0.ipv4.address | default(['']) }}"
+ role: openshift_node,
+ openshift_master_ips: "{{ hostvars['localhost'].openshift_master_ips | default(['']) }}",
+ # TODO: add openshift_master_public_ips
+ openshift_env: "{{ oo_env }}"
+ # TODO: openshift_public_ip: set to aws instance public ip
}
+ - os_env_extras
diff --git a/playbooks/aws/openshift-node/filter_plugins b/playbooks/aws/openshift-node/filter_plugins
new file mode 120000
index 000000000..99a95e4ca
--- /dev/null
+++ b/playbooks/aws/openshift-node/filter_plugins
@@ -0,0 +1 @@
+../../../filter_plugins
\ No newline at end of file
diff --git a/playbooks/aws/openshift-node/roles b/playbooks/aws/openshift-node/roles
new file mode 120000
index 000000000..20c4c58cf
--- /dev/null
+++ b/playbooks/aws/openshift-node/roles
@@ -0,0 +1 @@
+../../../roles
\ No newline at end of file
diff --git a/playbooks/aws/openshift-node/vars.yml b/playbooks/aws/openshift-node/vars.yml
index e69de29bb..fb5f4ea42 100644
--- a/playbooks/aws/openshift-node/vars.yml
+++ b/playbooks/aws/openshift-node/vars.yml
@@ -0,0 +1,2 @@
+---
+openshift_debug_level: 4
diff --git a/playbooks/aws/os2-atomic-proxy/config.yml b/playbooks/aws/os2-atomic-proxy/config.yml
index 0124156a9..7d384a665 100644
--- a/playbooks/aws/os2-atomic-proxy/config.yml
+++ b/playbooks/aws/os2-atomic-proxy/config.yml
@@ -16,5 +16,5 @@
- vars.yml
- "vars.{{ oo_env }}.yml"
roles:
- - ../../../roles/atomic_base
- - ../../../roles/atomic_proxy
+ - atomic_base
+ - atomic_proxy
diff --git a/playbooks/aws/os2-atomic-proxy/filter_plugins b/playbooks/aws/os2-atomic-proxy/filter_plugins
new file mode 120000
index 000000000..99a95e4ca
--- /dev/null
+++ b/playbooks/aws/os2-atomic-proxy/filter_plugins
@@ -0,0 +1 @@
+../../../filter_plugins
\ No newline at end of file
diff --git a/playbooks/aws/os2-atomic-proxy/roles b/playbooks/aws/os2-atomic-proxy/roles
new file mode 120000
index 000000000..20c4c58cf
--- /dev/null
+++ b/playbooks/aws/os2-atomic-proxy/roles
@@ -0,0 +1 @@
+../../../roles
\ No newline at end of file
diff --git a/playbooks/gce/openshift-master/config.yml b/playbooks/gce/openshift-master/config.yml
index cfdb5bbbe..7e754074b 100644
--- a/playbooks/gce/openshift-master/config.yml
+++ b/playbooks/gce/openshift-master/config.yml
@@ -1,3 +1,4 @@
+---
- name: "populate oo_hosts_to_config host group if needed"
hosts: localhost
gather_facts: no
@@ -16,11 +17,11 @@
hosts: localhost
gather_facts: no
tasks:
- - name: Setting oo_node_ips fact on localhost
+ - name: Setting openshift_node_ips fact on localhost
set_fact:
- oo_node_ips: "{{ hostvars
+ openshift_node_ips: "{{ hostvars
| oo_select_keys(groups['tag_env-host-type-' + oo_env + '-openshift-node'])
- | oo_collect(attribute='ansible_eth0.ipv4.address') }}"
+ | oo_collect(attribute='ansible_default_ipv4.address') }}"
when: groups['tag_env-host-type-' + oo_env + '-openshift-node'] is defined
- name: "Configure instances"
@@ -30,12 +31,12 @@
vars_files:
- vars.yml
roles:
- - ../../../roles/base_os
- - ../../../roles/repos
+ - repos
- {
- role: ../../../roles/openshift_master,
- oo_node_ips: "{{ hostvars['localhost'].oo_node_ips | default(['']) }}",
- oo_bind_ip: "{{ hostvars[inventory_hostname].ansible_eth0.ipv4.address | default(['']) }}",
- oo_public_ip: "{{ gce_public_ip }}"
+ role: openshift_master,
+ openshift_node_ips: "{{ hostvars['localhost'].openshift_node_ips | default(['']) }}",
+ openshift_public_ip: "{{ gce_public_ip }}",
+ openshift_env: "{{ oo_env }}",
}
- - ../../../roles/pods
+ - pods
+ - os_env_extras
diff --git a/playbooks/gce/openshift-master/filter_plugins b/playbooks/gce/openshift-master/filter_plugins
new file mode 120000
index 000000000..99a95e4ca
--- /dev/null
+++ b/playbooks/gce/openshift-master/filter_plugins
@@ -0,0 +1 @@
+../../../filter_plugins
\ No newline at end of file
diff --git a/playbooks/gce/openshift-master/roles b/playbooks/gce/openshift-master/roles
new file mode 120000
index 000000000..20c4c58cf
--- /dev/null
+++ b/playbooks/gce/openshift-master/roles
@@ -0,0 +1 @@
+../../../roles
\ No newline at end of file
diff --git a/playbooks/gce/openshift-master/vars.yml b/playbooks/gce/openshift-master/vars.yml
index e69de29bb..fb5f4ea42 100644
--- a/playbooks/gce/openshift-master/vars.yml
+++ b/playbooks/gce/openshift-master/vars.yml
@@ -0,0 +1,2 @@
+---
+openshift_debug_level: 4
diff --git a/playbooks/gce/openshift-node/config.yml b/playbooks/gce/openshift-node/config.yml
index c9cacbc63..85f34e814 100644
--- a/playbooks/gce/openshift-node/config.yml
+++ b/playbooks/gce/openshift-node/config.yml
@@ -1,7 +1,7 @@
+---
- name: "populate oo_hosts_to_config host group if needed"
hosts: localhost
gather_facts: no
-
tasks:
- name: Evaluate oo_host_group_exp
add_host: "name={{ item }} groups=oo_hosts_to_config"
@@ -17,15 +17,15 @@
hosts: localhost
gather_facts: no
tasks:
- - name: Setting oo_master_ips fact on localhost
+ - name: Setting openshift_master_ips fact on localhost
set_fact:
- oo_master_ips: "{{ hostvars
+ openshift_master_ips: "{{ hostvars
| oo_select_keys(groups['tag_env-host-type-' + oo_env + '-openshift-master'])
- | oo_collect(attribute='ansible_eth0.ipv4.address') }}"
+ | oo_collect(attribute='ansible_default_ipv4.address') }}"
when: groups['tag_env-host-type-' + oo_env + '-openshift-master'] is defined
- - name: Setting oo_master_public_ips fact on localhost
+ - name: Setting openshift_master_public_ips fact on localhost
set_fact:
- oo_master_public_ips: "{{ hostvars
+ openshift_master_public_ips: "{{ hostvars
| oo_select_keys(groups['tag_env-host-type-' + oo_env + '-openshift-master'])
| oo_collect(attribute='gce_public_ip') }}"
when: groups['tag_env-host-type-' + oo_env + '-openshift-master'] is defined
@@ -37,13 +37,13 @@
vars_files:
- vars.yml
roles:
- - ../../../roles/base_os
- - ../../../roles/repos
- - ../../../roles/docker
+ - repos
+ - docker
- {
- role: ../../../roles/openshift_node,
- oo_master_ips: "{{ hostvars['localhost'].oo_master_ips | default(['']) }}",
- oo_master_public_ips: "{{ hostvars['localhost'].oo_master_public_ips | default(['']) }}",
- oo_bind_ip: "{{ hostvars[inventory_hostname].ansible_eth0.ipv4.address | default(['']) }}",
- oo_public_ip: "{{ hostvars[inventory_hostname].ansible_ssh_host }}"
+ role: openshift_node,
+ openshift_master_ips: "{{ hostvars['localhost'].openshift_master_ips | default(['']) }}",
+ openshift_master_public_ips: "{{ hostvars['localhost'].openshift_master_public_ips | default(['']) }}",
+ openshift_public_ip: "{{ gce_public_ip }}",
+ openshift_env: "{{ oo_env }}",
}
+ - os_env_extras
diff --git a/playbooks/gce/openshift-node/filter_plugins b/playbooks/gce/openshift-node/filter_plugins
new file mode 120000
index 000000000..99a95e4ca
--- /dev/null
+++ b/playbooks/gce/openshift-node/filter_plugins
@@ -0,0 +1 @@
+../../../filter_plugins
\ No newline at end of file
diff --git a/playbooks/gce/openshift-node/launch.yml b/playbooks/gce/openshift-node/launch.yml
index f2800b061..935599efd 100644
--- a/playbooks/gce/openshift-node/launch.yml
+++ b/playbooks/gce/openshift-node/launch.yml
@@ -45,3 +45,13 @@
# Apply the configs, separate so that just the configs can be run by themselves
- include: config.yml
+
+# Always bounce service to pick up new credentials
+#- name: "Restart instances"
+# hosts: oo_hosts_to_config
+# connection: ssh
+# user: root
+# tasks:
+# - debug: var=groups.oo_hosts_to_config
+# - name: Restart OpenShift
+# service: name=openshift-node enabled=yes state=restarted
diff --git a/playbooks/gce/openshift-node/roles b/playbooks/gce/openshift-node/roles
new file mode 120000
index 000000000..20c4c58cf
--- /dev/null
+++ b/playbooks/gce/openshift-node/roles
@@ -0,0 +1 @@
+../../../roles
\ No newline at end of file
diff --git a/playbooks/gce/openshift-node/vars.yml b/playbooks/gce/openshift-node/vars.yml
index e69de29bb..fb5f4ea42 100644
--- a/playbooks/gce/openshift-node/vars.yml
+++ b/playbooks/gce/openshift-node/vars.yml
@@ -0,0 +1,2 @@
+---
+openshift_debug_level: 4
diff --git a/roles/ansible/tasks/main.yaml b/roles/ansible/tasks/main.yaml
new file mode 100644
index 000000000..67a04b919
--- /dev/null
+++ b/roles/ansible/tasks/main.yaml
@@ -0,0 +1,7 @@
+---
+# Install ansible client
+
+- name: Install Ansible
+ yum:
+ pkg: ansible
+ state: installed
diff --git a/roles/ansible_tower/tasks/main.yaml b/roles/ansible_tower/tasks/main.yaml
new file mode 100644
index 000000000..f58a5b1c2
--- /dev/null
+++ b/roles/ansible_tower/tasks/main.yaml
@@ -0,0 +1,27 @@
+---
+- name: install some useful packages
+ yum: name={{ item }}
+ with_items:
+ - git
+ - python-pip
+ - unzip
+ - python-psphere
+ - ansible
+ - telnet
+ - ack
+
+- name: download Tower setup
+ get_url: url=http://releases.ansible.com/ansible-tower/setup/ansible-tower-setup-2.1.1.tar.gz dest=/opt/ force=no
+
+- name: extract Tower
+ unarchive: src=/opt/ansible-tower-setup-2.1.1.tar.gz dest=/opt copy=no creates=ansible-tower-setup-2.1.1
+
+- name: Open firewalld port for http
+ firewalld: port=80/tcp permanent=true state=enabled
+
+- name: Open firewalld port for https
+ firewalld: port=443/tcp permanent=true state=enabled
+
+- name: Open firewalld port for 8080
+ firewalld: port=8080/tcp permanent=true state=enabled
+
diff --git a/roles/atomic_base/defaults/main.yml b/roles/atomic_base/defaults/main.yml
deleted file mode 100644
index 09eac6567..000000000
--- a/roles/atomic_base/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# defaults file for atomic_base
diff --git a/roles/atomic_base/handlers/main.yml b/roles/atomic_base/handlers/main.yml
deleted file mode 100644
index a9481f6c7..000000000
--- a/roles/atomic_base/handlers/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# handlers file for atomic_base
diff --git a/roles/atomic_proxy/defaults/main.yml b/roles/atomic_proxy/defaults/main.yml
deleted file mode 100644
index 0da428c27..000000000
--- a/roles/atomic_proxy/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# defaults file for atomic_proxy
diff --git a/roles/base_os/tasks/main.yaml b/roles/base_os/tasks/main.yaml
deleted file mode 100644
index 01d2898c5..000000000
--- a/roles/base_os/tasks/main.yaml
+++ /dev/null
@@ -1,31 +0,0 @@
----
-# basic role, configures irbrc, vimrc
-
-- name: Ensure irbrc is installed for user root
- copy:
- src: irbrc
- dest: /root/.irbrc
-
-- name: Ensure vimrc is installed for user root
- copy:
- src: vimrc
- dest: /root/.vimrc
-
-- name: Ensure vimrc is installed for user root
- copy:
- src: vimrc
- dest: /root/.vimrc
-
-- name: Install firewalld
- yum:
- pkg: firewalld
- state: installed
-
-- name: enable firewalld service
- command: /usr/bin/systemctl enable firewalld.service
-
-- name: start firewalld service
- command: /usr/bin/systemctl start firewalld.service
-
-- name: need to pause here, otherwise the firewalld service starting can sometimes cause ssh to fail
- pause: seconds=10
diff --git a/roles/docker/defaults/main.yml b/roles/docker/defaults/main.yml
deleted file mode 100644
index f0327f611..000000000
--- a/roles/docker/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# defaults file for docker
diff --git a/roles/docker/handlers/main.yml b/roles/docker/handlers/main.yml
deleted file mode 100644
index a2bea013a..000000000
--- a/roles/docker/handlers/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# handlers file for docker
diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml
index 2e9de3abe..2ecefd588 100644
--- a/roles/docker/tasks/main.yml
+++ b/roles/docker/tasks/main.yml
@@ -3,16 +3,13 @@
- name: Install docker
yum: pkg=docker-io
-- name: enable docker service
- command: /usr/bin/systemctl enable docker.service
-
-- name: start the docker service
- command: /usr/bin/systemctl start docker.service
+- name: enable and start the docker service
+ service: name=docker enabled=yes state=started
- copy: src=enter-container.sh dest=/usr/local/bin/enter-container.sh mode=0755
# From the origin rpm there exists instructions on how to
# setup origin properly. The following steps come from there
- name: Change root to be in the Docker group
- command: usermod -G docker -a root
+ user: name=root groups=docker append=yes
diff --git a/roles/docker/vars/main.yml b/roles/docker/vars/main.yml
deleted file mode 100644
index 3806b4c7e..000000000
--- a/roles/docker/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# vars file for docker
diff --git a/roles/etcd/defaults/main.yml b/roles/etcd/defaults/main.yml
deleted file mode 100644
index 8e1a0fa3d..000000000
--- a/roles/etcd/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# defaults file for etcd
diff --git a/roles/etcd/vars/main.yml b/roles/etcd/vars/main.yml
deleted file mode 100644
index 508856abe..000000000
--- a/roles/etcd/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# vars file for etcd
diff --git a/roles/kubernetes_apiserver/README.md b/roles/kubernetes_apiserver/README.md
deleted file mode 100644
index 225dd44b9..000000000
--- a/roles/kubernetes_apiserver/README.md
+++ /dev/null
@@ -1,38 +0,0 @@
-Role Name
-=========
-
-A brief description of the role goes here.
-
-Requirements
-------------
-
-Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
-
-Role Variables
---------------
-
-A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well.
-
-Dependencies
-------------
-
-A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.
-
-Example Playbook
-----------------
-
-Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
-
- - hosts: servers
- roles:
- - { role: username.rolename, x: 42 }
-
-License
--------
-
-BSD
-
-Author Information
-------------------
-
-An optional section for the role authors to include contact information, or a website (HTML is not allowed).
diff --git a/roles/kubernetes_apiserver/defaults/main.yml b/roles/kubernetes_apiserver/defaults/main.yml
deleted file mode 100644
index ab2f8bd50..000000000
--- a/roles/kubernetes_apiserver/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# defaults file for kubernetes_apiserver
diff --git a/roles/kubernetes_apiserver/handlers/main.yml b/roles/kubernetes_apiserver/handlers/main.yml
deleted file mode 100644
index 5ecb096f0..000000000
--- a/roles/kubernetes_apiserver/handlers/main.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-# handlers file for kubernetes_apiserver
-- name: restart kubernetes-apiserver
- service: name=kubernetes-apiserver state=restarted
diff --git a/roles/kubernetes_apiserver/meta/main.yml b/roles/kubernetes_apiserver/meta/main.yml
deleted file mode 100644
index c5c362c60..000000000
--- a/roles/kubernetes_apiserver/meta/main.yml
+++ /dev/null
@@ -1,124 +0,0 @@
----
-galaxy_info:
- author: your name
- description:
- company: your company (optional)
- # Some suggested licenses:
- # - BSD (default)
- # - MIT
- # - GPLv2
- # - GPLv3
- # - Apache
- # - CC-BY
- license: license (GPLv2, CC-BY, etc)
- min_ansible_version: 1.2
- #
- # Below are all platforms currently available. Just uncomment
- # the ones that apply to your role. If you don't see your
- # platform on this list, let us know and we'll get it added!
- #
- #platforms:
- #- name: EL
- # versions:
- # - all
- # - 5
- # - 6
- # - 7
- #- name: GenericUNIX
- # versions:
- # - all
- # - any
- #- name: Fedora
- # versions:
- # - all
- # - 16
- # - 17
- # - 18
- # - 19
- # - 20
- #- name: opensuse
- # versions:
- # - all
- # - 12.1
- # - 12.2
- # - 12.3
- # - 13.1
- # - 13.2
- #- name: Amazon
- # versions:
- # - all
- # - 2013.03
- # - 2013.09
- #- name: GenericBSD
- # versions:
- # - all
- # - any
- #- name: FreeBSD
- # versions:
- # - all
- # - 8.0
- # - 8.1
- # - 8.2
- # - 8.3
- # - 8.4
- # - 9.0
- # - 9.1
- # - 9.1
- # - 9.2
- #- name: Ubuntu
- # versions:
- # - all
- # - lucid
- # - maverick
- # - natty
- # - oneiric
- # - precise
- # - quantal
- # - raring
- # - saucy
- # - trusty
- #- name: SLES
- # versions:
- # - all
- # - 10SP3
- # - 10SP4
- # - 11
- # - 11SP1
- # - 11SP2
- # - 11SP3
- #- name: GenericLinux
- # versions:
- # - all
- # - any
- #- name: Debian
- # versions:
- # - all
- # - etch
- # - lenny
- # - squeeze
- # - wheezy
- #
- # Below are all categories currently available. Just as with
- # the platforms above, uncomment those that apply to your role.
- #
- #categories:
- #- cloud
- #- cloud:ec2
- #- cloud:gce
- #- cloud:rax
- #- clustering
- #- database
- #- database:nosql
- #- database:sql
- #- development
- #- monitoring
- #- networking
- #- packaging
- #- system
- #- web
-dependencies: []
- # List your role dependencies here, one per line. Only
- # dependencies available via galaxy should be listed here.
- # Be sure to remove the '[]' above if you add dependencies
- # to this list.
-
diff --git a/roles/kubernetes_apiserver/tasks/main.yml b/roles/kubernetes_apiserver/tasks/main.yml
deleted file mode 100644
index 995c2702e..000000000
--- a/roles/kubernetes_apiserver/tasks/main.yml
+++ /dev/null
@@ -1,25 +0,0 @@
----
-# tasks file for kubernetes_apiserver
-- name: Install kubernetes
- yum: pkg=kubernetes
-
-- name: Configure apiserver settings
- lineinfile:
- dest: /etc/sysconfig/kubernetes
- regexp: "{{ item.regex }}"
- line: "{{ item.line }}"
- with_items:
- - { regex: '^KUBE_API_MACHINES=', line: 'KUBE_API_MACHINES=\"{{ oo_node_ips | join(",") }}\"' }
- - { regex: '^KUBE_API_ADDRESS=', line: 'KUBE_API_ADDRESS=\"0.0.0.0\"' }
- notify:
- - restart kubernetes-apiserver
-
-- name: Enable apiserver
- service: name=kubernetes-apiserver enabled=yes state=started
-
-- name: Open firewalld port for apiserver
- firewalld: port=8080/tcp permanent=false state=enabled
-
-- name: Save firewalld port for apiserver
- firewalld: port=8080/tcp permanent=true state=enabled
-
diff --git a/roles/kubernetes_apiserver/vars/main.yml b/roles/kubernetes_apiserver/vars/main.yml
deleted file mode 100644
index 1f5cb46d6..000000000
--- a/roles/kubernetes_apiserver/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# vars file for kubernetes_apiserver
diff --git a/roles/kubernetes_controller_manager/README.md b/roles/kubernetes_controller_manager/README.md
deleted file mode 100644
index 225dd44b9..000000000
--- a/roles/kubernetes_controller_manager/README.md
+++ /dev/null
@@ -1,38 +0,0 @@
-Role Name
-=========
-
-A brief description of the role goes here.
-
-Requirements
-------------
-
-Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
-
-Role Variables
---------------
-
-A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well.
-
-Dependencies
-------------
-
-A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.
-
-Example Playbook
-----------------
-
-Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
-
- - hosts: servers
- roles:
- - { role: username.rolename, x: 42 }
-
-License
--------
-
-BSD
-
-Author Information
-------------------
-
-An optional section for the role authors to include contact information, or a website (HTML is not allowed).
diff --git a/roles/kubernetes_controller_manager/defaults/main.yml b/roles/kubernetes_controller_manager/defaults/main.yml
deleted file mode 100644
index 205f3e164..000000000
--- a/roles/kubernetes_controller_manager/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# defaults file for kubernetes_controller_manager
diff --git a/roles/kubernetes_controller_manager/handlers/main.yml b/roles/kubernetes_controller_manager/handlers/main.yml
deleted file mode 100644
index a763ccd6c..000000000
--- a/roles/kubernetes_controller_manager/handlers/main.yml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-# handlers file for kubernetes_controller_manager
-- name: restart kubernetes-controller-manager
- service: name=kubernetes-controller-manager state=restarted
-
diff --git a/roles/kubernetes_controller_manager/meta/main.yml b/roles/kubernetes_controller_manager/meta/main.yml
deleted file mode 100644
index c5c362c60..000000000
--- a/roles/kubernetes_controller_manager/meta/main.yml
+++ /dev/null
@@ -1,124 +0,0 @@
----
-galaxy_info:
- author: your name
- description:
- company: your company (optional)
- # Some suggested licenses:
- # - BSD (default)
- # - MIT
- # - GPLv2
- # - GPLv3
- # - Apache
- # - CC-BY
- license: license (GPLv2, CC-BY, etc)
- min_ansible_version: 1.2
- #
- # Below are all platforms currently available. Just uncomment
- # the ones that apply to your role. If you don't see your
- # platform on this list, let us know and we'll get it added!
- #
- #platforms:
- #- name: EL
- # versions:
- # - all
- # - 5
- # - 6
- # - 7
- #- name: GenericUNIX
- # versions:
- # - all
- # - any
- #- name: Fedora
- # versions:
- # - all
- # - 16
- # - 17
- # - 18
- # - 19
- # - 20
- #- name: opensuse
- # versions:
- # - all
- # - 12.1
- # - 12.2
- # - 12.3
- # - 13.1
- # - 13.2
- #- name: Amazon
- # versions:
- # - all
- # - 2013.03
- # - 2013.09
- #- name: GenericBSD
- # versions:
- # - all
- # - any
- #- name: FreeBSD
- # versions:
- # - all
- # - 8.0
- # - 8.1
- # - 8.2
- # - 8.3
- # - 8.4
- # - 9.0
- # - 9.1
- # - 9.1
- # - 9.2
- #- name: Ubuntu
- # versions:
- # - all
- # - lucid
- # - maverick
- # - natty
- # - oneiric
- # - precise
- # - quantal
- # - raring
- # - saucy
- # - trusty
- #- name: SLES
- # versions:
- # - all
- # - 10SP3
- # - 10SP4
- # - 11
- # - 11SP1
- # - 11SP2
- # - 11SP3
- #- name: GenericLinux
- # versions:
- # - all
- # - any
- #- name: Debian
- # versions:
- # - all
- # - etch
- # - lenny
- # - squeeze
- # - wheezy
- #
- # Below are all categories currently available. Just as with
- # the platforms above, uncomment those that apply to your role.
- #
- #categories:
- #- cloud
- #- cloud:ec2
- #- cloud:gce
- #- cloud:rax
- #- clustering
- #- database
- #- database:nosql
- #- database:sql
- #- development
- #- monitoring
- #- networking
- #- packaging
- #- system
- #- web
-dependencies: []
- # List your role dependencies here, one per line. Only
- # dependencies available via galaxy should be listed here.
- # Be sure to remove the '[]' above if you add dependencies
- # to this list.
-
diff --git a/roles/kubernetes_controller_manager/tasks/main.yml b/roles/kubernetes_controller_manager/tasks/main.yml
deleted file mode 100644
index 68aee6f19..000000000
--- a/roles/kubernetes_controller_manager/tasks/main.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-# tasks file for kubernetes_controller_manager
-- name: Install kubernetes
- yum: pkg=kubernetes
-
-- name: Enable controller-manager
- service: name=kubernetes-controller-manager enabled=yes state=started
diff --git a/roles/kubernetes_controller_manager/vars/main.yml b/roles/kubernetes_controller_manager/vars/main.yml
deleted file mode 100644
index 4436c9358..000000000
--- a/roles/kubernetes_controller_manager/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# vars file for kubernetes_controller_manager
diff --git a/roles/kubernetes_kubelet/README.md b/roles/kubernetes_kubelet/README.md
deleted file mode 100644
index 225dd44b9..000000000
--- a/roles/kubernetes_kubelet/README.md
+++ /dev/null
@@ -1,38 +0,0 @@
-Role Name
-=========
-
-A brief description of the role goes here.
-
-Requirements
-------------
-
-Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
-
-Role Variables
---------------
-
-A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well.
-
-Dependencies
-------------
-
-A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.
-
-Example Playbook
-----------------
-
-Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
-
- - hosts: servers
- roles:
- - { role: username.rolename, x: 42 }
-
-License
--------
-
-BSD
-
-Author Information
-------------------
-
-An optional section for the role authors to include contact information, or a website (HTML is not allowed).
diff --git a/roles/kubernetes_kubelet/defaults/main.yml b/roles/kubernetes_kubelet/defaults/main.yml
deleted file mode 100644
index 72daecfe7..000000000
--- a/roles/kubernetes_kubelet/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# defaults file for kubernetes_kubelet
diff --git a/roles/kubernetes_kubelet/files/kubelet.service b/roles/kubernetes_kubelet/files/kubelet.service
deleted file mode 100644
index fef69a803..000000000
--- a/roles/kubernetes_kubelet/files/kubelet.service
+++ /dev/null
@@ -1,10 +0,0 @@
-[Unit]
-Description=Kubernetes Kubelet Server
-Documentation=https://github.com/GoogleCloudPlatform/kubernetes
-
-[Service]
-EnvironmentFile=/etc/sysconfig/kubelet
-ExecStart=/usr/local/bin/kubelet "$DAEMON_ARGS"
-
-[Install]
-WantedBy=multi-user.target
diff --git a/roles/kubernetes_kubelet/handlers/main.yml b/roles/kubernetes_kubelet/handlers/main.yml
deleted file mode 100644
index 36b0c27b6..000000000
--- a/roles/kubernetes_kubelet/handlers/main.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-# handlers file for kubernetes_kubelet
-- name: restart kubelet
- service: name=kubernetes-kubelet state=restarted
diff --git a/roles/kubernetes_kubelet/meta/main.yml b/roles/kubernetes_kubelet/meta/main.yml
deleted file mode 100644
index c5c362c60..000000000
--- a/roles/kubernetes_kubelet/meta/main.yml
+++ /dev/null
@@ -1,124 +0,0 @@
----
-galaxy_info:
- author: your name
- description:
- company: your company (optional)
- # Some suggested licenses:
- # - BSD (default)
- # - MIT
- # - GPLv2
- # - GPLv3
- # - Apache
- # - CC-BY
- license: license (GPLv2, CC-BY, etc)
- min_ansible_version: 1.2
- #
- # Below are all platforms currently available. Just uncomment
- # the ones that apply to your role. If you don't see your
- # platform on this list, let us know and we'll get it added!
- #
- #platforms:
- #- name: EL
- # versions:
- # - all
- # - 5
- # - 6
- # - 7
- #- name: GenericUNIX
- # versions:
- # - all
- # - any
- #- name: Fedora
- # versions:
- # - all
- # - 16
- # - 17
- # - 18
- # - 19
- # - 20
- #- name: opensuse
- # versions:
- # - all
- # - 12.1
- # - 12.2
- # - 12.3
- # - 13.1
- # - 13.2
- #- name: Amazon
- # versions:
- # - all
- # - 2013.03
- # - 2013.09
- #- name: GenericBSD
- # versions:
- # - all
- # - any
- #- name: FreeBSD
- # versions:
- # - all
- # - 8.0
- # - 8.1
- # - 8.2
- # - 8.3
- # - 8.4
- # - 9.0
- # - 9.1
- # - 9.1
- # - 9.2
- #- name: Ubuntu
- # versions:
- # - all
- # - lucid
- # - maverick
- # - natty
- # - oneiric
- # - precise
- # - quantal
- # - raring
- # - saucy
- # - trusty
- #- name: SLES
- # versions:
- # - all
- # - 10SP3
- # - 10SP4
- # - 11
- # - 11SP1
- # - 11SP2
- # - 11SP3
- #- name: GenericLinux
- # versions:
- # - all
- # - any
- #- name: Debian
- # versions:
- # - all
- # - etch
- # - lenny
- # - squeeze
- # - wheezy
- #
- # Below are all categories currently available. Just as with
- # the platforms above, uncomment those that apply to your role.
- #
- #categories:
- #- cloud
- #- cloud:ec2
- #- cloud:gce
- #- cloud:rax
- #- clustering
- #- database
- #- database:nosql
- #- database:sql
- #- development
- #- monitoring
- #- networking
- #- packaging
- #- system
- #- web
-dependencies: []
- # List your role dependencies here, one per line. Only
- # dependencies available via galaxy should be listed here.
- # Be sure to remove the '[]' above if you add dependencies
- # to this list.
-
diff --git a/roles/kubernetes_kubelet/tasks/main.yml b/roles/kubernetes_kubelet/tasks/main.yml
deleted file mode 100644
index b48c0039a..000000000
--- a/roles/kubernetes_kubelet/tasks/main.yml
+++ /dev/null
@@ -1,31 +0,0 @@
----
-# tasks file for kubernetes_kubelet
-- name: Install kubernetes
- yum: pkg=kubernetes state=installed
-
-- name: Configure kubelet
- lineinfile:
- dest: /etc/sysconfig/kubernetes
- regexp: "{{ item.regex }}"
- line: "{{ item.line }}"
- with_items:
- - { regex: '^KUBE_ETCD_SERVERS=', line: 'KUBE_ETCD_SERVERS=\"http://{{ oo_master_ips[0] }}:4001\"' }
- - { regex: '^KUBE_KUBELET_ADDRESS=', line: 'KUBE_KUBELET_ADDRESS=\"0.0.0.0\"' }
- - { regex: '^KUBE_KUBELET_HOSTNAME_OVERRIDE=', line: 'KUBE_KUBELET_HOSTNAME_OVERRIDE=\"{{ hostvars[inventory_hostname].ansible_eth0.ipv4.address }}\"' }
- notify:
- - restart kubelet
-
-
-#- name: write the cadvisor config
-# template: src=cadvisor.manifest dest=/etc/kubernetes/manifests/cadvisor.manifest
-# notify:
-# - restart kubelet
-
-- name: Enable kubelet
- service: name=kubernetes-kubelet enabled=yes state=started
-
-- name: Open firewalld port for the kubelet
- firewalld: port=10250/tcp permanent=false state=enabled
-
-- name: Save firewalld port for the kubelet
- firewalld: port=10250/tcp permanent=true state=enabled
diff --git a/roles/kubernetes_kubelet/templates/cadvisor.manifest b/roles/kubernetes_kubelet/templates/cadvisor.manifest
deleted file mode 100644
index 064803cbe..000000000
--- a/roles/kubernetes_kubelet/templates/cadvisor.manifest
+++ /dev/null
@@ -1,33 +0,0 @@
-version: v1beta2
-id: cadvisor-agent
-containers:
- - name: cadvisor
- image: google/cadvisor:latest
- ports:
- - name: http
- containerPort: 8080
- hostPort: 4194
- volumeMounts:
- - name: varrun
- mountPath: /var/run
- readOnly: false
- - name: varlibdocker
- mountPath: /var/lib/docker
- readOnly: true
- - name: cgroups
- mountPath: /sys/fs/cgroup
- readOnly: true
-volumes:
- - name: varrun
- source:
- hostDir:
- path: /var/run
- - name: varlibdocker
- source:
- hostDir:
- path: /var/lib/docker
- - name: cgroups
- source:
- hostDir:
- path: /sys/fs/cgroup
-
diff --git a/roles/kubernetes_kubelet/templates/kubelet b/roles/kubernetes_kubelet/templates/kubelet
deleted file mode 100644
index fbf9321fe..000000000
--- a/roles/kubernetes_kubelet/templates/kubelet
+++ /dev/null
@@ -1,3 +0,0 @@
-
-DAEMON_ARGS=" -etcd_servers=http://10.245.1.2:4001 -hostname_override=10.245.2.2 -address=0.0.0.0 -config=/etc/kubernetes/manifests"
-
diff --git a/roles/kubernetes_kubelet/vars/main.yml b/roles/kubernetes_kubelet/vars/main.yml
deleted file mode 100644
index 000e642a2..000000000
--- a/roles/kubernetes_kubelet/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# vars file for kubernetes_kubelet
diff --git a/roles/kubernetes_proxy/README.md b/roles/kubernetes_proxy/README.md
deleted file mode 100644
index 225dd44b9..000000000
--- a/roles/kubernetes_proxy/README.md
+++ /dev/null
@@ -1,38 +0,0 @@
-Role Name
-=========
-
-A brief description of the role goes here.
-
-Requirements
-------------
-
-Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
-
-Role Variables
---------------
-
-A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well.
-
-Dependencies
-------------
-
-A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.
-
-Example Playbook
-----------------
-
-Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
-
- - hosts: servers
- roles:
- - { role: username.rolename, x: 42 }
-
-License
--------
-
-BSD
-
-Author Information
-------------------
-
-An optional section for the role authors to include contact information, or a website (HTML is not allowed).
diff --git a/roles/kubernetes_proxy/defaults/main.yml b/roles/kubernetes_proxy/defaults/main.yml
deleted file mode 100644
index e0c322437..000000000
--- a/roles/kubernetes_proxy/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# defaults file for kubernetes_proxy
diff --git a/roles/kubernetes_proxy/handlers/main.yml b/roles/kubernetes_proxy/handlers/main.yml
deleted file mode 100644
index 86ddde519..000000000
--- a/roles/kubernetes_proxy/handlers/main.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-# handlers file for kubernetes_proxy
-- name: restart kubernetes-proxy
- service: name=kubernetes-proxy state=restarted
diff --git a/roles/kubernetes_proxy/meta/main.yml b/roles/kubernetes_proxy/meta/main.yml
deleted file mode 100644
index c5c362c60..000000000
--- a/roles/kubernetes_proxy/meta/main.yml
+++ /dev/null
@@ -1,124 +0,0 @@
----
-galaxy_info:
- author: your name
- description:
- company: your company (optional)
- # Some suggested licenses:
- # - BSD (default)
- # - MIT
- # - GPLv2
- # - GPLv3
- # - Apache
- # - CC-BY
- license: license (GPLv2, CC-BY, etc)
- min_ansible_version: 1.2
- #
- # Below are all platforms currently available. Just uncomment
- # the ones that apply to your role. If you don't see your
- # platform on this list, let us know and we'll get it added!
- #
- #platforms:
- #- name: EL
- # versions:
- # - all
- # - 5
- # - 6
- # - 7
- #- name: GenericUNIX
- # versions:
- # - all
- # - any
- #- name: Fedora
- # versions:
- # - all
- # - 16
- # - 17
- # - 18
- # - 19
- # - 20
- #- name: opensuse
- # versions:
- # - all
- # - 12.1
- # - 12.2
- # - 12.3
- # - 13.1
- # - 13.2
- #- name: Amazon
- # versions:
- # - all
- # - 2013.03
- # - 2013.09
- #- name: GenericBSD
- # versions:
- # - all
- # - any
- #- name: FreeBSD
- # versions:
- # - all
- # - 8.0
- # - 8.1
- # - 8.2
- # - 8.3
- # - 8.4
- # - 9.0
- # - 9.1
- # - 9.1
- # - 9.2
- #- name: Ubuntu
- # versions:
- # - all
- # - lucid
- # - maverick
- # - natty
- # - oneiric
- # - precise
- # - quantal
- # - raring
- # - saucy
- # - trusty
- #- name: SLES
- # versions:
- # - all
- # - 10SP3
- # - 10SP4
- # - 11
- # - 11SP1
- # - 11SP2
- # - 11SP3
- #- name: GenericLinux
- # versions:
- # - all
- # - any
- #- name: Debian
- # versions:
- # - all
- # - etch
- # - lenny
- # - squeeze
- # - wheezy
- #
- # Below are all categories currently available. Just as with
- # the platforms above, uncomment those that apply to your role.
- #
- #categories:
- #- cloud
- #- cloud:ec2
- #- cloud:gce
- #- cloud:rax
- #- clustering
- #- database
- #- database:nosql
- #- database:sql
- #- development
- #- monitoring
- #- networking
- #- packaging
- #- system
- #- web
-dependencies: []
- # List your role dependencies here, one per line. Only
- # dependencies available via galaxy should be listed here.
- # Be sure to remove the '[]' above if you add dependencies
- # to this list.
-
diff --git a/roles/kubernetes_proxy/tasks/main.yml b/roles/kubernetes_proxy/tasks/main.yml
deleted file mode 100644
index 407c9ab46..000000000
--- a/roles/kubernetes_proxy/tasks/main.yml
+++ /dev/null
@@ -1,17 +0,0 @@
----
-# tasks file for kubernetes_proxy
-- name: Install kubernetes
- yum: pkg=kubernetes state=installed
-
-- name: Configure kubernetes-proxy etcd servers
- lineinfile:
- dest: /etc/sysconfig/kubernetes
- regexp: "{{ item.regex }}"
- line: "{{ item.line }}"
- with_items:
- - { regex: '^KUBE_ETCD_SERVERS=', line: 'KUBE_ETCD_SERVERS=\"http://{{ oo_master_ips[0] }}:4001\"' }
- notify:
- - restart kubernetes-proxy
-
-- name: Enable proxy
- service: name=kubernetes-proxy enabled=yes state=started
diff --git a/roles/kubernetes_proxy/vars/main.yml b/roles/kubernetes_proxy/vars/main.yml
deleted file mode 100644
index cbdcaa90d..000000000
--- a/roles/kubernetes_proxy/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# vars file for kubernetes_proxy
diff --git a/roles/openshift_common/README.md b/roles/openshift_common/README.md
new file mode 100644
index 000000000..c2ae609ff
--- /dev/null
+++ b/roles/openshift_common/README.md
@@ -0,0 +1,42 @@
+OpenShift Common
+================
+
+OpenShift common installation and configuration tasks.
+
+Requirements
+------------
+
+A RHEL 7.1 host pre-configured with access to the rhel-7-server-rpms,
+rhel-7-server-extras-rpms, and rhel-7-server-ose-beta-rpms repos.
+
+Role Variables
+--------------
+
+| Name | Default value | Description |
+|-------------------------------|------------------------------|----------------------------------------|
+| openshift_bind_ip | ansible_default_ipv4.address | IP to use for local binding |
+| openshift_debug_level | 0 | Global openshift debug log verbosity |
+| openshift_hostname_workaround | True | Workaround needed to set hostname to IP address |
+| openshift_hostname | openshift_public_ip if openshift_hostname_workaround else ansible_fqdn | hostname to use for this instance |
+| openshift_public_ip | UNDEF (Required) | Public IP address to use for this host |
+| openshift_env | default | Environment name when running multiple OpenShift instances |
+
+Dependencies
+------------
+
+os_firewall
+
+Example Playbook
+----------------
+
+TODO
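+
+Until then, a minimal sketch, assuming a hypothetical inventory group named
+`masters` and a placeholder address (`openshift_public_ip` is required by
+this role):
+
+```
+---
+# hypothetical group name and placeholder IP, adjust for your environment
+- hosts: masters
+  roles:
+  - role: openshift_common
+    openshift_public_ip: 192.0.2.10
+```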
+
+License
+-------
+
+Apache License, Version 2.0
+
+Author Information
+------------------
+
+TODO
diff --git a/roles/openshift_common/defaults/main.yml b/roles/openshift_common/defaults/main.yml
new file mode 100644
index 000000000..a541591fb
--- /dev/null
+++ b/roles/openshift_common/defaults/main.yml
@@ -0,0 +1,8 @@
+---
+openshift_bind_ip: "{{ ansible_default_ipv4.address }}"
+openshift_debug_level: 0
+
+# TODO: Once openshift stops resolving hostnames for node queries, remove
+# this...
+openshift_hostname_workaround: true
+openshift_hostname: "{{ openshift_public_ip if openshift_hostname_workaround else ansible_fqdn }}"
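+
+# Illustration: with the workaround enabled (the default above),
+# openshift_hostname resolves to openshift_public_ip; setting
+# openshift_hostname_workaround to false falls back to ansible_fqdn.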
diff --git a/roles/openshift_common/meta/main.yml b/roles/openshift_common/meta/main.yml
new file mode 100644
index 000000000..88b7677d0
--- /dev/null
+++ b/roles/openshift_common/meta/main.yml
@@ -0,0 +1,15 @@
+---
+galaxy_info:
+ author: Jason DeTiberus
+ description: OpenShift Common
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 1.7
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+dependencies:
+- { role: os_firewall }
diff --git a/roles/openshift_common/tasks/main.yml b/roles/openshift_common/tasks/main.yml
new file mode 100644
index 000000000..728bba4e4
--- /dev/null
+++ b/roles/openshift_common/tasks/main.yml
@@ -0,0 +1,21 @@
+---
+# fixme: Once openshift stops resolving hostnames for node queries, remove this...
+- name: Set hostname to IP Addr (WORKAROUND)
+ hostname: name={{ openshift_bind_ip }}
+ when: openshift_hostname_workaround
+
+- name: Configure local facts file
+ file: path=/etc/ansible/facts.d/ state=directory mode=0750
+
+- name: Set common OpenShift facts
+ include: set_facts.yml
+ facts:
+ - section: common
+ option: env
+ value: "{{ openshift_env | default('default') }}"
+ - section: common
+ option: host_type
+ value: "{{ openshift_host_type }}"
+ - section: common
+ option: debug_level
+ value: "{{ openshift_debug_level }}"
diff --git a/roles/openshift_common/tasks/set_facts.yml b/roles/openshift_common/tasks/set_facts.yml
new file mode 100644
index 000000000..349eecd1d
--- /dev/null
+++ b/roles/openshift_common/tasks/set_facts.yml
@@ -0,0 +1,9 @@
+---
+- name: "Setting local_facts"
+ ini_file:
+ dest: /etc/ansible/facts.d/openshift.fact
+ mode: 0640
+ section: "{{ item.section }}"
+ option: "{{ item.option }}"
+ value: "{{ item.value }}"
+ with_items: facts
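+# Illustration (hypothetical values): an item with section=common, option=env,
+# value=default leaves /etc/ansible/facts.d/openshift.fact containing:
+#
+#   [common]
+#   env = default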
diff --git a/roles/openshift_common/vars/main.yml b/roles/openshift_common/vars/main.yml
new file mode 100644
index 000000000..623aed9bf
--- /dev/null
+++ b/roles/openshift_common/vars/main.yml
@@ -0,0 +1,6 @@
+---
+openshift_master_credentials_dir: /var/lib/openshift/openshift.local.certificates/admin/
+
+# TODO: Upstream kubernetes only supports iptables currently; if this changes,
+# then these variables should be moved to defaults
+os_firewall_use_firewalld: False
diff --git a/roles/openshift_master/README.md b/roles/openshift_master/README.md
index 225dd44b9..5a1b889b2 100644
--- a/roles/openshift_master/README.md
+++ b/roles/openshift_master/README.md
@@ -1,38 +1,50 @@
-Role Name
-=========
+OpenShift Master
+================
-A brief description of the role goes here.
+OpenShift Master service installation
Requirements
------------
-Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
+A RHEL 7.1 host pre-configured with access to the rhel-7-server-rpms,
+rhel-7-server-extras-rpms, and rhel-server-7-ose-beta-rpms repos.
Role Variables
--------------
-A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well.
+From this role:
+| Name                                      | Default value         | Description                            |
+|------------------------------------------|-----------------------|----------------------------------------|
+| openshift_master_manage_service_externally | False | Should the openshift-master role manage the openshift-master service? |
+| openshift_master_debug_level | openshift_debug_level | Verbosity of the debug logs for openshift-master |
+| openshift_node_ips | [] | List of the OpenShift node IP addresses to pre-register when openshift-master starts up |
+| openshift_registry_url | UNDEF (Optional) | Default docker registry to use |
+
+From openshift_common:
+| Name | Default value | Description |
+|-------------------------------|---------------------|---------------------|
+| openshift_debug_level | 0 | Global openshift debug log verbosity |
+| openshift_hostname_workaround | True | Workaround needed to set hostname to IP address |
+| openshift_public_ip | UNDEF (Required) | Public IP address to use for this host |
+| openshift_hostname | openshift_public_ip if openshift_hostname_workaround else ansible_fqdn | hostname to use for this instance |
Dependencies
------------
-A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.
+openshift_common
Example Playbook
----------------
-Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
-
- - hosts: servers
- roles:
- - { role: username.rolename, x: 42 }
+TODO
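+
+Until then, a minimal sketch, assuming a hypothetical `masters` inventory
+group and placeholder addresses:
+
+```
+---
+# hypothetical group name and placeholder IPs, adjust for your environment
+- hosts: masters
+  roles:
+  - role: openshift_master
+    openshift_public_ip: 192.0.2.10
+    openshift_node_ips:
+    - 192.0.2.21
+    - 192.0.2.22
+```
+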
License
-------
-BSD
+Apache License, Version 2.0
Author Information
------------------
-An optional section for the role authors to include contact information, or a website (HTML is not allowed).
+TODO
diff --git a/roles/openshift_master/defaults/main.yml b/roles/openshift_master/defaults/main.yml
index c7d14b676..0159afbb5 100644
--- a/roles/openshift_master/defaults/main.yml
+++ b/roles/openshift_master/defaults/main.yml
@@ -1,2 +1,16 @@
---
-# defaults file for openshift_master
+openshift_master_manage_service_externally: false
+openshift_master_debug_level: "{{ openshift_debug_level | default(0) }}"
+openshift_node_ips: []
+os_firewall_allow:
+- service: etcd embedded
+ port: 4001/tcp
+- service: etcd peer
+ port: 7001/tcp
+- service: OpenShift api https
+ port: 8443/tcp
+- service: OpenShift web console https
+ port: 8444/tcp
+os_firewall_deny:
+- service: OpenShift api http
+ port: 8080/tcp
diff --git a/roles/openshift_master/handlers/main.yml b/roles/openshift_master/handlers/main.yml
index 5c30dccab..503d08d41 100644
--- a/roles/openshift_master/handlers/main.yml
+++ b/roles/openshift_master/handlers/main.yml
@@ -1,4 +1,4 @@
---
-# handlers file for openshift_master
- name: restart openshift-master
service: name=openshift-master state=restarted
+ when: not openshift_master_manage_service_externally
diff --git a/roles/openshift_master/meta/main.yml b/roles/openshift_master/meta/main.yml
index c5c362c60..41a183c3b 100644
--- a/roles/openshift_master/meta/main.yml
+++ b/roles/openshift_master/meta/main.yml
@@ -1,124 +1,15 @@
---
galaxy_info:
- author: your name
- description:
- company: your company (optional)
- # Some suggested licenses:
- # - BSD (default)
- # - MIT
- # - GPLv2
- # - GPLv3
- # - Apache
- # - CC-BY
- license: license (GPLv2, CC-BY, etc)
- min_ansible_version: 1.2
- #
- # Below are all platforms currently available. Just uncomment
- # the ones that apply to your role. If you don't see your
- # platform on this list, let us know and we'll get it added!
- #
- #platforms:
- #- name: EL
- # versions:
- # - all
- # - 5
- # - 6
- # - 7
- #- name: GenericUNIX
- # versions:
- # - all
- # - any
- #- name: Fedora
- # versions:
- # - all
- # - 16
- # - 17
- # - 18
- # - 19
- # - 20
- #- name: opensuse
- # versions:
- # - all
- # - 12.1
- # - 12.2
- # - 12.3
- # - 13.1
- # - 13.2
- #- name: Amazon
- # versions:
- # - all
- # - 2013.03
- # - 2013.09
- #- name: GenericBSD
- # versions:
- # - all
- # - any
- #- name: FreeBSD
- # versions:
- # - all
- # - 8.0
- # - 8.1
- # - 8.2
- # - 8.3
- # - 8.4
- # - 9.0
- # - 9.1
- # - 9.1
- # - 9.2
- #- name: Ubuntu
- # versions:
- # - all
- # - lucid
- # - maverick
- # - natty
- # - oneiric
- # - precise
- # - quantal
- # - raring
- # - saucy
- # - trusty
- #- name: SLES
- # versions:
- # - all
- # - 10SP3
- # - 10SP4
- # - 11
- # - 11SP1
- # - 11SP2
- # - 11SP3
- #- name: GenericLinux
- # versions:
- # - all
- # - any
- #- name: Debian
- # versions:
- # - all
- # - etch
- # - lenny
- # - squeeze
- # - wheezy
- #
- # Below are all categories currently available. Just as with
- # the platforms above, uncomment those that apply to your role.
- #
- #categories:
- #- cloud
- #- cloud:ec2
- #- cloud:gce
- #- cloud:rax
- #- clustering
- #- database
- #- database:nosql
- #- database:sql
- #- development
- #- monitoring
- #- networking
- #- packaging
- #- system
- #- web
-dependencies: []
- # List your role dependencies here, one per line. Only
- # dependencies available via galaxy should be listed here.
- # Be sure to remove the '[]' above if you add dependencies
- # to this list.
-
+ author: Jhon Honce
+ description: OpenShift Master
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 1.7
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+dependencies:
+- { role: openshift_common }
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index 9f28a3469..7a7f02be9 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -1,33 +1,64 @@
---
-# tasks file for openshift_master
-- name: Install Origin
+- name: Install OpenShift Master package
yum: pkg=openshift-master state=installed
- # fixme: Once openshift stops resolving hostnames for node queries remove this...
-- name: Set hostname to IP Addr (WORKAROUND)
- command: /usr/bin/hostname {{ oo_bind_ip }}
+- name: Configure OpenShift settings
+ lineinfile:
+ dest: /etc/sysconfig/openshift-master
+ regexp: '^OPTIONS='
+ line: "OPTIONS=\"--public-master={{ openshift_hostname }} {% if
+ openshift_node_ips %} --nodes={{ openshift_node_ips
+ | join(',') }} {% endif %} --loglevel={{ openshift_master_debug_level }}\""
+ notify:
+ - restart openshift-master
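+# Illustration (hypothetical values): with openshift_hostname=master.example.com
+# and openshift_node_ips=['10.0.0.1', '10.0.0.2'], the rendered line is roughly:
+#   OPTIONS="--public-master=master.example.com --nodes=10.0.0.1,10.0.0.2 --loglevel=0"
+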
-- name: Configure OpenShift Master settings
+- name: Set default registry url
lineinfile:
dest: /etc/sysconfig/openshift-master
- regexp: "{{ item.regex }}"
- line: "{{ item.line }}"
- with_items:
- - { regex: '^OPTIONS=', line: 'OPTIONS=\"--public-master={{ oo_public_ip }} --nodes={{ oo_node_ips | join(",") }} --loglevel=5\"' }
+ regexp: '^IMAGES='
+ line: "IMAGES={{ openshift_registry_url }}"
+ when: openshift_registry_url is defined
notify:
- - restart openshift-master
+ - restart openshift-master
-- name: Open firewalld port for etcd embedded in OpenShift
- firewalld: port=4001/tcp permanent=false state=enabled
+- name: Set master OpenShift facts
+ include: "{{ role_path | dirname }}/openshift_common/tasks/set_facts.yml"
+ facts:
+ - section: master
+ option: debug_level
+ value: "{{ openshift_master_debug_level }}"
+ - section: master
+ option: public_ip
+ value: "{{ openshift_public_ip }}"
+ - section: master
+ option: externally_managed
+ value: "{{ openshift_master_manage_service_externally }}"
-- name: Save firewalld port for etcd embedded in
- firewalld: port=4001/tcp permanent=true state=enabled
+- name: Start and enable openshift-master
+ service: name=openshift-master enabled=yes state=started
+ when: not openshift_master_manage_service_externally
+ register: result
-- name: Open firewalld port for OpenShift
- firewalld: port=8080/tcp permanent=false state=enabled
+# TODO: remove this when origin PR #1204 has landed in OSE
+- name: Pause to give the master time to generate its certificates before we attempt to copy them
+ pause: seconds=30
+ when: result | changed
-- name: Save firewalld port for OpenShift
- firewalld: port=8080/tcp permanent=true state=enabled
+- name: Disable openshift-master if openshift-master is managed externally
+ service: name=openshift-master enabled=false
+ when: openshift_master_manage_service_externally
-- name: Enable OpenShift
- service: name=openshift-master enabled=yes state=started
+# TODO: create an os_vars role that has generic env related config and move
+# the root kubeconfig setting there, cannot use dependencies to force ordering
+# with openshift_node and openshift_master because the way conditional
+# dependencies work with current ansible would also exclude the
+# openshift_common dependency.
+- name: Create .kube directory
+ file:
+ path: /root/.kube
+ state: directory
+ mode: 0700
+- name: Configure root user kubeconfig
+ command: cp /var/lib/openshift/openshift.local.certificates/admin/.kubeconfig /root/.kube/.kubeconfig
+ args:
+ creates: /root/.kube/.kubeconfig
diff --git a/roles/openshift_master/vars/main.yml b/roles/openshift_master/vars/main.yml
index 1f5cb46d6..9a8c4bba2 100644
--- a/roles/openshift_master/vars/main.yml
+++ b/roles/openshift_master/vars/main.yml
@@ -1,2 +1,2 @@
---
-# vars file for kubernetes_apiserver
+openshift_host_type: master
diff --git a/roles/openshift_node/README.md b/roles/openshift_node/README.md
index 225dd44b9..87913a0d5 100644
--- a/roles/openshift_node/README.md
+++ b/roles/openshift_node/README.md
@@ -1,38 +1,51 @@
-Role Name
-=========
+OpenShift Node
+==============
-A brief description of the role goes here.
+OpenShift Node service installation
Requirements
------------
-Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
+One or more OpenShift Master servers.
+
+A RHEL 7.1 host pre-configured with access to the rhel-7-server-rpms,
+rhel-7-server-extras-rpms, and rhel-server-7-ose-beta-rpms repos.
Role Variables
--------------
-
-A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well.
+From this role:
+| Name | Default value | Description |
+|------------------------------------------|-----------------------|----------------------------------------|
+| openshift_node_manage_service_externally | False | Should the openshift-node role manage the openshift-node service? |
+| openshift_node_debug_level | openshift_debug_level | Verbosity of the debug logs for openshift-node |
+| openshift_master_public_ips | UNDEF (Required) | List of the public IPs for the openshift-master hosts |
+| openshift_master_ips | UNDEF (Required) | List of IP addresses for the openshift-master hosts to be used for node -> master communication |
+| openshift_registry_url | UNDEF (Optional) | Default docker registry to use |
+
+From openshift_common:
+| Name | Default value | Description |
+|-------------------------------|---------------------|---------------------|
+| openshift_debug_level | 0 | Global openshift debug log verbosity |
+| openshift_hostname_workaround | True | Workaround needed to set hostname to IP address |
+| openshift_public_ip | UNDEF (Required) | Public IP address to use for this host |
+| openshift_hostname | openshift_public_ip if openshift_hostname_workaround else ansible_fqdn | hostname to use for this instance |
Dependencies
------------
-A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.
+openshift_common
Example Playbook
----------------
-Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
-
- - hosts: servers
- roles:
- - { role: username.rolename, x: 42 }
+TODO
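+
+Until then, a minimal sketch, assuming a hypothetical `nodes` inventory group
+and placeholder addresses (the master IP lists and the node's own public IP
+are required by this role):
+
+```
+---
+# hypothetical group name and placeholder IPs, adjust for your environment
+- hosts: nodes
+  roles:
+  - role: openshift_node
+    openshift_public_ip: 192.0.2.21
+    openshift_master_ips:
+    - 192.0.2.10
+    openshift_master_public_ips:
+    - 198.51.100.10
+```
+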
License
-------
-BSD
+Apache License, Version 2.0
Author Information
------------------
-An optional section for the role authors to include contact information, or a website (HTML is not allowed).
+TODO
diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml
index af92e96d7..6dc73a96e 100644
--- a/roles/openshift_node/defaults/main.yml
+++ b/roles/openshift_node/defaults/main.yml
@@ -1,2 +1,6 @@
---
-# defaults file for openshift_node
+openshift_node_manage_service_externally: false
+openshift_node_debug_level: "{{ openshift_debug_level | default(0) }}"
+os_firewall_allow:
+- service: OpenShift kubelet
+ port: 10250/tcp
diff --git a/roles/openshift_node/handlers/main.yml b/roles/openshift_node/handlers/main.yml
index afbb5a53f..f7aa36d88 100644
--- a/roles/openshift_node/handlers/main.yml
+++ b/roles/openshift_node/handlers/main.yml
@@ -1,4 +1,4 @@
---
-# handlers file for openshift_node
- name: restart openshift-node
service: name=openshift-node state=restarted
+ when: not openshift_node_manage_service_externally
diff --git a/roles/openshift_node/library/openshift_register_node.py b/roles/openshift_node/library/openshift_register_node.py
new file mode 100644
index 000000000..4b306db9f
--- /dev/null
+++ b/roles/openshift_node/library/openshift_register_node.py
@@ -0,0 +1,205 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# vim: expandtab:tabstop=4:shiftwidth=4
+
+import os
+import multiprocessing
+import socket
+import re
+import json
+import subprocess
+from subprocess import check_output, Popen
+
+DOCUMENTATION = '''
+---
+module: openshift_register_node
+short_description: This module registers an openshift-node with an openshift-master
+author: Jason DeTiberus
+requirements: [ openshift-node ]
+notes: Node resources can be specified using either the resources option or the following options: cpu, memory
+options:
+ name:
+ description:
+ - id for this node (usually the node fqdn)
+ required: true
+ hostIP:
+ description:
+ - ip address for this node
+ required: false
+ cpu:
+ description:
+ - number of CPUs for this node
+ required: false
+ default: number of logical CPUs detected
+ memory:
+ description:
+ - Memory available for this node in bytes
+ required: false
+ default: 80% MemTotal
+ resources:
+ description:
+ - A json string representing Node resources
+ required: false
+'''
+EXAMPLES = '''
+# Minimal node registration
+- openshift_register_node: name=ose3.node.example.com
+
+# Node registration with all options (using cpu and memory options)
+- openshift_register_node:
+ name: ose3.node.example.com
+ hostIP: 192.168.1.1
+ apiVersion: v1beta1
+ cpu: 1
+ memory: 1073741824
+
+# Node registration with all options (using resources option)
+- openshift_register_node:
+ name: ose3.node.example.com
+ hostIP: 192.168.1.1
+ apiVersion: v1beta1
+ resources:
+ capacity:
+ cpu: 1
+ memory: 1073741824
+'''
+
+def main():
+ module = AnsibleModule(
+ argument_spec = dict(
+ name = dict(required = True),
+ hostIP = dict(),
+ apiVersion = dict(),
+ cpu = dict(),
+ memory = dict(),
+ resources = dict(),
+ client_config = dict(),
+ client_cluster = dict(default = 'master'),
+ client_context = dict(default = 'master'),
+ client_user = dict(default = 'admin')
+ ),
+ mutually_exclusive = [
+ ['resources', 'cpu'],
+ ['resources', 'memory']
+ ],
+ supports_check_mode=True
+ )
+
+ user_has_client_config = os.path.exists(os.path.expanduser('~/.kube/.kubeconfig'))
+ if not (user_has_client_config or module.params['client_config']):
+ module.fail_json(msg="Could not locate client configuration, "
+ "client_config must be specified if "
+ "~/.kube/.kubeconfig is not present")
+
+ client_opts = []
+ if module.params['client_config']:
+ client_opts.append("--kubeconfig=%s" % module.params['client_config'])
+
+ try:
+ output = check_output(["/usr/bin/openshift", "ex", "config", "view",
+ "-o", "json"] + client_opts,
+ stderr=subprocess.STDOUT)
+ except subprocess.CalledProcessError as e:
+ module.fail_json(msg="Failed to get client configuration",
+ command=e.cmd, returncode=e.returncode, output=e.output)
+
+ config = json.loads(output)
+ if not (bool(config['clusters']) or bool(config['contexts']) or
+ bool(config['current-context']) or bool(config['users'])):
+ module.fail_json(msg="Client config missing required values",
+ output=output)
+
+ client_context = module.params['client_context']
+ if client_context:
+ if client_context not in config['contexts']:
+ module.fail_json(msg="Context %s not found in client config" %
+ client_context)
+ if not config['current-context'] or config['current-context'] != client_context:
+ client_opts.append("--context=%s" % client_context)
+
+ client_user = module.params['client_user']
+ if client_user:
+ if client_user not in config['users']:
+ module.fail_json(msg="User %s not found in client config" %
+ client_user)
+ if client_user != config['contexts'][client_context]['user']:
+ client_opts.append("--user=%s" % client_user)
+
+ client_cluster = module.params['client_cluster']
+ if client_cluster:
+ if client_cluster not in config['clusters']:
+ module.fail_json(msg="Cluster %s not found in client config" %
+ client_cluster)
+ if client_cluster != config['contexts'][client_context]['cluster']:
+ client_opts.append("--cluster=%s" % client_cluster)
+
+ node_def = dict(
+ id = module.params['name'],
+ kind = 'Node',
+ apiVersion = 'v1beta1',
+ resources = dict(
+ capacity = dict()
+ )
+ )
+
+ for key, value in module.params.iteritems():
+ if key in ['cpu', 'memory']:
+ node_def['resources']['capacity'][key] = value
+ elif key == 'name':
+ node_def['id'] = value
+ elif key != 'client_config':
+ if value:
+ node_def[key] = value
+
+ if not node_def['resources']['capacity']['cpu']:
+ node_def['resources']['capacity']['cpu'] = multiprocessing.cpu_count()
+
+ if not node_def['resources']['capacity']['memory']:
+ with open('/proc/meminfo', 'r') as mem:
+ for line in mem:
+ entries = line.split()
+ if str(entries.pop(0)) == 'MemTotal:':
+ mem_total_kb = int(entries.pop(0))
+ mem_capacity = int(mem_total_kb * 1024 * .75)
+ node_def['resources']['capacity']['memory'] = mem_capacity
+ break
+
+ try:
+ output = check_output(["/usr/bin/osc", "get", "nodes"] + client_opts,
+ stderr=subprocess.STDOUT)
+ except subprocess.CalledProcessError as e:
+ module.fail_json(msg="Failed to get node list", command=e.cmd,
+ returncode=e.returncode, output=e.output)
+
+ if re.search(module.params['name'], output, re.MULTILINE):
+ module.exit_json(changed=False, node_def=node_def)
+ elif module.check_mode:
+ module.exit_json(changed=True, node_def=node_def)
+
+ config_def = dict(
+ metadata = dict(
+ name = "add-node-%s" % module.params['name']
+ ),
+ kind = 'Config',
+ apiVersion = 'v1beta1',
+ items = [node_def]
+ )
+
+ p = Popen(["/usr/bin/osc"] + client_opts + ["create", "node"] + ["-f", "-"],
+ stdin=subprocess.PIPE, stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE, close_fds=True)
+ (out, err) = p.communicate(module.jsonify(config_def))
+ ret = p.returncode
+
+ if ret != 0:
+ if re.search("minion \"%s\" already exists" % module.params['name'],
+ err):
+ module.exit_json(changed=False,
+ msg="node definition already exists", config_def=config_def)
+ else:
+ module.fail_json(msg="Node creation failed.", ret=ret, out=out,
+ err=err, config_def=config_def)
+
+ module.exit_json(changed=True, out=out, err=err, ret=ret,
+ node_def=config_def)
+
+# import module snippets
+from ansible.module_utils.basic import *
+main()
diff --git a/roles/openshift_node/meta/main.yml b/roles/openshift_node/meta/main.yml
index c5c362c60..c92008a77 100644
--- a/roles/openshift_node/meta/main.yml
+++ b/roles/openshift_node/meta/main.yml
@@ -1,124 +1,15 @@
---
galaxy_info:
- author: your name
- description:
- company: your company (optional)
- # Some suggested licenses:
- # - BSD (default)
- # - MIT
- # - GPLv2
- # - GPLv3
- # - Apache
- # - CC-BY
- license: license (GPLv2, CC-BY, etc)
- min_ansible_version: 1.2
- #
- # Below are all platforms currently available. Just uncomment
- # the ones that apply to your role. If you don't see your
- # platform on this list, let us know and we'll get it added!
- #
- #platforms:
- #- name: EL
- # versions:
- # - all
- # - 5
- # - 6
- # - 7
- #- name: GenericUNIX
- # versions:
- # - all
- # - any
- #- name: Fedora
- # versions:
- # - all
- # - 16
- # - 17
- # - 18
- # - 19
- # - 20
- #- name: opensuse
- # versions:
- # - all
- # - 12.1
- # - 12.2
- # - 12.3
- # - 13.1
- # - 13.2
- #- name: Amazon
- # versions:
- # - all
- # - 2013.03
- # - 2013.09
- #- name: GenericBSD
- # versions:
- # - all
- # - any
- #- name: FreeBSD
- # versions:
- # - all
- # - 8.0
- # - 8.1
- # - 8.2
- # - 8.3
- # - 8.4
- # - 9.0
- # - 9.1
- # - 9.1
- # - 9.2
- #- name: Ubuntu
- # versions:
- # - all
- # - lucid
- # - maverick
- # - natty
- # - oneiric
- # - precise
- # - quantal
- # - raring
- # - saucy
- # - trusty
- #- name: SLES
- # versions:
- # - all
- # - 10SP3
- # - 10SP4
- # - 11
- # - 11SP1
- # - 11SP2
- # - 11SP3
- #- name: GenericLinux
- # versions:
- # - all
- # - any
- #- name: Debian
- # versions:
- # - all
- # - etch
- # - lenny
- # - squeeze
- # - wheezy
- #
- # Below are all categories currently available. Just as with
- # the platforms above, uncomment those that apply to your role.
- #
- #categories:
- #- cloud
- #- cloud:ec2
- #- cloud:gce
- #- cloud:rax
- #- clustering
- #- database
- #- database:nosql
- #- database:sql
- #- development
- #- monitoring
- #- networking
- #- packaging
- #- system
- #- web
-dependencies: []
- # List your role dependencies here, one per line. Only
- # dependencies available via galaxy should be listed here.
- # Be sure to remove the '[]' above if you add dependencies
- # to this list.
-
+ author: Jhon Honce
+ description: OpenShift Node
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 1.7
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+dependencies:
+- { role: openshift_common }
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index e0041a90c..df2722a94 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -1,43 +1,79 @@
---
-
-# tasks file for openshift_node
-- name: Install OpenShift
+- name: Install OpenShift Node package
yum: pkg=openshift-node state=installed
- # fixme: Once openshift stops resolving hostnames for node queries remove this...
-- name: Set hostname to IP Addr (WORKAROUND)
- hostname: name={{ oo_bind_ip }}
-
- local_action: command /usr/bin/mktemp -d /tmp/openshift-ansible-XXXXXXX
register: mktemp
- name: Retrieve OpenShift Master credentials
- local_action: command /usr/bin/rsync --compress --archive --rsh 'ssh -S none -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null' root@{{ oo_master_public_ips[0] }}:/var/lib/openshift/openshift.local.certificates/admin/ {{ mktemp.stdout }}
+ local_action: command /usr/bin/rsync --compress --archive --rsh 'ssh -S none -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null' root@{{ openshift_master_public_ips[0] }}:/var/lib/openshift/openshift.local.certificates/admin/ {{ mktemp.stdout }}
ignore_errors: yes
- file: path=/var/lib/openshift/openshift.local.certificates/admin state=directory
- name: Store OpenShift Master credentials
- local_action: command /usr/bin/rsync --compress --archive --rsh 'ssh -S none -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null' {{ mktemp.stdout }}/ root@{{ oo_public_ip }}:/var/lib/openshift/openshift.local.certificates/admin
+ local_action: command /usr/bin/rsync --compress --archive --rsh 'ssh -S none -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null' {{ mktemp.stdout }}/ root@{{ openshift_public_ip }}:/var/lib/openshift/openshift.local.certificates/admin
ignore_errors: yes
+- local_action: file name={{ mktemp.stdout }} state=absent
+
- name: Configure OpenShift Node settings
lineinfile:
dest: /etc/sysconfig/openshift-node
- regexp: "{{ item.regex }}"
- line: "{{ item.line }}"
- with_items:
- - { regex: '^OPTIONS=', line: 'OPTIONS=\"--master=http://{{ oo_master_ips[0] }}:8080 --loglevel=5\"' }
+ regexp: '^OPTIONS='
+ line: "OPTIONS=\"--master=https://{{ openshift_master_ips[0] }}:8443 --loglevel={{ openshift_node_debug_level }}\""
+ notify:
+ - restart openshift-node
+
+- name: Set default registry url
+ lineinfile:
+ dest: /etc/sysconfig/openshift-node
+ regexp: '^IMAGES='
+ line: "IMAGES={{ openshift_registry_url }}"
+ when: openshift_registry_url is defined
notify:
- - restart openshift-node
+ - restart openshift-node
-- name: Open firewalld port for OpenShift
- firewalld: port=10250/tcp permanent=false state=enabled
+- name: Set OpenShift node facts
+ include: "{{ role_path | dirname }}/openshift_common/tasks/set_facts.yml"
+ facts:
+ - section: node
+ option: debug_level
+ value: "{{ openshift_node_debug_level }}"
+ - section: node
+ option: public_ip
+ value: "{{ openshift_public_ip }}"
+ - section: node
+ option: externally_managed
+ value: "{{ openshift_node_manage_service_externally }}"
-- name: Save firewalld port for OpenShift
- firewalld: port=10250/tcp permanent=true state=enabled
+# fixme: Once the openshift_cluster playbook is published, state should be changed to started
+# Always bounce service to pick up new credentials
+- name: Start and enable openshift-node
+ service: name=openshift-node enabled=yes state=restarted
+ when: not openshift_node_manage_service_externally
-- name: Enable OpenShift
- service: name=openshift-node enabled=yes state=started
+- name: Disable openshift-node if openshift-node is managed externally
+ service: name=openshift-node enabled=false
+ when: openshift_node_manage_service_externally
-- local_action: file name={{ mktemp.stdout }} state=absent
+# TODO: create an os_vars role that has generic env related config and move
+# the root kubeconfig setting there, cannot use dependencies to force ordering
+# with openshift_node and openshift_master because the way conditional
+# dependencies work with current ansible would also exclude the
+# openshift_common dependency.
+- name: Create .kube directory
+ file:
+ path: /root/.kube
+ state: directory
+ mode: 0700
+- name: Configure root user kubeconfig
+ command: cp /var/lib/openshift/openshift.local.certificates/admin/.kubeconfig /root/.kube/.kubeconfig
+ args:
+ creates: /root/.kube/.kubeconfig
+
+# TODO: expose openshift_register_node options to allow for overriding the
+# defaults.
+- name: Register node (if not already registered)
+ openshift_register_node:
+ name: "{{ openshift_hostname }}"
diff --git a/roles/openshift_node/vars/main.yml b/roles/openshift_node/vars/main.yml
index 3184e8ac7..9841d52f9 100644
--- a/roles/openshift_node/vars/main.yml
+++ b/roles/openshift_node/vars/main.yml
@@ -1,2 +1,2 @@
---
-# vars file for openshift_node
+openshift_host_type: node
diff --git a/roles/openshift_sdn_master/README.md b/roles/openshift_sdn_master/README.md
new file mode 100644
index 000000000..d0dcf6d11
--- /dev/null
+++ b/roles/openshift_sdn_master/README.md
@@ -0,0 +1,41 @@
+OpenShift SDN Master
+====================
+
+OpenShift SDN Master service installation
+
+Requirements
+------------
+
+A host with the openshift_master role applied
+
+Role Variables
+--------------
+
+From this role:
+| Name | Default value | Description |
+|----------------------------------|-----------------------|--------------------------------------------------|
+| openshift_sdn_master_debug_level | openshift_debug_level | Verbosity of the debug logs for openshift-sdn-master |
+
+From openshift_common:
+| Name | Default value | Description |
+|-----------------------|---------------|--------------------------------------|
+| openshift_debug_level | 0 | Global openshift debug log verbosity |
+
+Dependencies
+------------
+
+None.
+
+Example Playbook
+----------------
+
+TODO
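+
+Until then, a minimal sketch, assuming a hypothetical `masters` group whose
+hosts already carry the openshift_master role (see Requirements):
+
+```
+---
+# hypothetical group name, adjust for your environment
+- hosts: masters
+  roles:
+  - role: openshift_sdn_master
+```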
+
+License
+-------
+
+Apache License, Version 2.0
+
+Author Information
+------------------
+
+TODO
diff --git a/roles/openshift_sdn_master/defaults/main.yml b/roles/openshift_sdn_master/defaults/main.yml
new file mode 100644
index 000000000..da7655546
--- /dev/null
+++ b/roles/openshift_sdn_master/defaults/main.yml
@@ -0,0 +1,2 @@
+---
+openshift_sdn_master_debug_level: "{{ openshift_debug_level | default(0) }}"
diff --git a/roles/openshift_sdn_master/handlers/main.yml b/roles/openshift_sdn_master/handlers/main.yml
new file mode 100644
index 000000000..cd645f2c5
--- /dev/null
+++ b/roles/openshift_sdn_master/handlers/main.yml
@@ -0,0 +1,3 @@
+---
+- name: restart openshift-sdn-master
+ service: name=openshift-sdn-master state=restarted
diff --git a/roles/openshift_sdn_master/meta/main.yml b/roles/openshift_sdn_master/meta/main.yml
new file mode 100644
index 000000000..e6e5514d1
--- /dev/null
+++ b/roles/openshift_sdn_master/meta/main.yml
@@ -0,0 +1,14 @@
+---
+galaxy_info:
+ author: Jason DeTiberus
+ description: OpenShift SDN Master
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 1.7
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+dependencies: []
diff --git a/roles/openshift_sdn_master/tasks/main.yml b/roles/openshift_sdn_master/tasks/main.yml
new file mode 100644
index 000000000..e1761afdc
--- /dev/null
+++ b/roles/openshift_sdn_master/tasks/main.yml
@@ -0,0 +1,26 @@
+---
+- name: Install openshift-sdn-master
+ yum:
+ pkg: openshift-sdn-master
+ state: installed
+
+- name: Configure openshift-sdn-master settings
+ lineinfile:
+ dest: /etc/sysconfig/openshift-sdn-master
+ regexp: '^OPTIONS='
+ line: "OPTIONS=\"-v={{ openshift_sdn_master_debug_level }}\""
+ notify:
+ - restart openshift-sdn-master
+
+- name: Set openshift-sdn-master facts
+ include: "{{ role_path | dirname }}/openshift_common/tasks/set_facts.yml"
+ facts:
+ - section: sdn-master
+ option: debug_level
+ value: "{{ openshift_sdn_master_debug_level }}"
+
+- name: Enable openshift-sdn-master
+ service:
+ name: openshift-sdn-master
+ enabled: yes
+ state: started
diff --git a/roles/openshift_sdn_node/README.md b/roles/openshift_sdn_node/README.md
new file mode 100644
index 000000000..294550219
--- /dev/null
+++ b/roles/openshift_sdn_node/README.md
@@ -0,0 +1,51 @@
+OpenShift SDN Node
+==================
+
+OpenShift SDN Node service installation
+
+Requirements
+------------
+
+A host with the openshift_node role applied
+
+Role Variables
+--------------
+
+From this role:
+| Name | Default value | Description |
+|--------------------------------|-----------------------|--------------------------------------------------|
+| openshift_sdn_node_debug_level | openshift_debug_level | Verbosity of the debug logs for openshift-sdn-node |
+
+
+From openshift_node:
+| Name | Default value | Description |
+|-----------------------|------------------|--------------------------------------|
+| openshift_master_ips | UNDEF (Required) | List of IP addresses for the openshift-master hosts to be used for node -> master communication |
+
+
+From openshift_common:
+| Name | Default value | Description |
+|-------------------------------|---------------------|----------------------------------------|
+| openshift_debug_level | 0 | Global openshift debug log verbosity |
+| openshift_hostname_workaround | True | Workaround needed to set hostname to IP address |
+| openshift_public_ip | UNDEF (Required) | Public IP address to use for this host |
+| openshift_hostname | openshift_public_ip if openshift_hostname_workaround else ansible_fqdn | hostname to use for this instance |
+
+Dependencies
+------------
+
+None.
+
+Example Playbook
+----------------
+
+TODO
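+
+Until then, a minimal sketch, assuming a hypothetical `nodes` group whose
+hosts already carry the openshift_node role, with placeholder addresses:
+
+```
+---
+# hypothetical group name and placeholder IPs, adjust for your environment
+- hosts: nodes
+  roles:
+  - role: openshift_sdn_node
+    openshift_public_ip: 192.0.2.21
+    openshift_master_ips:
+    - 192.0.2.10
+```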
+
+License
+-------
+
+Apache License, Version 2.0
+
+Author Information
+------------------
+
+TODO
diff --git a/roles/openshift_sdn_node/defaults/main.yml b/roles/openshift_sdn_node/defaults/main.yml
new file mode 100644
index 000000000..9612d9d91
--- /dev/null
+++ b/roles/openshift_sdn_node/defaults/main.yml
@@ -0,0 +1,2 @@
+---
+openshift_sdn_node_debug_level: "{{ openshift_debug_level | default(0) }}"
diff --git a/roles/openshift_sdn_node/handlers/main.yml b/roles/openshift_sdn_node/handlers/main.yml
new file mode 100644
index 000000000..402d82149
--- /dev/null
+++ b/roles/openshift_sdn_node/handlers/main.yml
@@ -0,0 +1,3 @@
+---
+- name: restart openshift-sdn-node
+ service: name=openshift-sdn-node state=restarted
diff --git a/roles/openshift_sdn_node/meta/main.yml b/roles/openshift_sdn_node/meta/main.yml
new file mode 100644
index 000000000..ab45ff51e
--- /dev/null
+++ b/roles/openshift_sdn_node/meta/main.yml
@@ -0,0 +1,14 @@
+---
+galaxy_info:
+ author: Jason DeTiberus
+ description: OpenShift SDN Node
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 1.7
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+dependencies: []
diff --git a/roles/openshift_sdn_node/tasks/main.yml b/roles/openshift_sdn_node/tasks/main.yml
new file mode 100644
index 000000000..ff05a6972
--- /dev/null
+++ b/roles/openshift_sdn_node/tasks/main.yml
@@ -0,0 +1,41 @@
+---
+- name: Install openshift-sdn-node
+ yum:
+ pkg: openshift-sdn-node
+ state: installed
+
+# TODO: we are specifying -hostname= for OPTIONS as a workaround for
+# openshift-sdn-node not properly detecting the hostname.
+- name: Configure openshift-sdn-node settings
+ lineinfile:
+ dest: /etc/sysconfig/openshift-sdn-node
+ regexp: "{{ item.regex }}"
+ line: "{{ item.line }}"
+ backrefs: yes
+ with_items:
+ - regex: '^(OPTIONS=)'
+ line: '\1"-v={{ openshift_sdn_node_debug_level }} -hostname={{ openshift_hostname }}"'
+ - regex: '^(MASTER_URL=)'
+ line: '\1"http://{{ openshift_master_ips | first }}:4001"'
+ - regex: '^(MINION_IP=)'
+ line: '\1"{{ openshift_public_ip }}"'
+ # TODO lock down the insecure-registry config to a more sane value than
+ # 0.0.0.0/0
+ - regex: '^(DOCKER_OPTIONS=)'
+ line: '\1"--insecure-registry=0.0.0.0/0 -b=lbr0 --mtu=1450 --selinux-enabled"'
+ notify: restart openshift-sdn-node
+
+- name: Set openshift-sdn-node facts
+ include: "{{ role_path | dirname }}/openshift_common/tasks/set_facts.yml"
+ facts:
+ - section: sdn-node
+ option: debug_level
+ value: "{{ openshift_sdn_node_debug_level }}"
+
+# FIXME: once the openshift_cluster playbook is published, state should be started
+# For now, always bounce the service to pick up new credentials
+- name: Start and enable openshift-sdn-node
+ service:
+ name: openshift-sdn-node
+ enabled: yes
+ state: restarted
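
To make the lineinfile edits above concrete, here is a sketch of illustrative variable values and the sysconfig lines they would produce (addresses are examples only):

```
# group_vars sketch
openshift_sdn_node_debug_level: 4
openshift_master_ips: ['192.0.2.10', '192.0.2.11']  # the first entry feeds MASTER_URL
openshift_public_ip: 192.0.2.20

# approximate resulting lines in /etc/sysconfig/openshift-sdn-node:
#   OPTIONS="-v=4 -hostname=<openshift_hostname>"
#   MASTER_URL="http://192.0.2.10:4001"
#   MINION_IP="192.0.2.20"
```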
diff --git a/roles/base_os/files/irbrc b/roles/os_env_extras/files/irbrc
index 47374e920..47374e920 100644
--- a/roles/base_os/files/irbrc
+++ b/roles/os_env_extras/files/irbrc
diff --git a/roles/base_os/files/vimrc b/roles/os_env_extras/files/vimrc
index 537b944ed..537b944ed 100644
--- a/roles/base_os/files/vimrc
+++ b/roles/os_env_extras/files/vimrc
diff --git a/roles/os_env_extras/tasks/main.yaml b/roles/os_env_extras/tasks/main.yaml
new file mode 100644
index 000000000..96b12ad5b
--- /dev/null
+++ b/roles/os_env_extras/tasks/main.yaml
@@ -0,0 +1,17 @@
+---
+# environment configuration role, configures irbrc, vimrc
+
+- name: Ensure irbrc is installed for user root
+ copy:
+ src: irbrc
+ dest: /root/.irbrc
+
+- name: Ensure vimrc is installed for user root
+ copy:
+ src: vimrc
+ dest: /root/.vimrc
+
+- name: Bash Completion
+ yum:
+ pkg: bash-completion
+ state: installed
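
These tasks only manage root's dotfiles; a hypothetical extension (the `os_env_extras_users` variable is not part of this role) could loop the same copies over other users:

```
- name: Ensure irbrc is installed for additional users
  copy:
    src: irbrc
    dest: "/home/{{ item }}/.irbrc"
  with_items: os_env_extras_users | default([])
```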
diff --git a/roles/os_firewall/README.md b/roles/os_firewall/README.md
new file mode 100644
index 000000000..187d74b06
--- /dev/null
+++ b/roles/os_firewall/README.md
@@ -0,0 +1,66 @@
+OS Firewall
+===========
+
+OS Firewall manages firewalld and iptables firewall settings for a minimal use
+case (Adding/Removing rules based on protocol and port number).
+
+Requirements
+------------
+
+None.
+
+Role Variables
+--------------
+
+| Name | Default | |
+|---------------------------|---------|----------------------------------------|
+| os_firewall_use_firewalld | True | If false, use iptables |
+| os_firewall_allow | [] | List of service, port mappings to allow |
+| os_firewall_deny | [] | List of service, port mappings to deny |
+
+Dependencies
+------------
+
+None.
+
+Example Playbook
+----------------
+
+Use iptables and open tcp ports 80 and 443:
+```
+---
+- hosts: servers
+ vars:
+ os_firewall_use_firewalld: false
+ os_firewall_allow:
+ - service: httpd
+ port: 80/tcp
+ - service: https
+ port: 443/tcp
+ roles:
+ - os_firewall
+```
+
+Use firewalld and open tcp port 443 and close previously open tcp port 80:
+```
+---
+- hosts: servers
+ vars:
+ os_firewall_allow:
+ - service: https
+ port: 443/tcp
+ os_firewall_deny:
+ - service: httpd
+ port: 80/tcp
+ roles:
+ - os_firewall
+```
+
+License
+-------
+
+Apache License, Version 2.0
+
+Author Information
+------------------
+Jason DeTiberus - jdetiber@redhat.com
diff --git a/roles/os_firewall/defaults/main.yml b/roles/os_firewall/defaults/main.yml
new file mode 100644
index 000000000..bcf1d9a34
--- /dev/null
+++ b/roles/os_firewall/defaults/main.yml
@@ -0,0 +1,2 @@
+---
+os_firewall_use_firewalld: True
diff --git a/roles/os_firewall/library/os_firewall_manage_iptables.py b/roles/os_firewall/library/os_firewall_manage_iptables.py
new file mode 100644
index 000000000..fef710055
--- /dev/null
+++ b/roles/os_firewall/library/os_firewall_manage_iptables.py
@@ -0,0 +1,254 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+import subprocess
+from subprocess import check_output
+DOCUMENTATION = '''
+---
+module: os_firewall_manage_iptables
+short_description: This module manages iptables rules for a given chain
+author: Jason DeTiberus
+requirements: [ ]
+'''
+EXAMPLES = '''
+'''
+
+
+class IpTablesError(Exception):
+ def __init__(self, msg, cmd, exit_code, output):
+ self.msg = msg
+ self.cmd = cmd
+ self.exit_code = exit_code
+ self.output = output
+
+
+class IpTablesAddRuleError(IpTablesError):
+ pass
+
+
+class IpTablesRemoveRuleError(IpTablesError):
+ pass
+
+
+class IpTablesSaveError(IpTablesError):
+ pass
+
+
+class IpTablesCreateChainError(IpTablesError):
+ def __init__(self, chain, msg, cmd, exit_code, output):
+ super(IpTablesCreateChainError, self).__init__(msg, cmd, exit_code, output)
+ self.chain = chain
+
+
+class IpTablesCreateJumpRuleError(IpTablesError):
+ def __init__(self, chain, msg, cmd, exit_code, output):
+ super(IpTablesCreateJumpRuleError, self).__init__(msg, cmd, exit_code,
+ output)
+ self.chain = chain
+
+
+# TODO: implement rollbacks for any events that succeeded before a later
+# exception was thrown. For example, when the chain is created
+# successfully, but the add/remove rule fails.
+class IpTablesManager:
+ def __init__(self, module, ip_version, check_mode, chain):
+ self.module = module
+ self.ip_version = ip_version
+ self.check_mode = check_mode
+ self.chain = chain
+ self.cmd = self.gen_cmd()
+ self.save_cmd = self.gen_save_cmd()
+ self.output = []
+ self.changed = False
+
+ def save(self):
+ try:
+ self.output.append(check_output(self.save_cmd,
+ stderr=subprocess.STDOUT))
+ except subprocess.CalledProcessError as e:
+ raise IpTablesSaveError(
+ msg="Failed to save iptables rules",
+ cmd=e.cmd, exit_code=e.returncode, output=e.output)
+
+ def add_rule(self, port, proto):
+ rule = self.gen_rule(port, proto)
+ if not self.rule_exists(rule):
+ if not self.chain_exists():
+ self.create_chain()
+ if not self.jump_rule_exists():
+ self.create_jump_rule()
+
+ if self.check_mode:
+ self.changed = True
+ self.output.append("Create rule for %s %s" % (proto, port))
+ else:
+ cmd = self.cmd + ['-A'] + rule
+ try:
+ self.output.append(check_output(cmd))
+ self.changed = True
+ self.save()
+ except subprocess.CalledProcessError as e:
+ raise IpTablesAddRuleError(
+ msg="Failed to create rule for "
+ "%s %s" % (proto, port),
+ cmd=e.cmd,
+ exit_code=e.returncode,
+ output=e.output)
+
+ def remove_rule(self, port, proto):
+ rule = self.gen_rule(port, proto)
+ if self.rule_exists(rule):
+ if self.check_mode:
+ self.changed = True
+ self.output.append("Remove rule for %s %s" % (proto, port))
+ else:
+ cmd = self.cmd + ['-D'] + rule
+ try:
+ self.output.append(check_output(cmd))
+ self.changed = True
+ self.save()
+ except subprocess.CalledProcessError as e:
+ raise IpTablesRemoveRuleError(
+ msg="Failed to remove rule for "
+ "%s %s" % (proto, port),
+ cmd=e.cmd, exit_code=e.returncode, output=e.output)
+
+ def rule_exists(self, rule):
+ check_cmd = self.cmd + ['-C'] + rule
+ return subprocess.call(check_cmd) == 0
+
+ def gen_rule(self, port, proto):
+ return [self.chain, '-p', proto, '-m', 'state', '--state', 'NEW',
+ '-m', proto, '--dport', str(port), '-j', 'ACCEPT']
+
+ def create_jump_rule(self):
+ if self.check_mode:
+ self.changed = True
+ self.output.append("Create jump rule for chain %s" % self.chain)
+ else:
+ try:
+ cmd = self.cmd + ['-L', 'INPUT', '--line-numbers']
+ output = check_output(cmd, stderr=subprocess.STDOUT)
+
+ # break the input rules into rows and columns
+ input_rules = map(lambda s: s.split(), output.split('\n'))
+
+ # Find the last numbered rule
+ last_rule_num = None
+ last_rule_target = None
+ for rule in input_rules[:-1]:
+ if rule:
+ try:
+ last_rule_num = int(rule[0])
+ except ValueError:
+ continue
+ last_rule_target = rule[1]
+
+ # Raise an exception if we do not find a valid INPUT rule
+ if not last_rule_num or not last_rule_target:
+ raise IpTablesCreateJumpRuleError(
+ chain=self.chain,
+ msg="Failed to find existing INPUT rules",
+ cmd=None, exit_code=None, output=None)
+
+ # Naively assume that if the last row is a REJECT rule, then
+ # we can insert our rule right before it; otherwise we
+ # assume that we can just append the rule.
+ if last_rule_target == 'REJECT':
+ # insert rule
+ cmd = self.cmd + ['-I', 'INPUT', str(last_rule_num)]
+ else:
+ # append rule
+ cmd = self.cmd + ['-A', 'INPUT']
+ cmd += ['-j', self.chain]
+ output = check_output(cmd, stderr=subprocess.STDOUT)
+ self.changed = True
+ self.output.append(output)
+ except subprocess.CalledProcessError as e:
+ if '--line-numbers' in e.cmd:
+ raise IpTablesCreateJumpRuleError(
+ chain=self.chain,
+ msg="Failed to query existing INPUT rules to "
+ "determine jump rule location",
+ cmd=e.cmd, exit_code=e.returncode,
+ output=e.output)
+ else:
+ raise IpTablesCreateJumpRuleError(
+ chain=self.chain,
+ msg="Failed to create jump rule for chain %s" %
+ self.chain,
+ cmd=e.cmd, exit_code=e.returncode,
+ output=e.output)
+
+ def create_chain(self):
+ if self.check_mode:
+ self.changed = True
+ self.output.append("Create chain %s" % self.chain)
+ else:
+ try:
+ cmd = self.cmd + ['-N', self.chain]
+ self.output.append(check_output(cmd,
+ stderr=subprocess.STDOUT))
+ self.changed = True
+ self.output.append("Successfully created chain %s" %
+ self.chain)
+ except subprocess.CalledProcessError as e:
+ raise IpTablesCreateChainError(
+ chain=self.chain,
+ msg="Failed to create chain: %s" % self.chain,
+ cmd=e.cmd, exit_code=e.returncode, output=e.output
+ )
+
+ def jump_rule_exists(self):
+ cmd = self.cmd + ['-C', 'INPUT', '-j', self.chain]
+ return subprocess.call(cmd) == 0
+
+ def chain_exists(self):
+ cmd = self.cmd + ['-L', self.chain]
+ return subprocess.call(cmd) == 0
+
+ def gen_cmd(self):
+ cmd = 'iptables' if self.ip_version == 'ipv4' else 'ip6tables'
+ return ["/usr/sbin/%s" % cmd]
+
+ def gen_save_cmd(self):
+ cmd = 'iptables' if self.ip_version == 'ipv4' else 'ip6tables'
+ return ['/usr/libexec/iptables/%s.init' % cmd, 'save']
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ action=dict(required=True, choices=['add', 'remove']),
+ protocol=dict(required=True, choices=['tcp', 'udp']),
+ port=dict(required=True, type='int'),
+ ip_version=dict(required=False, default='ipv4',
+ choices=['ipv4', 'ipv6']),
+ ),
+ supports_check_mode=True
+ )
+
+ action = module.params['action']
+ protocol = module.params['protocol']
+ port = module.params['port']
+ ip_version = module.params['ip_version']
+ chain = 'OS_FIREWALL_ALLOW'
+
+ iptables_manager = IpTablesManager(module, ip_version, module.check_mode, chain)
+
+ try:
+ if action == 'add':
+ iptables_manager.add_rule(port, protocol)
+ elif action == 'remove':
+ iptables_manager.remove_rule(port, protocol)
+ except IpTablesError as e:
+ module.fail_json(msg=e.msg)
+
+ return module.exit_json(changed=iptables_manager.changed,
+ output=iptables_manager.output)
+
+
+# import module snippets
+from ansible.module_utils.basic import *
+main()
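
The module's EXAMPLES block is left empty above, so here is a sketch of how it can be invoked (parameter names follow the argument_spec; the service names and ports are illustrative):

```
- name: Open tcp port 8080
  os_firewall_manage_iptables:
    name: http-alt
    action: add
    protocol: tcp
    port: 8080

- name: Close udp port 514 for ipv6
  os_firewall_manage_iptables:
    name: syslog
    action: remove
    protocol: udp
    port: 514
    ip_version: ipv6
```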
diff --git a/roles/os_firewall/meta/main.yml b/roles/os_firewall/meta/main.yml
new file mode 100644
index 000000000..7a8cef6c5
--- /dev/null
+++ b/roles/os_firewall/meta/main.yml
@@ -0,0 +1,13 @@
+galaxy_info:
+ author: Jason DeTiberus
+ description: os_firewall
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 1.7
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - system
+dependencies: []
diff --git a/roles/os_firewall/tasks/firewall/firewalld.yml b/roles/os_firewall/tasks/firewall/firewalld.yml
new file mode 100644
index 000000000..469cfab6f
--- /dev/null
+++ b/roles/os_firewall/tasks/firewall/firewalld.yml
@@ -0,0 +1,75 @@
+---
+- name: Install firewalld packages
+ yum:
+ name: firewalld
+ state: present
+
+- name: Check if iptables-services is installed
+ command: rpm -q iptables-services
+ register: pkg_check
+ failed_when: pkg_check.rc > 1
+ changed_when: no
+
+- name: Ensure iptables services are not enabled
+ service:
+ name: "{{ item }}"
+ state: stopped
+ enabled: no
+ with_items:
+ - iptables
+ - ip6tables
+ when: pkg_check.rc == 0
+
+- name: Start and enable firewalld service
+ service:
+ name: firewalld
+ state: started
+ enabled: yes
+ register: result
+
+- name: Pause after starting firewalld, since the service start can briefly cause ssh to fail
+ pause: seconds=10
+ when: result | changed
+
+- name: Mask iptables services
+ command: systemctl mask "{{ item }}"
+ register: result
+ changed_when: "'iptables' in result.stdout"
+ with_items:
+ - iptables
+ - ip6tables
+ when: pkg_check.rc == 0
+
+# TODO: Ansible 1.9 will eliminate the need for separate firewalld tasks for
+# enabling rules and making them permanent with the immediate flag
+- name: Add firewalld allow rules
+ firewalld:
+ port: "{{ item.port }}"
+ permanent: false
+ state: enabled
+ with_items: os_firewall_allow
+ when: os_firewall_allow is defined
+
+- name: Persist firewalld allow rules
+ firewalld:
+ port: "{{ item.port }}"
+ permanent: true
+ state: enabled
+ with_items: os_firewall_allow
+ when: os_firewall_allow is defined
+
+- name: Remove firewalld allow rules
+ firewalld:
+ port: "{{ item.port }}"
+ permanent: false
+ state: disabled
+ with_items: os_firewall_deny
+ when: os_firewall_deny is defined
+
+- name: Persist removal of firewalld allow rules
+ firewalld:
+ port: "{{ item.port }}"
+ permanent: true
+ state: disabled
+ with_items: os_firewall_deny
+ when: os_firewall_deny is defined
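
Per the TODO above, once Ansible 1.9 is available the paired runtime/permanent tasks could collapse into one by using the firewalld module's immediate flag (a sketch, not part of this change):

```
- name: Add firewalld allow rules
  firewalld:
    port: "{{ item.port }}"
    permanent: true
    immediate: true
    state: enabled
  with_items: os_firewall_allow
  when: os_firewall_allow is defined
```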
diff --git a/roles/os_firewall/tasks/firewall/iptables.yml b/roles/os_firewall/tasks/firewall/iptables.yml
new file mode 100644
index 000000000..87e77c083
--- /dev/null
+++ b/roles/os_firewall/tasks/firewall/iptables.yml
@@ -0,0 +1,60 @@
+---
+- name: Install iptables packages
+ yum:
+ name: "{{ item }}"
+ state: present
+ with_items:
+ - iptables
+ - iptables-services
+
+- name: Check if firewalld is installed
+ command: rpm -q firewalld
+ register: pkg_check
+ failed_when: pkg_check.rc > 1
+ changed_when: no
+
+- name: Ensure firewalld service is not enabled
+ service:
+ name: firewalld
+ state: stopped
+ enabled: no
+ when: pkg_check.rc == 0
+
+- name: Start and enable iptables services
+ service:
+ name: "{{ item }}"
+ state: started
+ enabled: yes
+ with_items:
+ - iptables
+ - ip6tables
+ register: result
+
+- name: Pause after starting iptables services, since the service start can briefly cause ssh to fail
+ pause: seconds=10
+ when: result | changed
+
+# TODO: submit PR upstream to add mask/unmask to service module
+- name: Mask firewalld service
+ command: systemctl mask firewalld
+ register: result
+ changed_when: "'firewalld' in result.stdout"
+ when: pkg_check.rc == 0
+
+- name: Add iptables allow rules
+ os_firewall_manage_iptables:
+ name: "{{ item.service }}"
+ action: add
+ protocol: "{{ item.port.split('/')[1] }}"
+ port: "{{ item.port.split('/')[0] }}"
+ with_items: os_firewall_allow
+ when: os_firewall_allow is defined
+
+- name: Remove iptables rules
+ os_firewall_manage_iptables:
+ name: "{{ item.service }}"
+ action: remove
+ protocol: "{{ item.port.split('/')[1] }}"
+ port: "{{ item.port.split('/')[0] }}"
+ with_items: os_firewall_deny
+ when: os_firewall_deny is defined
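
The split('/') expressions assume entries use the port/protocol form documented in the README; for example, an illustrative entry parses as follows:

```
# an os_firewall_allow entry such as:
#   - service: https
#     port: 443/tcp
# is passed to os_firewall_manage_iptables as:
#   protocol: tcp  # "443/tcp".split('/')[1]
#   port: 443      # "443/tcp".split('/')[0]
```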
diff --git a/roles/os_firewall/tasks/main.yml b/roles/os_firewall/tasks/main.yml
new file mode 100644
index 000000000..ad89ef97c
--- /dev/null
+++ b/roles/os_firewall/tasks/main.yml
@@ -0,0 +1,6 @@
+---
+- include: firewall/firewalld.yml
+ when: os_firewall_use_firewalld
+
+- include: firewall/iptables.yml
+ when: not os_firewall_use_firewalld
diff --git a/roles/os_ipv6_disable/tasks/main.yaml b/roles/os_ipv6_disable/tasks/main.yaml
new file mode 100644
index 000000000..fae5beee7
--- /dev/null
+++ b/roles/os_ipv6_disable/tasks/main.yaml
@@ -0,0 +1,11 @@
+---
+# Disable ipv6 on RHEL7
+
+- name: Disable all ipv6
+ sysctl: name="net.ipv6.conf.all.disable_ipv6" value=1 sysctl_set=yes state=present reload=yes
+
+- name: Disable default ipv6
+ sysctl: name="net.ipv6.conf.default.disable_ipv6" value=1 sysctl_set=yes state=present reload=yes
+
+- name: Remove ipv6 localhost from /etc/hosts
+ lineinfile: dest='/etc/hosts' regexp='^::1 ' state=absent owner=root group=root mode=0644
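
A follow-up task could verify the sysctls took effect (a sketch, not part of the role):

```
- name: Verify ipv6 is disabled
  command: sysctl -n net.ipv6.conf.all.disable_ipv6
  register: ipv6_check
  changed_when: false
  failed_when: ipv6_check.stdout != "1"
```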
diff --git a/roles/pods/defaults/main.yml b/roles/pods/defaults/main.yml
deleted file mode 100644
index 027ac0fd8..000000000
--- a/roles/pods/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# defaults file for pods
diff --git a/roles/pods/handlers/main.yml b/roles/pods/handlers/main.yml
deleted file mode 100644
index 809f95836..000000000
--- a/roles/pods/handlers/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# handlers file for pods
diff --git a/roles/pods/vars/main.yml b/roles/pods/vars/main.yml
deleted file mode 100644
index c9ed1df03..000000000
--- a/roles/pods/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# vars file for pods
diff --git a/roles/repos/defaults/main.yaml b/roles/repos/defaults/main.yaml
new file mode 100644
index 000000000..6fe2bf621
--- /dev/null
+++ b/roles/repos/defaults/main.yaml
@@ -0,0 +1,5 @@
+---
+# TODO: once we are able to configure/deploy origin using the openshift roles,
+# then we should default to origin
+openshift_deployment_type: online
+openshift_additional_repos: {}
diff --git a/roles/repos/files/online/RPM-GPG-KEY-redhat-beta b/roles/repos/files/online/RPM-GPG-KEY-redhat-beta
new file mode 100644
index 000000000..7b40671a4
--- /dev/null
+++ b/roles/repos/files/online/RPM-GPG-KEY-redhat-beta
@@ -0,0 +1,61 @@
+-----BEGIN PGP PUBLIC KEY BLOCK-----
+Version: GnuPG v1.2.6 (GNU/Linux)
+
+mQINBEmkAzABEAC2/c7bP1lHQ3XScxbIk0LQWe1YOiibQBRLwf8Si5PktgtuPibT
+kKpZjw8p4D+fM7jD1WUzUE0X7tXg2l/eUlMM4dw6XJAQ1AmEOtlwSg7rrMtTvM0A
+BEtI7Km6fC6sU6RtBMdcqD1cH/6dbsfh8muznVA7UlX+PRBHVzdWzj6y8h84dBjo
+gzcbYu9Hezqgj/lLzicqsSZPz9UdXiRTRAIhp8V30BD8uRaaa0KDDnD6IzJv3D9P
+xQWbFM4Z12GN9LyeZqmD7bpKzZmXG/3drvfXVisXaXp3M07t3NlBa3Dt8NFIKZ0D
+FRXBz5bvzxRVmdH6DtkDWXDPOt+Wdm1rZrCOrySFpBZQRpHw12eo1M1lirANIov7
+Z+V1Qh/aBxj5EUu32u9ZpjAPPNtQF6F/KjaoHHHmEQAuj4DLex4LY646Hv1rcv2i
+QFuCdvLKQGSiFBrfZH0j/IX3/0JXQlZzb3MuMFPxLXGAoAV9UP/Sw/WTmAuTzFVm
+G13UYFeMwrToOiqcX2VcK0aC1FCcTP2z4JW3PsWvU8rUDRUYfoXovc7eg4Vn5wHt
+0NBYsNhYiAAf320AUIHzQZYi38JgVwuJfFu43tJZE4Vig++RQq6tsEx9Ftz3EwRR
+fJ9z9mEvEiieZm+vbOvMvIuimFVPSCmLH+bI649K8eZlVRWsx3EXCVb0nQARAQAB
+tDBSZWQgSGF0LCBJbmMuIChiZXRhIGtleSAyKSA8c2VjdXJpdHlAcmVkaGF0LmNv
+bT6JAjYEEwECACAFAkpSM+cCGwMGCwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRCT
+ioDK8hVB6/9tEAC0+KmzeKceXQ/GTUoU6jy9vtkFCFrmv+c7ol4XpdTt0QhqBOwy
+6m2mKWwmm8KfYfy0cADQ4y/EcoXl7FtFBwYmkCuEQGXhTDn9DvVjhooIq59LEMBQ
+OW879RwwzRIZ8ebbjMUjDPF5MfPQqP2LBu9N4KvXlZp4voykwuuaJ+cbsKZR6pZ6
+0RQKPHKP+NgUFC0fff7XY9cuOZZWFAeKRhLN2K7bnRHKxp+kELWb6R9ZfrYwZjWc
+MIPbTd1khE53L4NTfpWfAnJRtkPSDOKEGVlVLtLq4HEAxQt07kbslqISRWyXER3u
+QOJj64D1ZiIMz6t6uZ424VE4ry9rBR0Jz55cMMx5O/ni9x3xzFUgH8Su2yM0r3jE
+Rf24+tbOaPf7tebyx4OKe+JW95hNVstWUDyGbs6K9qGfI/pICuO1nMMFTo6GqzQ6
+DwLZvJ9QdXo7ujEtySZnfu42aycaQ9ZLC2DOCQCUBY350Hx6FLW3O546TAvpTfk0
+B6x+DV7mJQH7MGmRXQsE7TLBJKjq28Cn4tVp04PmybQyTxZdGA/8zY6pPl6xyVMH
+V68hSBKEVT/rlouOHuxfdmZva1DhVvUC6Xj7+iTMTVJUAq/4Uyn31P1OJmA2a0PT
+CAqWkbJSgKFccsjPoTbLyxhuMSNkEZFHvlZrSK9vnPzmfiRH0Orx3wYpMQ==
+=21pb
+-----END PGP PUBLIC KEY BLOCK-----
+The following public key can be used to verify RPM packages built and
+signed by Red Hat, Inc. for this beta using `rpm -K' using the GNU GPG
+package. Questions about this key should be sent to security@redhat.com.
+
+
+-----BEGIN PGP PUBLIC KEY BLOCK-----
+Version: GnuPG v1.0.6 (GNU/Linux)
+Comment: For info see http://www.gnupg.org
+
+mQGiBDySTqsRBACzc7xuCIp10oj5B2PAV4XzDeVxprv/WTMreSNSK+iC0bEz0IBp
+Vnn++qtyiXfH+bGIE9jqZgIEnpttWhUOaU5LhcLFzy+m8NWfngIFP9QfGmGAe9Gd
+LFeAdhj4RmSG/vgr7vDd83Hz22dv403Ar/sliWO4vDOrMmZBG57WGYTWtwCgkMsi
+UUQuJ6slbzKn82w+bYxOlL0EAIylWJGaTkKOTL5DqVR3ik9aT0Dt3FNVYiuhcKBe
+II4E3KOIVA9kO8in1IZjx2gs6K2UV+GsoAVANdfKL7l9O+k+J8OxhE74oycvYJxW
+QzCgXMZkNcvW5wyXwEMcr6TVd/5BGztcMw8oT3/l2MtAEG/vn1XaWToRSO1XDMDz
++AjUA/4m0mTkN8S4wjzJG8lqN7+quW3UOaiCe8J3SFrrrhE0XbY9cTJI/9nuXHU1
+VjqOSmXQYH2Db7UOroFTBiWhlAedA4O4yuK52AJnvSsHbnJSEmn9rpo5z1Q8F+qI
+mDlzriJdrIrVLeDiUeTlpH3kpG38D7007GhXBV72k1gpMoMcpbQ3UmVkIEhhdCwg
+SW5jLiAoQmV0YSBUZXN0IFNvZnR3YXJlKSA8cmF3aGlkZUByZWRoYXQuY29tPohX
+BBMRAgAXBQI8l5p/BQsHCgMEAxUDAgMWAgECF4AACgkQ/TcmiYl9oHqdeQCfZjw4
+F9sir3XfRAjVe9kYNcQ8hnIAn0WgyT7H5RriWYTOCfauOmd+cAW4iEYEEBECAAYF
+AjyXmqQACgkQIZGAzdtCpg5nDQCfepuRUyuVJvhuQkPWySETYvRw+WoAnjAWhx6q
+0npMx4OE1JGFi8ymKXktuQENBDySTq4QBADKL/mK7S8E3synxISlu7R6fUvu07Oc
+RoX96n0Di6T+BS99hC44XzHjMDhUX2ZzVvYS88EZXoUDDkB/8g7SwZrOJ/QE1zrI
+JmSVciNhSYWwqeT40Evs88ajZUfDiNbS/cSC6oui98iS4vxd7sE7IPY+FSx9vuAR
+xOa9vBnJY/dx0wADBQQAosm+Iltt2uigC6LJzxNOoIdB5r0GqTC1o5sHCeNqXJhU
+ExAG8m74uzMlYVLOpGZi4y4NwwAWvCWC0MWWnnu+LGFy1wKiJKRjhv5F+WkFutY5
+WHV5L44vp9jSIlBCRG+84jheTh8xqhndM9wOfPwWdYYu1vxrB8Tn6kA17PcYfHSI
+RgQYEQIABgUCPJJergAKCRD9NyaJiX2geiCPAJ4nEM4NtI9Uj8lONDk6FU86PmoL
+yACfb68fBd2pWEzLKsOk9imIobHHpzE=
+=gpIn
+-----END PGP PUBLIC KEY BLOCK-----
diff --git a/roles/repos/files/RPM-GPG-KEY-redhat-release b/roles/repos/files/online/RPM-GPG-KEY-redhat-release
index 0f83b622d..0f83b622d 100644
--- a/roles/repos/files/RPM-GPG-KEY-redhat-release
+++ b/roles/repos/files/online/RPM-GPG-KEY-redhat-release
diff --git a/roles/repos/files/epel7-kubernetes.repo b/roles/repos/files/online/epel7-kubernetes.repo
index 1deae2939..1deae2939 100644
--- a/roles/repos/files/epel7-kubernetes.repo
+++ b/roles/repos/files/online/epel7-kubernetes.repo
diff --git a/roles/repos/files/epel7-openshift.repo b/roles/repos/files/online/epel7-openshift.repo
index c7629872d..c7629872d 100644
--- a/roles/repos/files/epel7-openshift.repo
+++ b/roles/repos/files/online/epel7-openshift.repo
diff --git a/roles/repos/files/oso-rhui-rhel-7-extras.repo b/roles/repos/files/online/oso-rhui-rhel-7-extras.repo
index cfe41f691..cfe41f691 100644
--- a/roles/repos/files/oso-rhui-rhel-7-extras.repo
+++ b/roles/repos/files/online/oso-rhui-rhel-7-extras.repo
diff --git a/roles/repos/files/oso-rhui-rhel-7-server.repo b/roles/repos/files/online/oso-rhui-rhel-7-server.repo
index 9fe4d6623..ddc93193d 100644
--- a/roles/repos/files/oso-rhui-rhel-7-server.repo
+++ b/roles/repos/files/online/oso-rhui-rhel-7-server.repo
@@ -4,7 +4,7 @@ baseurl=https://gce-mirror1.ops.rhcloud.com/libra/rhui-rhel-server-7-releases/
https://mirror.ops.rhcloud.com/libra/rhui-rhel-server-7-releases/
enabled=1
gpgcheck=1
-gpgkey=file:///srv/libra/keys/RPM-GPG-KEY-redhat-release
+gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release
sslverify=False
sslclientcert=/var/lib/yum/client-cert.pem
sslclientkey=/var/lib/yum/client-key.pem
@@ -15,7 +15,7 @@ baseurl=https://gce-mirror1.ops.rhcloud.com/libra/rhui-rhel-server-7-releases-op
https://mirror.ops.rhcloud.com/libra/rhui-rhel-server-7-releases-optional/
enabled=1
gpgcheck=1
-gpgkey=file:///srv/libra/keys/RPM-GPG-KEY-redhat-release
+gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release
sslverify=False
sslclientcert=/var/lib/yum/client-cert.pem
sslclientkey=/var/lib/yum/client-key.pem
diff --git a/roles/repos/files/rhel-7-libra-candidate.repo b/roles/repos/files/online/rhel-7-libra-candidate.repo
index b4215679f..b4215679f 100644
--- a/roles/repos/files/rhel-7-libra-candidate.repo
+++ b/roles/repos/files/online/rhel-7-libra-candidate.repo
diff --git a/roles/repos/tasks/main.yaml b/roles/repos/tasks/main.yaml
index a6a80581e..43786da41 100644
--- a/roles/repos/tasks/main.yaml
+++ b/roles/repos/tasks/main.yaml
@@ -1,16 +1,41 @@
---
-# The following role lays down the correct repository and gpg key for yum
-- name: Ensure rhel 7 libra candidate exists in yum.repos.d
- copy: src=rhel-7-libra-candidate.repo dest=/etc/yum.repos.d/rhel-7-libra-candidate.repo
+# TODO: Add flag for enabling EPEL repo, default to false
-- name: Ensure rhel 7 extras exists in yum.repos.d
- copy: src=oso-rhui-rhel-7-extras.repo dest=/etc/yum.repos.d/oso-rhui-rhel-7-extras.repo
+- assert:
+ that: openshift_deployment_type in known_openshift_deployment_types
-- name: Ensure the kubernetes repo is available
- copy: src=epel7-kubernetes.repo dest=/etc/yum.repos.d/epel7-kubernetes.repo
+# TODO: remove this when origin support actually works
+- fail: msg="OpenShift Origin support is not currently enabled"
+ when: openshift_deployment_type == 'origin'
-- name: Ensure the origin repo is available
- copy: src=epel7-openshift.repo dest=/etc/yum.repos.d/epel7-openshift.repo
+- name: Create any additional repos that are defined
+ template:
+ src: yum_repo.j2
+ dest: /etc/yum.repos.d/openshift_additional.repo
+ when: openshift_additional_repos | length > 0
-- name: Ensure the rhel repo is available
- copy: src=oso-rhui-rhel-7-server.repo dest=/etc/yum.repos.d/oso-rhui-rhel-7-server.repo
+- name: Remove the additional repos if no longer defined
+ file:
+ dest: /etc/yum.repos.d/openshift_additional.repo
+ state: absent
+ when: openshift_additional_repos | length == 0
+
+- name: Remove any yum repo files for other deployment types
+ file:
+ path: "/etc/yum.repos.d/{{ item | basename }}"
+ state: absent
+ with_fileglob:
+ - '*/*'
+ when: not (item | search("/files/" + openshift_deployment_type + "/")) and (item | search("\.repo$"))
+
+- name: Configure gpg keys if needed
+ copy: src={{ item }} dest=/etc/pki/rpm-gpg/
+ with_fileglob:
+ - "{{ openshift_deployment_type }}/*"
+ when: item | basename | match("RPM-GPG-KEY-")
+
+- name: Configure yum repositories
+ copy: src={{ item }} dest=/etc/yum.repos.d/
+ with_fileglob:
+ - "{{ openshift_deployment_type }}/*"
+ when: item | basename | search(".*\.repo$")
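
With this change the repo and gpg key files are selected by openshift_deployment_type, so a play opting into the online repos might look like this sketch (only the online type currently has files checked in):

```
- hosts: all
  vars:
    openshift_deployment_type: online  # must be one of known_openshift_deployment_types
  roles:
    - repos
```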
diff --git a/roles/repos/templates/yum_repo.j2 b/roles/repos/templates/yum_repo.j2
new file mode 100644
index 000000000..7ea2c7460
--- /dev/null
+++ b/roles/repos/templates/yum_repo.j2
@@ -0,0 +1,15 @@
+# {{ ansible_managed }}
+{% for repo in openshift_additional_repos %}
+[{{ repo.id }}]
+name={{ repo.name | default(repo.id) }}
+baseurl={{ repo.baseurl }}
+{% set enable_repo = repo.enabled | default(1) %}
+enabled={{ 1 if ( enable_repo == 1 or enable_repo == True ) else 0 }}
+{% set enable_gpg_check = repo.gpgcheck | default(1) %}
+gpgcheck={{ 1 if ( enable_gpg_check == 1 or enable_gpg_check == True ) else 0 }}
+{% for key, value in repo.iteritems() %}
+{% if key not in ['id', 'name', 'baseurl', 'enabled', 'gpgcheck'] and value is defined %}
+{{ key }}={{ value }}
+{% endif %}
+{% endfor %}
+{% endfor %}
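
The template iterates openshift_additional_repos as a sequence of mappings with at least id and baseurl; any other keys are emitted verbatim. An illustrative value (a sketch; the shipped default is empty):

```
openshift_additional_repos:
  - id: example-extra
    name: Example extra repo
    baseurl: https://example.com/repo/7/$basearch/
    enabled: 1
    gpgcheck: 0
    sslverify: 0  # extra keys like this are passed through by the template
```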
diff --git a/roles/repos/vars/main.yml b/roles/repos/vars/main.yml
new file mode 100644
index 000000000..bbb4c77e7
--- /dev/null
+++ b/roles/repos/vars/main.yml
@@ -0,0 +1,2 @@
+---
+known_openshift_deployment_types: ['origin', 'online', 'enterprise']