-rw-r--r--  README_openstack.md                                                   |   1
-rw-r--r--  docs/best_practices_guide.adoc                                        |  12
-rw-r--r--  filter_plugins/oo_filters.py                                          |   4
-rw-r--r--  playbooks/aws/openshift-cluster/list.yml                              |   9
-rw-r--r--  playbooks/common/openshift-cluster/initialize_openshift_version.yml  |  16
-rw-r--r--  playbooks/gce/openshift-cluster/library/gce.py                        | 543
-rw-r--r--  playbooks/gce/openshift-cluster/list.yml                              |  14
-rw-r--r--  playbooks/gce/openshift-cluster/tasks/launch_instances.yml            |   1
-rw-r--r--  playbooks/libvirt/openshift-cluster/list.yml                          |  14
-rw-r--r--  playbooks/openstack/openshift-cluster/launch.yml                      |   2
-rw-r--r--  playbooks/openstack/openshift-cluster/list.yml                        |  14
-rw-r--r--  playbooks/openstack/openshift-cluster/vars.yml                        |   2
-rw-r--r--  roles/openshift_facts/tasks/main.yml                                  |   2
-rw-r--r--  roles/openshift_loadbalancer/meta/main.yml                            |   5
-rw-r--r--  roles/openshift_master/tasks/systemd_units.yml                        |  24
-rw-r--r--  roles/openshift_master/templates/atomic-openshift-master.j2          |  10
-rw-r--r--  roles/openshift_master/templates/master.yaml.v1.j2                    |   3
-rw-r--r--  roles/openshift_master_facts/tasks/main.yml                           |   1
-rw-r--r--  roles/openshift_metrics/tasks/install.yml                             |  27
-rw-r--r--  roles/openshift_metrics/tasks/main.yaml                               |   6
-rw-r--r--  roles/openshift_node/tasks/main.yml                                   |   5
-rw-r--r--  roles/openshift_node/templates/node.yaml.v1.j2                        |   2
22 files changed, 100 insertions, 617 deletions
diff --git a/README_openstack.md b/README_openstack.md
index d3d1f9052..2578488c7 100644
--- a/README_openstack.md
+++ b/README_openstack.md
@@ -50,6 +50,7 @@ The following options are used only by `heat_stack.yaml`. They are so used only
* `floating_ip_pool` (default to `external`): comma separated list of floating IP pools
* `ssh_from` (default to `0.0.0.0/0`): IPs authorized to connect to the VMs via ssh
* `node_port_from` (default to `0.0.0.0/0`): IPs authorized to connect to the services exposed via nodePort
+* `heat_timeout` (default to `3`): Timeout (in minutes) passed to heat for create or update stack.
Creating a cluster
diff --git a/docs/best_practices_guide.adoc b/docs/best_practices_guide.adoc
index cac9645a6..e9d904965 100644
--- a/docs/best_practices_guide.adoc
+++ b/docs/best_practices_guide.adoc
@@ -52,11 +52,11 @@ If mode lines for other editors are needed, please open a GitHub issue.
=== Method Signatures
'''
-[[When-adding-a-new-paramemter-to-an-existing-method-a-default-value-SHOULD-be-used]]
+[[When-adding-a-new-parameter-to-an-existing-method-a-default-value-SHOULD-be-used]]
[cols="2v,v"]
|===
-| <<When-adding-a-new-paramemter-to-an-existing-method-a-default-value-SHOULD-be-used, Rule>>
-| When adding a new paramemter to an existing method, a default value SHOULD be used
+| <<When-adding-a-new-parameter-to-an-existing-method-a-default-value-SHOULD-be-used, Rule>>
+| When adding a new parameter to an existing method, a default value SHOULD be used
|===
The purpose of this rule is to make it so that method signatures are backwards compatible.
@@ -194,7 +194,7 @@ The purpose of this rule is to make it easy to include custom modules in our pla
| Parameters to Ansible modules SHOULD use the Yaml dictionary format when 3 or more parameters are being passed
|===
-When a module has several parameters that are being passed in, it's hard to see exactly what value each parameter is getting. It is preferred to use the Ansible Yaml syntax to pass in parameters so that it's more clear what values are being passed for each paramemter.
+When a module has several parameters that are being passed in, it's hard to see exactly what value each parameter is getting. It is preferred to use the Ansible Yaml syntax to pass in parameters so that it's more clear what values are being passed for each parameter.
.Bad:
[source,yaml]
@@ -222,7 +222,7 @@ When a module has several parameters that are being passed in, it's hard to see
| Parameters to Ansible modules SHOULD use the Yaml dictionary format when the line length exceeds 120 characters
|===
-Lines that are long quickly become a wall of text that isn't easily parsable. It is preferred to use the Ansible Yaml syntax to pass in parameters so that it's more clear what values are being passed for each paramemter.
+Lines that are long quickly become a wall of text that isn't easily parsable. It is preferred to use the Ansible Yaml syntax to pass in parameters so that it's more clear what values are being passed for each parameter.
.Bad:
[source,yaml]
@@ -432,7 +432,7 @@ This is very useful when developing and debugging new tasks. It can also signifi
[[Ansible-Roles-SHOULD-be-named-like-technology_component_subcomponent]]
[cols="2v,v"]
|===
-| [[Ansible-Roles-SHOULD-be-named-like-technology_component_subcomponent, Rule]]
+| <<Ansible-Roles-SHOULD-be-named-like-technology_component_subcomponent, Rule>>
| Ansible Roles SHOULD be named like technology_component[_subcomponent].
|===
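As a reference point for the two rules above, a minimal sketch of the preferred dictionary form next to the single-line form (module and values are illustrative, not from this repository):

    # Preferred: one parameter per line, easy to scan and to diff
    - name: Install the master service file
      copy:
        src: origin-master.service
        dest: /etc/systemd/system/origin-master.service
        owner: root
        group: root
        mode: "0644"

    # Discouraged: the same task collapsed into one key=value line
    - name: Install the master service file
      copy: src=origin-master.service dest=/etc/systemd/system/origin-master.service owner=root group=root mode=0644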
diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py
index 97eacf9bf..38bc3ad6b 100644
--- a/filter_plugins/oo_filters.py
+++ b/filter_plugins/oo_filters.py
@@ -608,8 +608,8 @@ class FilterModule(object):
host_type=_get_tag_value(host['group_names'], 'host-type'),
sub_host_type=_get_tag_value(host['group_names'], 'sub-host-type'),
host={'name': host['inventory_hostname'],
- 'public IP': host['ansible_ssh_host'],
- 'private IP': host['ansible_default_ipv4']['address']})
+ 'public IP': host['oo_public_ipv4'],
+ 'private IP': host['oo_private_ipv4']})
except KeyError:
pass
return clusters
diff --git a/playbooks/aws/openshift-cluster/list.yml b/playbooks/aws/openshift-cluster/list.yml
index 4934ae6d0..ed8aac398 100644
--- a/playbooks/aws/openshift-cluster/list.yml
+++ b/playbooks/aws/openshift-cluster/list.yml
@@ -16,11 +16,8 @@
groups: oo_list_hosts
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
ansible_become: "{{ deployment_vars[deployment_type].become }}"
+ oo_public_ipv4: "{{ hostvars[item].ec2_ip_address }}"
+ oo_private_ipv4: "{{ hostvars[item].ec2_private_ip_address }}"
with_items: "{{ groups[scratch_group] | default([]) | difference(['localhost']) }}"
-
-- name: List Hosts
- hosts: oo_list_hosts
- gather_facts: no
- tasks:
- debug:
- msg: "public ip:{{ hostvars[inventory_hostname].ec2_ip_address }} private ip:{{ hostvars[inventory_hostname].ec2_private_ip_address }}"
+ msg: "{{ hostvars | oo_select_keys(groups[scratch_group] | default([])) | oo_pretty_print_cluster }}"
diff --git a/playbooks/common/openshift-cluster/initialize_openshift_version.yml b/playbooks/common/openshift-cluster/initialize_openshift_version.yml
index 7112a6084..2f384ddea 100644
--- a/playbooks/common/openshift-cluster/initialize_openshift_version.yml
+++ b/playbooks/common/openshift-cluster/initialize_openshift_version.yml
@@ -1,5 +1,21 @@
---
# NOTE: requires openshift_facts be run
+- hosts: l_oo_all_hosts
+ gather_facts: no
+ tasks:
+ # See:
+ # https://bugzilla.redhat.com/show_bug.cgi?id=1395047
+ # https://bugzilla.redhat.com/show_bug.cgi?id=1282961
+ # https://github.com/openshift/openshift-ansible/issues/1138
+ - name: Check for bad combinations of yum and subscription-manager
+ command: >
+ {{ repoquery_cmd }} --installed --qf '%{version}' "yum"
+ register: yum_ver_test
+ changed_when: false
+ - fail:
+ msg: Incompatible versions of yum and subscription-manager found. You may need to update yum and yum-utils.
+ when: "'Plugin \"search-disabled-repos\" requires API 2.7. Supported API is 2.6.' in yum_ver_test.stdout"
+
- name: Determine openshift_version to configure on first master
hosts: oo_first_master
roles:
diff --git a/playbooks/gce/openshift-cluster/library/gce.py b/playbooks/gce/openshift-cluster/library/gce.py
deleted file mode 100644
index fcaa3b850..000000000
--- a/playbooks/gce/openshift-cluster/library/gce.py
+++ /dev/null
@@ -1,543 +0,0 @@
-#!/usr/bin/python
-# Copyright 2013 Google Inc.
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-DOCUMENTATION = '''
----
-module: gce
-version_added: "1.4"
-short_description: create or terminate GCE instances
-description:
- - Creates or terminates Google Compute Engine (GCE) instances. See
- U(https://cloud.google.com/products/compute-engine) for an overview.
- Full install/configuration instructions for the gce* modules can
- be found in the comments of ansible/test/gce_tests.py.
-options:
- image:
- description:
- - image string to use for the instance
- required: false
- default: "debian-7"
- instance_names:
- description:
- - a comma-separated list of instance names to create or destroy
- required: false
- default: null
- machine_type:
- description:
- - machine type to use for the instance, use 'n1-standard-1' by default
- required: false
- default: "n1-standard-1"
- metadata:
- description:
- - a hash/dictionary of custom data for the instance;
- '{"key":"value", ...}'
- required: false
- default: null
- service_account_email:
- version_added: "1.5.1"
- description:
- - service account email
- required: false
- default: null
- service_account_permissions:
- version_added: "2.0"
- description:
- - service account permissions (see
- U(https://cloud.google.com/sdk/gcloud/reference/compute/instances/create),
- --scopes section for detailed information)
- required: false
- default: null
- choices: [
- "bigquery", "cloud-platform", "compute-ro", "compute-rw",
- "computeaccounts-ro", "computeaccounts-rw", "datastore", "logging-write",
- "monitoring", "sql", "sql-admin", "storage-full", "storage-ro",
- "storage-rw", "taskqueue", "userinfo-email"
- ]
- pem_file:
- version_added: "1.5.1"
- description:
- - path to the pem file associated with the service account email
- required: false
- default: null
- project_id:
- version_added: "1.5.1"
- description:
- - your GCE project ID
- required: false
- default: null
- name:
- description:
- - identifier when working with a single instance
- required: false
- network:
- description:
- - name of the network, 'default' will be used if not specified
- required: false
- default: "default"
- persistent_boot_disk:
- description:
- - if set, create the instance with a persistent boot disk
- required: false
- default: "false"
- disks:
- description:
- - a list of persistent disks to attach to the instance; a string value
- gives the name of the disk; alternatively, a dictionary value can
- define 'name' and 'mode' ('READ_ONLY' or 'READ_WRITE'). The first entry
- will be the boot disk (which must be READ_WRITE).
- required: false
- default: null
- version_added: "1.7"
- state:
- description:
- - desired state of the resource
- required: false
- default: "present"
- choices: ["active", "present", "absent", "deleted"]
- tags:
- description:
- - a comma-separated list of tags to associate with the instance
- required: false
- default: null
- zone:
- description:
- - the GCE zone to use
- required: true
- default: "us-central1-a"
- ip_forward:
- version_added: "1.9"
- description:
- - set to true if the instance can forward ip packets (useful for
- gateways)
- required: false
- default: "false"
- external_ip:
- version_added: "1.9"
- description:
- - type of external ip, ephemeral by default
- required: false
- default: "ephemeral"
- disk_auto_delete:
- version_added: "1.9"
- description:
- - if set boot disk will be removed after instance destruction
- required: false
- default: "true"
-
-requirements:
- - "python >= 2.6"
- - "apache-libcloud >= 0.13.3"
-notes:
- - Either I(name) or I(instance_names) is required.
-author: "Eric Johnson (@erjohnso) <erjohnso@google.com>"
-'''
-
-EXAMPLES = '''
-# Basic provisioning example. Create a single Debian 7 instance in the
-# us-central1-a Zone of n1-standard-1 machine type.
-- local_action:
- module: gce
- name: test-instance
- zone: us-central1-a
- machine_type: n1-standard-1
- image: debian-7
-
-# Example using defaults and with metadata to create a single 'foo' instance
-- local_action:
- module: gce
- name: foo
- metadata: '{"db":"postgres", "group":"qa", "id":500}'
-
-
-# Launch instances from a control node, runs some tasks on the new instances,
-# and then terminate them
-- name: Create a sandbox instance
- hosts: localhost
- vars:
- names: foo,bar
- machine_type: n1-standard-1
- image: debian-6
- zone: us-central1-a
- service_account_email: unique-email@developer.gserviceaccount.com
- pem_file: /path/to/pem_file
- project_id: project-id
- tasks:
- - name: Launch instances
- local_action: gce instance_names={{names}} machine_type={{machine_type}}
- image={{image}} zone={{zone}}
- service_account_email={{ service_account_email }}
- pem_file={{ pem_file }} project_id={{ project_id }}
- register: gce
- - name: Wait for SSH to come up
- local_action: wait_for host={{item.public_ip}} port=22 delay=10
- timeout=60 state=started
- with_items: {{gce.instance_data}}
-
-- name: Configure instance(s)
- hosts: launched
- sudo: True
- roles:
- - my_awesome_role
- - my_awesome_tasks
-
-- name: Terminate instances
- hosts: localhost
- connection: local
- tasks:
- - name: Terminate instances that were previously launched
- local_action:
- module: gce
- state: 'absent'
- instance_names: {{gce.instance_names}}
-
-'''
-
-try:
- import libcloud
- from libcloud.compute.types import Provider
- from libcloud.compute.providers import get_driver
- from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
- ResourceExistsError, ResourceInUseError, ResourceNotFoundError
- _ = Provider.GCE
- HAS_LIBCLOUD = True
-except ImportError:
- HAS_LIBCLOUD = False
-
-try:
- from ast import literal_eval
- HAS_PYTHON26 = True
-except ImportError:
- HAS_PYTHON26 = False
-
-
-def get_instance_info(inst):
- """Retrieves instance information from an instance object and returns it
- as a dictionary.
-
- """
- metadata = {}
- if 'metadata' in inst.extra and 'items' in inst.extra['metadata']:
- for md in inst.extra['metadata']['items']:
- metadata[md['key']] = md['value']
-
- try:
- netname = inst.extra['networkInterfaces'][0]['network'].split('/')[-1]
- except:
- netname = None
- if 'disks' in inst.extra:
- disk_names = [disk_info['source'].split('/')[-1]
- for disk_info
- in sorted(inst.extra['disks'],
- key=lambda disk_info: disk_info['index'])]
- else:
- disk_names = []
-
- if len(inst.public_ips) == 0:
- public_ip = None
- else:
- public_ip = inst.public_ips[0]
-
- return({
- 'image': inst.image is not None and inst.image.split('/')[-1] or None,
- 'disks': disk_names,
- 'machine_type': inst.size,
- 'metadata': metadata,
- 'name': inst.name,
- 'network': netname,
- 'private_ip': inst.private_ips[0],
- 'public_ip': public_ip,
- 'status': ('status' in inst.extra) and inst.extra['status'] or None,
- 'tags': ('tags' in inst.extra) and inst.extra['tags'] or [],
- 'zone': ('zone' in inst.extra) and inst.extra['zone'].name or None,
- })
-
-
-def create_instances(module, gce, instance_names):
- """Creates new instances. Attributes other than instance_names are picked
- up from 'module'
-
- module : AnsibleModule object
- gce: authenticated GCE libcloud driver
- instance_names: python list of instance names to create
-
- Returns:
- A list of dictionaries with instance information
- about the instances that were launched.
-
- """
- image = module.params.get('image')
- machine_type = module.params.get('machine_type')
- metadata = module.params.get('metadata')
- network = module.params.get('network')
- persistent_boot_disk = module.params.get('persistent_boot_disk')
- disks = module.params.get('disks')
- state = module.params.get('state')
- tags = module.params.get('tags')
- zone = module.params.get('zone')
- ip_forward = module.params.get('ip_forward')
- external_ip = module.params.get('external_ip')
- disk_auto_delete = module.params.get('disk_auto_delete')
- service_account_permissions = module.params.get('service_account_permissions')
- service_account_email = module.params.get('service_account_email')
-
- if external_ip == "none":
- external_ip = None
-
- new_instances = []
- changed = False
-
- lc_image = gce.ex_get_image(image)
- lc_disks = []
- disk_modes = []
- for i, disk in enumerate(disks or []):
- if isinstance(disk, dict):
- lc_disks.append(gce.ex_get_volume(disk['name']))
- disk_modes.append(disk['mode'])
- else:
- lc_disks.append(gce.ex_get_volume(disk))
- # boot disk is implicitly READ_WRITE
- disk_modes.append('READ_ONLY' if i > 0 else 'READ_WRITE')
- lc_network = gce.ex_get_network(network)
- lc_machine_type = gce.ex_get_size(machine_type)
- lc_zone = gce.ex_get_zone(zone)
-
- # Try to convert the user's metadata value into the format expected
- # by GCE. First try to ensure user has proper quoting of a
- # dictionary-like syntax using 'literal_eval', then convert the python
- # dict into a python list of 'key' / 'value' dicts. Should end up
- # with:
- # [ {'key': key1, 'value': value1}, {'key': key2, 'value': value2}, ...]
- if metadata:
- if isinstance(metadata, dict):
- md = metadata
- else:
- try:
- md = literal_eval(str(metadata))
- if not isinstance(md, dict):
- raise ValueError('metadata must be a dict')
- except ValueError as e:
- module.fail_json(msg='bad metadata: %s' % str(e))
- except SyntaxError as e:
- module.fail_json(msg='bad metadata syntax')
-
- if hasattr(libcloud, '__version__') and libcloud.__version__ < '0.15':
- items = []
- for k, v in md.items():
- items.append({"key": k, "value": v})
- metadata = {'items': items}
- else:
- metadata = md
-
- ex_sa_perms = []
- bad_perms = []
- if service_account_permissions:
- for perm in service_account_permissions:
- if perm not in gce.SA_SCOPES_MAP.keys():
- bad_perms.append(perm)
- if len(bad_perms) > 0:
- module.fail_json(msg='bad permissions: %s' % str(bad_perms))
- if service_account_email:
- ex_sa_perms.append({'email': service_account_email})
- else:
- ex_sa_perms.append({'email': "default"})
- ex_sa_perms[0]['scopes'] = service_account_permissions
-
- # These variables all have default values but check just in case
- if not lc_image or not lc_network or not lc_machine_type or not lc_zone:
- module.fail_json(msg='Missing required create instance variable',
- changed=False)
-
- for name in instance_names:
- pd = None
- if lc_disks:
- pd = lc_disks[0]
- elif persistent_boot_disk:
- try:
- pd = gce.create_volume(None, "%s" % name, image=lc_image)
- except ResourceExistsError:
- pd = gce.ex_get_volume("%s" % name, lc_zone)
- inst = None
- try:
- inst = gce.create_node(
- name, lc_machine_type, lc_image, location=lc_zone,
- ex_network=network, ex_tags=tags, ex_metadata=metadata,
- ex_boot_disk=pd, ex_can_ip_forward=ip_forward,
- external_ip=external_ip, ex_disk_auto_delete=disk_auto_delete,
- ex_service_accounts=ex_sa_perms
- )
- changed = True
- except ResourceExistsError:
- inst = gce.ex_get_node(name, lc_zone)
- except GoogleBaseError as e:
- module.fail_json(msg='Unexpected error attempting to create ' +
- 'instance %s, error: %s' % (name, e.value))
-
- for i, lc_disk in enumerate(lc_disks):
- # Check whether the disk is already attached
- if (len(inst.extra['disks']) > i):
- attached_disk = inst.extra['disks'][i]
- if attached_disk['source'] != lc_disk.extra['selfLink']:
- module.fail_json(
- msg=("Disk at index %d does not match: requested=%s found=%s" % (
- i, lc_disk.extra['selfLink'], attached_disk['source'])))
- elif attached_disk['mode'] != disk_modes[i]:
- module.fail_json(
- msg=("Disk at index %d is in the wrong mode: requested=%s found=%s" % (
- i, disk_modes[i], attached_disk['mode'])))
- else:
- continue
- gce.attach_volume(inst, lc_disk, ex_mode=disk_modes[i])
- # Work around libcloud bug: attached volumes don't get added
- # to the instance metadata. get_instance_info() only cares about
- # source and index.
- if len(inst.extra['disks']) != i+1:
- inst.extra['disks'].append(
- {'source': lc_disk.extra['selfLink'], 'index': i})
-
- if inst:
- new_instances.append(inst)
-
- instance_names = []
- instance_json_data = []
- for inst in new_instances:
- d = get_instance_info(inst)
- instance_names.append(d['name'])
- instance_json_data.append(d)
-
- return (changed, instance_json_data, instance_names)
-
-
-def terminate_instances(module, gce, instance_names, zone_name):
- """Terminates a list of instances.
-
- module: Ansible module object
- gce: authenticated GCE connection object
- instance_names: a list of instance names to terminate
- zone_name: the zone where the instances reside prior to termination
-
- Returns a dictionary of instance names that were terminated.
-
- """
- changed = False
- terminated_instance_names = []
- for name in instance_names:
- inst = None
- try:
- inst = gce.ex_get_node(name, zone_name)
- except ResourceNotFoundError:
- pass
- except Exception as e:
- module.fail_json(msg=unexpected_error_msg(e), changed=False)
- if inst:
- gce.destroy_node(inst)
- terminated_instance_names.append(inst.name)
- changed = True
-
- return (changed, terminated_instance_names)
-
-
-def main():
- module = AnsibleModule(
- argument_spec=dict(
- image=dict(default='debian-7'),
- instance_names=dict(),
- machine_type=dict(default='n1-standard-1'),
- metadata=dict(),
- name=dict(),
- network=dict(default='default'),
- persistent_boot_disk=dict(type='bool', default=False),
- disks=dict(type='list'),
- state=dict(choices=['active', 'present', 'absent', 'deleted'],
- default='present'),
- tags=dict(type='list'),
- zone=dict(default='us-central1-a'),
- service_account_email=dict(),
- service_account_permissions=dict(type='list'),
- pem_file=dict(),
- project_id=dict(),
- ip_forward=dict(type='bool', default=False),
- external_ip=dict(choices=['ephemeral', 'none'],
- default='ephemeral'),
- disk_auto_delete=dict(type='bool', default=True),
- )
- )
-
- if not HAS_PYTHON26:
- module.fail_json(msg="GCE module requires python's 'ast' module, python v2.6+")
- if not HAS_LIBCLOUD:
- module.fail_json(msg='libcloud with GCE support (0.13.3+) required for this module')
-
- gce = gce_connect(module)
-
- image = module.params.get('image')
- instance_names = module.params.get('instance_names')
- machine_type = module.params.get('machine_type')
- metadata = module.params.get('metadata')
- name = module.params.get('name')
- network = module.params.get('network')
- persistent_boot_disk = module.params.get('persistent_boot_disk')
- state = module.params.get('state')
- tags = module.params.get('tags')
- zone = module.params.get('zone')
- ip_forward = module.params.get('ip_forward')
- changed = False
-
- inames = []
- if isinstance(instance_names, list):
- inames = instance_names
- elif isinstance(instance_names, str):
- inames = instance_names.split(',')
- if name:
- inames.append(name)
- if not inames:
- module.fail_json(msg='Must specify a "name" or "instance_names"',
- changed=False)
- if not zone:
- module.fail_json(msg='Must specify a "zone"', changed=False)
-
- json_output = {'zone': zone}
- if state in ['absent', 'deleted']:
- json_output['state'] = 'absent'
- (changed, terminated_instance_names) = terminate_instances(
- module, gce, inames, zone)
-
- # based on what user specified, return the same variable, although
- # value could be different if an instance could not be destroyed
- if instance_names:
- json_output['instance_names'] = terminated_instance_names
- elif name:
- json_output['name'] = name
-
- elif state in ['active', 'present']:
- json_output['state'] = 'present'
- (changed, instance_data, instance_name_list) = create_instances(
- module, gce, inames)
- json_output['instance_data'] = instance_data
- if instance_names:
- json_output['instance_names'] = instance_name_list
- elif name:
- json_output['name'] = name
-
- json_output['changed'] = changed
- module.exit_json(**json_output)
-
-# import module snippets
-from ansible.module_utils.basic import *
-from ansible.module_utils.gce import *
-if __name__ == '__main__':
- main()
diff --git a/playbooks/gce/openshift-cluster/list.yml b/playbooks/gce/openshift-cluster/list.yml
index 34dcd2496..34ab09533 100644
--- a/playbooks/gce/openshift-cluster/list.yml
+++ b/playbooks/gce/openshift-cluster/list.yml
@@ -16,18 +16,8 @@
groups: oo_list_hosts
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
ansible_become: "{{ deployment_vars[deployment_type].become }}"
+ oo_public_ipv4: "{{ hostvars[item].gce_public_ip }}"
+ oo_private_ipv4: "{{ hostvars[item].gce_private_ip }}"
with_items: "{{ groups[scratch_group] | default([], true) | difference(['localhost']) | difference(groups.status_terminated | default([], true)) }}"
-
-- name: List Hosts
- hosts: oo_list_hosts
-
-- name: List Hosts
- hosts: localhost
- become: no
- connection: local
- gather_facts: no
- vars_files:
- - vars.yml
- tasks:
- debug:
msg: "{{ hostvars | oo_select_keys(groups[scratch_group] | default([])) | oo_pretty_print_cluster }}"
diff --git a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml
index 7c8189224..b7604580c 100644
--- a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml
+++ b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml
@@ -9,6 +9,7 @@
project_id: "{{ lookup('env', 'gce_project_id') }}"
zone: "{{ lookup('env', 'zone') }}"
network: "{{ lookup('env', 'network') }}"
+ subnetwork: "{{ lookup('env', 'subnetwork') | default(omit, True) }}"
# unsupported in 1.9.+
#service_account_permissions: "datastore,logging-write"
tags:
diff --git a/playbooks/libvirt/openshift-cluster/list.yml b/playbooks/libvirt/openshift-cluster/list.yml
index 86d5d0aad..579cd7ac6 100644
--- a/playbooks/libvirt/openshift-cluster/list.yml
+++ b/playbooks/libvirt/openshift-cluster/list.yml
@@ -16,18 +16,8 @@
groups: oo_list_hosts
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
ansible_become: "{{ deployment_vars[deployment_type].become }}"
+ oo_public_ipv4: ""
+ oo_private_ipv4: "{{ hostvars[item].libvirt_ip_address }}"
with_items: "{{ groups[scratch_group] | default([]) | difference(['localhost']) }}"
-
-- name: List Hosts
- hosts: oo_list_hosts
-
-- name: List Hosts
- hosts: localhost
- become: no
- connection: local
- gather_facts: no
- vars_files:
- - vars.yml
- tasks:
- debug:
msg: "{{ hostvars | oo_select_keys(groups[scratch_group] | default([])) | oo_pretty_print_cluster }}"
diff --git a/playbooks/openstack/openshift-cluster/launch.yml b/playbooks/openstack/openshift-cluster/launch.yml
index eb2c4269a..7e037f2af 100644
--- a/playbooks/openstack/openshift-cluster/launch.yml
+++ b/playbooks/openstack/openshift-cluster/launch.yml
@@ -25,7 +25,7 @@
- name: Create or Update OpenStack Stack
command: 'heat {{ heat_stack_action }} -f {{ openstack_infra_heat_stack }}
- --timeout 3
+ --timeout {{ openstack_heat_timeout }}
-P cluster_env={{ cluster_env }}
-P cluster_id={{ cluster_id }}
-P subnet_24_prefix={{ openstack_subnet_24_prefix }}
diff --git a/playbooks/openstack/openshift-cluster/list.yml b/playbooks/openstack/openshift-cluster/list.yml
index de68f5207..6c6f671be 100644
--- a/playbooks/openstack/openshift-cluster/list.yml
+++ b/playbooks/openstack/openshift-cluster/list.yml
@@ -17,18 +17,8 @@
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
ansible_ssh_host: "{{ hostvars[item].ansible_ssh_host | default(item) }}"
ansible_become: "{{ deployment_vars[deployment_type].become }}"
+ oo_public_ipv4: "{{ hostvars[item].openstack.public_v4 }}"
+ oo_private_ipv4: "{{ hostvars[item].openstack.private_v4 }}"
with_items: "{{ groups[scratch_group] | default([]) | difference(['localhost']) }}"
-
-- name: List Hosts
- hosts: oo_list_hosts
-
-- name: List Hosts
- hosts: localhost
- become: no
- connection: local
- gather_facts: no
- vars_files:
- - vars.yml
- tasks:
- debug:
msg: "{{ hostvars | oo_select_keys(groups[scratch_group] | default([])) | oo_pretty_print_cluster('meta-') }}"
diff --git a/playbooks/openstack/openshift-cluster/vars.yml b/playbooks/openstack/openshift-cluster/vars.yml
index 62111dacf..79b336ce7 100644
--- a/playbooks/openstack/openshift-cluster/vars.yml
+++ b/playbooks/openstack/openshift-cluster/vars.yml
@@ -14,6 +14,8 @@ openstack_ssh_access_from: "{{ lookup('oo_option', 'ssh_from') |
default('0.0.0.0/0', True) }}"
openstack_node_port_access_from: "{{ lookup('oo_option', 'node_port_from') |
default('0.0.0.0/0', True) }}"
+openstack_heat_timeout: "{{ lookup('oo_option', 'heat_timeout') |
+ default('3', True) }}"
openstack_flavor:
etcd: "{{ lookup('oo_option', 'etcd_flavor' ) | default('m1.small', True) }}"
master: "{{ lookup('oo_option', 'master_flavor' ) | default('m1.small', True) }}"
diff --git a/roles/openshift_facts/tasks/main.yml b/roles/openshift_facts/tasks/main.yml
index b0785a9e4..4d4a232cc 100644
--- a/roles/openshift_facts/tasks/main.yml
+++ b/roles/openshift_facts/tasks/main.yml
@@ -38,6 +38,8 @@
no_proxy: "{{ openshift_no_proxy | default(None) }}"
generate_no_proxy_hosts: "{{ openshift_generate_no_proxy_hosts | default(True) }}"
no_proxy_internal_hostnames: "{{ openshift_no_proxy_internal_hostnames | default(None) }}"
+ sdn_network_plugin_name: "{{ os_sdn_network_plugin_name | default(None) }}"
+ use_openshift_sdn: "{{ openshift_use_openshift_sdn | default(None) }}"
- name: Set repoquery command
set_fact:
diff --git a/roles/openshift_loadbalancer/meta/main.yml b/roles/openshift_loadbalancer/meta/main.yml
index e1d78cfd0..0b29df2a0 100644
--- a/roles/openshift_loadbalancer/meta/main.yml
+++ b/roles/openshift_loadbalancer/meta/main.yml
@@ -17,4 +17,9 @@ dependencies:
port: "9000/tcp"
- service: haproxy balance
port: "{{ openshift_master_api_port | default(8443) }}/tcp"
+- role: os_firewall
+ os_firewall_allow:
+ - service: nuage mon
+ port: "{{ nuage_mon_rest_server_port | default(9443) }}/tcp"
+ when: openshift_use_nuage | default(false) | bool
- role: openshift_repos
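The added firewall entry only applies when Nuage is enabled. A minimal inventory sketch using the variables the new dependency references (the port override is optional and defaults to 9443):

    openshift_use_nuage: true
    nuage_mon_rest_server_port: "9443"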
diff --git a/roles/openshift_master/tasks/systemd_units.yml b/roles/openshift_master/tasks/systemd_units.yml
index 56110c28f..11e010716 100644
--- a/roles/openshift_master/tasks/systemd_units.yml
+++ b/roles/openshift_master/tasks/systemd_units.yml
@@ -127,16 +127,22 @@
- name: Preserve Master Proxy Config options
command: grep PROXY /etc/sysconfig/{{ openshift.common.service_type }}-master
- register: master_proxy
+ register: master_proxy_result
failed_when: false
changed_when: false
+- set_fact:
+ master_proxy: "{{ master_proxy_result.stdout_lines | default([]) }}"
+
- name: Preserve Master AWS options
command: grep AWS_ /etc/sysconfig/{{ openshift.common.service_type }}-master
- register: master_aws
+ register: master_aws_result
failed_when: false
changed_when: false
+- set_fact:
+ master_aws: "{{ master_aws_result.stdout_lines | default([]) }}"
+
- name: Create the master service env file
template:
src: "atomic-openshift-master.j2"
@@ -144,17 +150,3 @@
backup: true
notify:
- restart master
-
-- name: Restore Master Proxy Config Options
- lineinfile:
- dest: /etc/sysconfig/{{ openshift.common.service_type }}-master
- line: "{{ item }}"
- with_items: "{{ master_proxy.stdout_lines | default([]) }}"
- when: master_proxy.rc == 0 and 'http_proxy' not in openshift.common and 'https_proxy' not in openshift.common
-
-- name: Restore Master AWS Options
- lineinfile:
- dest: /etc/sysconfig/{{ openshift.common.service_type }}-master
- line: "{{ item }}"
- with_items: "{{ master_aws.stdout_lines | default([]) }}"
- when: master_aws.rc == 0 and not (openshift_cloudprovider_kind is defined and openshift_cloudprovider_kind == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_secret_key is defined)
diff --git a/roles/openshift_master/templates/atomic-openshift-master.j2 b/roles/openshift_master/templates/atomic-openshift-master.j2
index 26f0240ec..7aea89578 100644
--- a/roles/openshift_master/templates/atomic-openshift-master.j2
+++ b/roles/openshift_master/templates/atomic-openshift-master.j2
@@ -8,6 +8,11 @@ IMAGE_VERSION={{ openshift_image_tag }}
AWS_ACCESS_KEY_ID={{ openshift_cloudprovider_aws_access_key }}
AWS_SECRET_ACCESS_KEY={{ openshift_cloudprovider_aws_secret_key }}
{% endif %}
+{% if not (openshift_cloudprovider_kind is defined and openshift_cloudprovider_kind == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_secret_key is defined) %}
+{% for item in master_aws %}
+{{ item }}
+{% endfor %}
+{% endif %}
{% if 'api_env_vars' in openshift.master or 'controllers_env_vars' in openshift.master -%}
{% for key, value in openshift.master.api_env_vars.items() | default([]) | union(openshift.master.controllers_env_vars.items() | default([])) -%}
@@ -26,3 +31,8 @@ HTTPS_PROXY={{ openshift.common.https_proxy | default('')}}
{% if 'no_proxy' in openshift.common %}
NO_PROXY={{ openshift.common.no_proxy | default('') | join(',') }},{{ openshift.common.portal_net }},{{ openshift.master.sdn_cluster_network_cidr }}
{% endif %}
+{% if not ('http_proxy' in openshift.common or 'https_proxy' in openshift.common or 'no_proxy' in openshift.common) %}
+{% for item in master_proxy %}
+{{ item }}
+{% endfor %}
+{% endif %}
diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2
index 4d45e8591..a52ae578c 100644
--- a/roles/openshift_master/templates/master.yaml.v1.j2
+++ b/roles/openshift_master/templates/master.yaml.v1.j2
@@ -131,6 +131,7 @@ kubernetesMasterConfig:
proxyClientInfo:
certFile: master.proxy-client.crt
keyFile: master.proxy-client.key
+ schedulerArguments: {{ openshift_master_scheduler_args | default(None) | to_padded_yaml( level=3 ) }}
schedulerConfigFile: {{ openshift_master_scheduler_conf }}
servicesNodePortRange: ""
servicesSubnet: {{ openshift.common.portal_net }}
@@ -158,7 +159,7 @@ masterPublicURL: {{ openshift.master.public_api_url }}
networkConfig:
clusterNetworkCIDR: {{ openshift.master.sdn_cluster_network_cidr }}
hostSubnetLength: {{ openshift.master.sdn_host_subnet_length }}
-{% if openshift.common.use_openshift_sdn or openshift.common.use_nuage %}
+{% if openshift.common.use_openshift_sdn or openshift.common.use_nuage or openshift.common.sdn_network_plugin_name == 'cni' %}
networkPluginName: {{ openshift.common.sdn_network_plugin_name }}
{% endif %}
# serviceNetworkCIDR must match kubernetesMasterConfig.servicesSubnet
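With the new scheduler_args fact and the schedulerArguments line above, extra scheduler flags can be fed in from the inventory. A hedged sketch, assuming the same dict-of-lists shape as the other *_args variables (flag names and values are illustrative, not defaults):

    openshift_master_scheduler_args:
      kube-api-qps:
      - "100"
      kube-api-burst:
      - "150"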
diff --git a/roles/openshift_master_facts/tasks/main.yml b/roles/openshift_master_facts/tasks/main.yml
index e0c0fc644..62ac1aef5 100644
--- a/roles/openshift_master_facts/tasks/main.yml
+++ b/roles/openshift_master_facts/tasks/main.yml
@@ -80,3 +80,4 @@
controllers_env_vars: "{{ openshift_master_controllers_env_vars | default(None) }}"
audit_config: "{{ openshift_master_audit_config | default(None) }}"
metrics_public_url: "{% if openshift_hosted_metrics_deploy | default(false) %}https://{{ metrics_hostname }}/hawkular/metrics{% endif %}"
+ scheduler_args: "{{ openshift_master_scheduler_args | default(None) }}"
diff --git a/roles/openshift_metrics/tasks/install.yml b/roles/openshift_metrics/tasks/install.yml
index 4976c7153..98e21375a 100644
--- a/roles/openshift_metrics/tasks/install.yml
+++ b/roles/openshift_metrics/tasks/install.yml
@@ -37,6 +37,24 @@
system:serviceaccount:openshift-infra:metrics-deployer
when: "'system:serviceaccount:openshift-infra:metrics-deployer' not in edit_rolebindings.stdout"
+- name: Test hawkular view permissions
+ command: >
+ {{ openshift.common.client_binary }}
+ --config={{ openshift_metrics_kubeconfig }}
+ --namespace openshift-infra
+ get rolebindings -o jsonpath='{.items[?(@.metadata.name == "view")].userNames}'
+ register: view_rolebindings
+ changed_when: false
+
+- name: Add view permissions to hawkular SA
+ command: >
+ {{ openshift.common.client_binary }} adm
+ --config={{ openshift_metrics_kubeconfig }}
+ --namespace openshift-infra
+ policy add-role-to-user view
+ system:serviceaccount:openshift-infra:hawkular
+ when: "'system:serviceaccount:openshift-infra:hawkular' not in view_rolebindings"
+
- name: Test cluster-reader permissions
command: >
{{ openshift.common.client_binary }}
@@ -71,7 +89,14 @@
set_fact:
deployer_cmd: "{{ openshift.common.client_binary }} process -f \
{{ hosted_base }}/metrics-deployer.yaml -v \
- HAWKULAR_METRICS_HOSTNAME={{ metrics_hostname }},USE_PERSISTENT_STORAGE={{metrics_persistence | string | lower }},DYNAMICALLY_PROVISION_STORAGE={{metrics_dynamic_vol | string | lower }},METRIC_DURATION={{ openshift.hosted.metrics.duration }},METRIC_RESOLUTION={{ openshift.hosted.metrics.resolution }}{{ image_prefix }}{{ image_version }},MODE={{ deployment_mode }} \
+ HAWKULAR_METRICS_HOSTNAME={{ metrics_hostname }} \
+ -v USE_PERSISTENT_STORAGE={{metrics_persistence | string | lower }} \
+ -v DYNAMICALLY_PROVISION_STORAGE={{metrics_dynamic_vol | string | lower }} \
+ -v METRIC_DURATION={{ openshift.hosted.metrics.duration }} \
+ -v METRIC_RESOLUTION={{ openshift.hosted.metrics.resolution }}
+ {{ image_prefix }} \
+ {{ image_version }} \
+ -v MODE={{ deployment_mode }} \
| {{ openshift.common.client_binary }} --namespace openshift-infra \
--config={{ openshift_metrics_kubeconfig }} \
create -o name -f -"
diff --git a/roles/openshift_metrics/tasks/main.yaml b/roles/openshift_metrics/tasks/main.yaml
index 88432a9f8..26af279b1 100644
--- a/roles/openshift_metrics/tasks/main.yaml
+++ b/roles/openshift_metrics/tasks/main.yaml
@@ -36,10 +36,8 @@
metrics_persistence: "{{ openshift.hosted.metrics.storage_kind | default(none) is not none }}"
metrics_dynamic_vol: "{{ openshift.hosted.metrics.storage_kind | default(none) == 'dynamic' }}"
metrics_template_dir: "{{ openshift.common.config_base if openshift.common.is_containerized | bool else '/usr/share/openshift' }}/examples/infrastructure-templates/{{ 'origin' if deployment_type == 'origin' else 'enterprise' }}"
- cassandra_nodes: "{{ ',CASSANDRA_NODES=' ~ openshift.hosted.metrics.cassandra_nodes if 'cassandra' in openshift.hosted.metrics else '' }}"
- cassandra_pv_size: "{{ ',CASSANDRA_PV_SIZE=' ~ openshift.hosted.metrics.storage_volume_size if openshift.hosted.metrics.storage_volume_size | default(none) is not none else '' }}"
- image_prefix: "{{ ',IMAGE_PREFIX=' ~ openshift.hosted.metrics.deployer_prefix if 'deployer_prefix' in openshift.hosted.metrics else '' }}"
- image_version: "{{ ',IMAGE_VERSION=' ~ openshift.hosted.metrics.deployer_version if 'deployer_version' in openshift.hosted.metrics else '' }}"
+ image_prefix: "{{ '-v IMAGE_PREFIX=' ~ openshift.hosted.metrics.deployer.prefix if 'prefix' in openshift.hosted.metrics.deployer else '' }}"
+ image_version: "{{ '-v IMAGE_VERSION=' ~ openshift.hosted.metrics.deployer.version if 'version' in openshift.hosted.metrics.deployer else '' }}"
- name: Check for existing metrics pods
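The reworked image_prefix and image_version facts read openshift.hosted.metrics.deployer.prefix and .version. In an inventory these typically come from deployer variables along these lines (variable names, registry, and tag are an assumption for illustration, not taken from this diff):

    openshift_hosted_metrics_deployer_prefix: registry.example.com:5000/openshift3/
    openshift_hosted_metrics_deployer_version: "3.3.1"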
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index 474df497e..6022694bc 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -77,6 +77,11 @@
- set_fact:
ovs_service_status_changed: "{{ ovs_start_result | changed }}"
+- file:
+ dest: "{{ (openshift_node_kubelet_args|default({'config':None})).config}}"
+ state: directory
+ when: openshift_node_kubelet_args is defined and 'config' in openshift_node_kubelet_args
+
# TODO: add the validate parameter when there is a validation command to run
- name: Create the Node config
template:
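The new file task creates the directory named by the kubelet "config" argument before the node config is templated. A hedged inventory sketch (the path is hypothetical; the task above treats the value as a plain directory path):

    openshift_node_kubelet_args:
      config: /etc/origin/node/manifests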
diff --git a/roles/openshift_node/templates/node.yaml.v1.j2 b/roles/openshift_node/templates/node.yaml.v1.j2
index 9bcaf4d84..55ae4bf54 100644
--- a/roles/openshift_node/templates/node.yaml.v1.j2
+++ b/roles/openshift_node/templates/node.yaml.v1.j2
@@ -27,7 +27,7 @@ networkPluginName: {{ openshift.common.sdn_network_plugin_name }}
# deprecates networkPluginName above. The two should match.
networkConfig:
mtu: {{ openshift.node.sdn_mtu }}
-{% if openshift.common.use_openshift_sdn | bool or openshift.common.use_nuage | bool %}
+{% if openshift.common.use_openshift_sdn | bool or openshift.common.use_nuage | bool or openshift.common.sdn_network_plugin_name == 'cni' %}
networkPluginName: {{ openshift.common.sdn_network_plugin_name }}
{% endif %}
{% if openshift.node.set_node_ip | bool %}
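Combined with the sdn_network_plugin_name and use_openshift_sdn facts added in roles/openshift_facts above, the two template changes let a third-party CNI plugin be selected from the inventory. A minimal sketch using the variable names and the value the templates check for:

    openshift_use_openshift_sdn: false
    os_sdn_network_plugin_name: cni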