 18 files changed, 701 insertions(+), 30 deletions(-)
diff --git a/git/pylint.sh b/git/pylint.sh
index f29c055dc..3acf9cc8c 100755
--- a/git/pylint.sh
+++ b/git/pylint.sh
@@ -7,6 +7,7 @@ ANSIBLE_UPSTREAM_FILES=(
     'inventory/libvirt/hosts/libvirt_generic.py'
     'inventory/openstack/hosts/nova.py'
     'lookup_plugins/sequence.py'
+    'playbooks/gce/openshift-cluster/library/gce.py'
   )
 
 OLDREV=$1
diff --git a/playbooks/aws/openshift-cluster/config.yml b/playbooks/aws/openshift-cluster/config.yml
index 8402b3579..4839c100b 100644
--- a/playbooks/aws/openshift-cluster/config.yml
+++ b/playbooks/aws/openshift-cluster/config.yml
@@ -1,7 +1,20 @@
+- hosts: localhost
+  gather_facts: no
+  tasks:
+  - include_vars: vars.yml
+  - include_vars: cluster_hosts.yml
+  - add_host:
+      name: "{{ item }}"
+      groups: l_oo_all_hosts
+    with_items: g_all_hosts
+
+- hosts: l_oo_all_hosts
+  gather_facts: no
+  tasks:
+  - include_vars: vars.yml
+  - include_vars: cluster_hosts.yml
+
 - include: ../../common/openshift-cluster/config.yml
-  vars_files:
-  - ../../aws/openshift-cluster/vars.yml
-  - ../../aws/openshift-cluster/cluster_hosts.yml
   vars:
     g_ssh_user:     "{{ deployment_vars[deployment_type].ssh_user }}"
     g_sudo:         "{{ deployment_vars[deployment_type].become }}"
@@ -21,3 +34,4 @@
     os_sdn_network_plugin_name: "{{ lookup('oo_option', 'sdn_network_plugin_name') }}"
     openshift_use_flannel: "{{ lookup('oo_option', 'use_flannel') }}"
     openshift_use_fluentd: "{{ lookup('oo_option', 'use_fluentd') }}"
+    openshift_use_dnsmasq: false
diff --git a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml
index 7d5776ae6..d22c86cda 100644
--- a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml
+++ b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml
@@ -150,6 +150,7 @@
     groups: "{{ instance_groups }}"
     ec2_private_ip_address: "{{ item.1.private_ip }}"
     ec2_ip_address: "{{ item.1.public_ip }}"
+    ec2_tag_sub-host-type: "{{ sub_host_type }}"
     openshift_node_labels: "{{ node_label }}"
     logrotate_scripts: "{{ logrotate }}"
   with_together:
diff --git a/playbooks/aws/openshift-cluster/templates/user_data.j2 b/playbooks/aws/openshift-cluster/templates/user_data.j2
index 2a3974a8c..b1087f9c4 100644
--- a/playbooks/aws/openshift-cluster/templates/user_data.j2
+++ b/playbooks/aws/openshift-cluster/templates/user_data.j2
@@ -3,8 +3,10 @@
 mounts:
 - [ xvdb ]
 - [ ephemeral0 ]
+{% endif %}
 
 write_files:
+{% if type in ['node', 'master'] and 'docker' in volume_defs[type] %}
 - content: |
     DEVS=/dev/xvdb
     VG=docker_vg
@@ -12,8 +14,7 @@ write_files:
   owner: root:root
   permissions: '0644'
 {% endif %}
-
-{% if deployment_vars[deployment_type].become %}
+{% if deployment_vars[deployment_type].become | bool %}
 - path: /etc/sudoers.d/99-{{ deployment_vars[deployment_type].ssh_user }}-cloud-init-requiretty
   permissions: 440
   content: |
diff --git a/playbooks/aws/openshift-cluster/update.yml b/playbooks/aws/openshift-cluster/update.yml
index bd31c42dd..d762203b2 100644
--- a/playbooks/aws/openshift-cluster/update.yml
+++ b/playbooks/aws/openshift-cluster/update.yml
@@ -1,12 +1,25 @@
 ---
+- hosts: localhost
+  gather_facts: no
+  tasks:
+  - include_vars: vars.yml
+  - include_vars: cluster_hosts.yml
+  - add_host:
+      name: "{{ item }}"
+      groups: l_oo_all_hosts
+    with_items: g_all_hosts
+
+- hosts: l_oo_all_hosts
+  gather_facts: no
+  tasks:
+  - include_vars: vars.yml
+  - include_vars: cluster_hosts.yml
+
 - name: Update - Populate oo_hosts_to_update group
   hosts: localhost
   connection: local
   become: no
   gather_facts: no
-  vars_files:
-  - vars.yml
-  - cluster_hosts.yml
   tasks:
   - name: Update - Evaluate oo_hosts_to_update
     add_host:
@@ -14,7 +27,7 @@
       groups: oo_hosts_to_update
       ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
       ansible_become: "{{ deployment_vars[deployment_type].become }}"
-    with_items: "{{ g_all_hosts | default([]) }}"
+    with_items: g_all_hosts | default([])
 
 - include: ../../common/openshift-cluster/update_repos_and_packages.yml
diff --git a/playbooks/aws/openshift-cluster/vars.yml b/playbooks/aws/openshift-cluster/vars.yml
index 8bda72ac2..d774187f0 100644
--- a/playbooks/aws/openshift-cluster/vars.yml
+++ b/playbooks/aws/openshift-cluster/vars.yml
@@ -17,7 +17,7 @@ deployment_rhel7_ent_base:
 deployment_vars:
   origin:
     # centos-7, requires marketplace
-    image: "{{ lookup('oo_option', 'ec2_image') | default('ami-61bbf104', True) }}"
+    image: "{{ lookup('oo_option', 'ec2_image') | default('ami-6d1c2007', True) }}"
     image_name: "{{ lookup('oo_option', 'ec2_image_name') | default(None, True) }}"
     region: "{{ lookup('oo_option', 'ec2_region') | default('us-east-1', True) }}"
     ssh_user: centos
diff --git a/playbooks/common/openshift-cluster/update_repos_and_packages.yml b/playbooks/common/openshift-cluster/update_repos_and_packages.yml
index 1474bb3ca..0a37d4597 100644
--- a/playbooks/common/openshift-cluster/update_repos_and_packages.yml
+++ b/playbooks/common/openshift-cluster/update_repos_and_packages.yml
@@ -1,4 +1,6 @@
 ---
+- include: evaluate_groups.yml
+
 - hosts: oo_hosts_to_update
   vars:
     openshift_deployment_type: "{{ deployment_type }}"
diff --git a/playbooks/gce/openshift-cluster/config.yml b/playbooks/gce/openshift-cluster/config.yml
index 475d29293..b973c513f 100644
--- a/playbooks/gce/openshift-cluster/config.yml
+++ b/playbooks/gce/openshift-cluster/config.yml
@@ -1,8 +1,23 @@
 ---
+- hosts: localhost
+  gather_facts: no
+  tasks:
+  - include_vars: vars.yml
+  - include_vars: cluster_hosts.yml
+  - add_host:
+      name: "{{ item }}"
+      groups: l_oo_all_hosts
+      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+      ansible_become: "{{ deployment_vars[deployment_type].become }}"
+    with_items: g_all_hosts
+
+- hosts: l_oo_all_hosts
+  gather_facts: no
+  tasks:
+  - include_vars: vars.yml
+  - include_vars: cluster_hosts.yml
+
 - include: ../../common/openshift-cluster/config.yml
-  vars_files:
-  - ../../gce/openshift-cluster/vars.yml
-  - ../../gce/openshift-cluster/cluster_hosts.yml
   vars:
     g_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
     g_sudo: "{{ deployment_vars[deployment_type].become }}"
diff --git a/playbooks/gce/openshift-cluster/library/gce.py b/playbooks/gce/openshift-cluster/library/gce.py
new file mode 100644
index 000000000..fcaa3b850
--- /dev/null
+++ b/playbooks/gce/openshift-cluster/library/gce.py
@@ -0,0 +1,543 @@
+#!/usr/bin/python
+# Copyright 2013 Google Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: gce
+version_added: "1.4"
+short_description: create or terminate GCE instances
+description:
+     - Creates or terminates Google Compute Engine (GCE) instances.  See
+       U(https://cloud.google.com/products/compute-engine) for an overview.
+       Full install/configuration instructions for the gce* modules can
+       be found in the comments of ansible/test/gce_tests.py.
+options:
+  image:
+    description:
+       - image string to use for the instance
+    required: false
+    default: "debian-7"
+  instance_names:
+    description:
+      - a comma-separated list of instance names to create or destroy
+    required: false
+    default: null
+  machine_type:
+    description:
+      - machine type to use for the instance, use 'n1-standard-1' by default
+    required: false
+    default: "n1-standard-1"
+  metadata:
+    description:
+      - a hash/dictionary of custom data for the instance;
+        '{"key":"value", ...}'
+    required: false
+    default: null
+  service_account_email:
+    version_added: "1.5.1"
+    description:
+      - service account email
+    required: false
+    default: null
+  service_account_permissions:
+    version_added: "2.0"
+    description:
+      - service account permissions (see
+        U(https://cloud.google.com/sdk/gcloud/reference/compute/instances/create),
+        --scopes section for detailed information)
+    required: false
+    default: null
+    choices: [
+      "bigquery", "cloud-platform", "compute-ro", "compute-rw",
+      "computeaccounts-ro", "computeaccounts-rw", "datastore", "logging-write",
+      "monitoring", "sql", "sql-admin", "storage-full", "storage-ro",
+      "storage-rw", "taskqueue", "userinfo-email"
+    ]
+  pem_file:
+    version_added: "1.5.1"
+    description:
+      - path to the pem file associated with the service account email
+    required: false
+    default: null
+  project_id:
+    version_added: "1.5.1"
+    description:
+      - your GCE project ID
+    required: false
+    default: null
+  name:
+    description:
+      - identifier when working with a single instance
+    required: false
+  network:
+    description:
+      - name of the network, 'default' will be used if not specified
+    required: false
+    default: "default"
+  persistent_boot_disk:
+    description:
+      - if set, create the instance with a persistent boot disk
+    required: false
+    default: "false"
+  disks:
+    description:
+      - a list of persistent disks to attach to the instance; a string value
+        gives the name of the disk; alternatively, a dictionary value can
+        define 'name' and 'mode' ('READ_ONLY' or 'READ_WRITE'). The first entry
+        will be the boot disk (which must be READ_WRITE).
+    required: false
+    default: null
+    version_added: "1.7"
+  state:
+    description:
+      - desired state of the resource
+    required: false
+    default: "present"
+    choices: ["active", "present", "absent", "deleted"]
+  tags:
+    description:
+      - a comma-separated list of tags to associate with the instance
+    required: false
+    default: null
+  zone:
+    description:
+      - the GCE zone to use
+    required: true
+    default: "us-central1-a"
+  ip_forward:
+    version_added: "1.9"
+    description:
+      - set to true if the instance can forward ip packets (useful for
+        gateways)
+    required: false
+    default: "false"
+  external_ip:
+    version_added: "1.9"
+    description:
+      - type of external ip, ephemeral by default
+    required: false
+    default: "ephemeral"
+  disk_auto_delete:
+    version_added: "1.9"
+    description:
+      - if set boot disk will be removed after instance destruction
+    required: false
+    default: "true"
+
+requirements:
+    - "python >= 2.6"
+    - "apache-libcloud >= 0.13.3"
+notes:
+  - Either I(name) or I(instance_names) is required.
+author: "Eric Johnson (@erjohnso) <erjohnso@google.com>"
+'''
+
+EXAMPLES = '''
+# Basic provisioning example.  Create a single Debian 7 instance in the
+# us-central1-a Zone of n1-standard-1 machine type.
+- local_action:
+    module: gce
+    name: test-instance
+    zone: us-central1-a
+    machine_type: n1-standard-1
+    image: debian-7
+
+# Example using defaults and with metadata to create a single 'foo' instance
+- local_action:
+    module: gce
+    name: foo
+    metadata: '{"db":"postgres", "group":"qa", "id":500}'
+
+
+# Launch instances from a control node, runs some tasks on the new instances,
+# and then terminate them
+- name: Create a sandbox instance
+  hosts: localhost
+  vars:
+    names: foo,bar
+    machine_type: n1-standard-1
+    image: debian-6
+    zone: us-central1-a
+    service_account_email: unique-email@developer.gserviceaccount.com
+    pem_file: /path/to/pem_file
+    project_id: project-id
+  tasks:
+    - name: Launch instances
+      local_action: gce instance_names={{names}} machine_type={{machine_type}}
+                    image={{image}} zone={{zone}}
+                    service_account_email={{ service_account_email }}
+                    pem_file={{ pem_file }} project_id={{ project_id }}
+      register: gce
+    - name: Wait for SSH to come up
+      local_action: wait_for host={{item.public_ip}} port=22 delay=10
+                    timeout=60 state=started
+      with_items: {{gce.instance_data}}
+
+- name: Configure instance(s)
+  hosts: launched
+  sudo: True
+  roles:
+    - my_awesome_role
+    - my_awesome_tasks
+
+- name: Terminate instances
+  hosts: localhost
+  connection: local
+  tasks:
+    - name: Terminate instances that were previously launched
+      local_action:
+        module: gce
+        state: 'absent'
+        instance_names: {{gce.instance_names}}
+
+'''
+
+try:
+    import libcloud
+    from libcloud.compute.types import Provider
+    from libcloud.compute.providers import get_driver
+    from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
+        ResourceExistsError, ResourceInUseError, ResourceNotFoundError
+    _ = Provider.GCE
+    HAS_LIBCLOUD = True
+except ImportError:
+    HAS_LIBCLOUD = False
+
+try:
+    from ast import literal_eval
+    HAS_PYTHON26 = True
+except ImportError:
+    HAS_PYTHON26 = False
+
+
+def get_instance_info(inst):
+    """Retrieves instance information from an instance object and returns it
+    as a dictionary.
+
+    """
+    metadata = {}
+    if 'metadata' in inst.extra and 'items' in inst.extra['metadata']:
+        for md in inst.extra['metadata']['items']:
+            metadata[md['key']] = md['value']
+
+    try:
+        netname = inst.extra['networkInterfaces'][0]['network'].split('/')[-1]
+    except:
+        netname = None
+    if 'disks' in inst.extra:
+        disk_names = [disk_info['source'].split('/')[-1]
+                      for disk_info
+                      in sorted(inst.extra['disks'],
+                                key=lambda disk_info: disk_info['index'])]
+    else:
+        disk_names = []
+
+    if len(inst.public_ips) == 0:
+        public_ip = None
+    else:
+        public_ip = inst.public_ips[0]
+
+    return({
+        'image': inst.image is not None and inst.image.split('/')[-1] or None,
+        'disks': disk_names,
+        'machine_type': inst.size,
+        'metadata': metadata,
+        'name': inst.name,
+        'network': netname,
+        'private_ip': inst.private_ips[0],
+        'public_ip': public_ip,
+        'status': ('status' in inst.extra) and inst.extra['status'] or None,
+        'tags': ('tags' in inst.extra) and inst.extra['tags'] or [],
+        'zone': ('zone' in inst.extra) and inst.extra['zone'].name or None,
+    })
+
+
+def create_instances(module, gce, instance_names):
+    """Creates new instances. Attributes other than instance_names are picked
+    up from 'module'
+
+    module : AnsibleModule object
+    gce: authenticated GCE libcloud driver
+    instance_names: python list of instance names to create
+
+    Returns:
+        A list of dictionaries with instance information
+        about the instances that were launched.
+
+    """
+    image = module.params.get('image')
+    machine_type = module.params.get('machine_type')
+    metadata = module.params.get('metadata')
+    network = module.params.get('network')
+    persistent_boot_disk = module.params.get('persistent_boot_disk')
+    disks = module.params.get('disks')
+    state = module.params.get('state')
+    tags = module.params.get('tags')
+    zone = module.params.get('zone')
+    ip_forward = module.params.get('ip_forward')
+    external_ip = module.params.get('external_ip')
+    disk_auto_delete = module.params.get('disk_auto_delete')
+    service_account_permissions = module.params.get('service_account_permissions')
+    service_account_email = module.params.get('service_account_email')
+
+    if external_ip == "none":
+        external_ip = None
+
+    new_instances = []
+    changed = False
+
+    lc_image = gce.ex_get_image(image)
+    lc_disks = []
+    disk_modes = []
+    for i, disk in enumerate(disks or []):
+        if isinstance(disk, dict):
+            lc_disks.append(gce.ex_get_volume(disk['name']))
+            disk_modes.append(disk['mode'])
+        else:
+            lc_disks.append(gce.ex_get_volume(disk))
+            # boot disk is implicitly READ_WRITE
+            disk_modes.append('READ_ONLY' if i > 0 else 'READ_WRITE')
+    lc_network = gce.ex_get_network(network)
+    lc_machine_type = gce.ex_get_size(machine_type)
+    lc_zone = gce.ex_get_zone(zone)
+
+    # Try to convert the user's metadata value into the format expected
+    # by GCE.  First try to ensure user has proper quoting of a
+    # dictionary-like syntax using 'literal_eval', then convert the python
+    # dict into a python list of 'key' / 'value' dicts.  Should end up
+    # with:
+    # [ {'key': key1, 'value': value1}, {'key': key2, 'value': value2}, ...]
+    if metadata:
+        if isinstance(metadata, dict):
+            md = metadata
+        else:
+            try:
+                md = literal_eval(str(metadata))
+                if not isinstance(md, dict):
+                    raise ValueError('metadata must be a dict')
+            except ValueError as e:
+                module.fail_json(msg='bad metadata: %s' % str(e))
+            except SyntaxError as e:
+                module.fail_json(msg='bad metadata syntax')
+
+    if hasattr(libcloud, '__version__') and libcloud.__version__ < '0.15':
+        items = []
+        for k, v in md.items():
+            items.append({"key": k, "value": v})
+        metadata = {'items': items}
+    else:
+        metadata = md
+
+    ex_sa_perms = []
+    bad_perms = []
+    if service_account_permissions:
+        for perm in service_account_permissions:
+            if perm not in gce.SA_SCOPES_MAP.keys():
+                bad_perms.append(perm)
+        if len(bad_perms) > 0:
+            module.fail_json(msg='bad permissions: %s' % str(bad_perms))
+        if service_account_email:
+            ex_sa_perms.append({'email': service_account_email})
+        else:
+            ex_sa_perms.append({'email': "default"})
+        ex_sa_perms[0]['scopes'] = service_account_permissions
+
+    # These variables all have default values but check just in case
+    if not lc_image or not lc_network or not lc_machine_type or not lc_zone:
+        module.fail_json(msg='Missing required create instance variable',
+                         changed=False)
+
+    for name in instance_names:
+        pd = None
+        if lc_disks:
+            pd = lc_disks[0]
+        elif persistent_boot_disk:
+            try:
+                pd = gce.create_volume(None, "%s" % name, image=lc_image)
+            except ResourceExistsError:
+                pd = gce.ex_get_volume("%s" % name, lc_zone)
+        inst = None
+        try:
+            inst = gce.create_node(
+                name, lc_machine_type, lc_image, location=lc_zone,
+                ex_network=network, ex_tags=tags, ex_metadata=metadata,
+                ex_boot_disk=pd, ex_can_ip_forward=ip_forward,
+                external_ip=external_ip, ex_disk_auto_delete=disk_auto_delete,
+                ex_service_accounts=ex_sa_perms
+            )
+            changed = True
+        except ResourceExistsError:
+            inst = gce.ex_get_node(name, lc_zone)
+        except GoogleBaseError as e:
+            module.fail_json(msg='Unexpected error attempting to create ' +
+                             'instance %s, error: %s' % (name, e.value))
+
+        for i, lc_disk in enumerate(lc_disks):
+            # Check whether the disk is already attached
+            if (len(inst.extra['disks']) > i):
+                attached_disk = inst.extra['disks'][i]
+                if attached_disk['source'] != lc_disk.extra['selfLink']:
+                    module.fail_json(
+                        msg=("Disk at index %d does not match: requested=%s found=%s" % (
+                            i, lc_disk.extra['selfLink'], attached_disk['source'])))
+                elif attached_disk['mode'] != disk_modes[i]:
+                    module.fail_json(
+                        msg=("Disk at index %d is in the wrong mode: requested=%s found=%s" % (
+                            i, disk_modes[i], attached_disk['mode'])))
+                else:
+                    continue
+            gce.attach_volume(inst, lc_disk, ex_mode=disk_modes[i])
+            # Work around libcloud bug: attached volumes don't get added
+            # to the instance metadata. get_instance_info() only cares about
+            # source and index.
+            if len(inst.extra['disks']) != i+1:
+                inst.extra['disks'].append(
+                    {'source': lc_disk.extra['selfLink'], 'index': i})
+
+        if inst:
+            new_instances.append(inst)
+
+    instance_names = []
+    instance_json_data = []
+    for inst in new_instances:
+        d = get_instance_info(inst)
+        instance_names.append(d['name'])
+        instance_json_data.append(d)
+
+    return (changed, instance_json_data, instance_names)
+
+
+def terminate_instances(module, gce, instance_names, zone_name):
+    """Terminates a list of instances.
+
+    module: Ansible module object
+    gce: authenticated GCE connection object
+    instance_names: a list of instance names to terminate
+    zone_name: the zone where the instances reside prior to termination
+
+    Returns a dictionary of instance names that were terminated.
+
+    """
+    changed = False
+    terminated_instance_names = []
+    for name in instance_names:
+        inst = None
+        try:
+            inst = gce.ex_get_node(name, zone_name)
+        except ResourceNotFoundError:
+            pass
+        except Exception as e:
+            module.fail_json(msg=unexpected_error_msg(e), changed=False)
+        if inst:
+            gce.destroy_node(inst)
+            terminated_instance_names.append(inst.name)
+            changed = True
+
+    return (changed, terminated_instance_names)
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            image=dict(default='debian-7'),
+            instance_names=dict(),
+            machine_type=dict(default='n1-standard-1'),
+            metadata=dict(),
+            name=dict(),
+            network=dict(default='default'),
+            persistent_boot_disk=dict(type='bool', default=False),
+            disks=dict(type='list'),
+            state=dict(choices=['active', 'present', 'absent', 'deleted'],
+                       default='present'),
+            tags=dict(type='list'),
+            zone=dict(default='us-central1-a'),
+            service_account_email=dict(),
+            service_account_permissions=dict(type='list'),
+            pem_file=dict(),
+            project_id=dict(),
+            ip_forward=dict(type='bool', default=False),
+            external_ip=dict(choices=['ephemeral', 'none'],
+                             default='ephemeral'),
+            disk_auto_delete=dict(type='bool', default=True),
+        )
+    )
+
+    if not HAS_PYTHON26:
+        module.fail_json(msg="GCE module requires python's 'ast' module, python v2.6+")
+    if not HAS_LIBCLOUD:
+        module.fail_json(msg='libcloud with GCE support (0.13.3+) required for this module')
+
+    gce = gce_connect(module)
+
+    image = module.params.get('image')
+    instance_names = module.params.get('instance_names')
+    machine_type = module.params.get('machine_type')
+    metadata = module.params.get('metadata')
+    name = module.params.get('name')
+    network = module.params.get('network')
+    persistent_boot_disk = module.params.get('persistent_boot_disk')
+    state = module.params.get('state')
+    tags = module.params.get('tags')
+    zone = module.params.get('zone')
+    ip_forward = module.params.get('ip_forward')
+    changed = False
+
+    inames = []
+    if isinstance(instance_names, list):
+        inames = instance_names
+    elif isinstance(instance_names, str):
+        inames = instance_names.split(',')
+    if name:
+        inames.append(name)
+    if not inames:
+        module.fail_json(msg='Must specify a "name" or "instance_names"',
+                         changed=False)
+    if not zone:
+        module.fail_json(msg='Must specify a "zone"', changed=False)
+
+    json_output = {'zone': zone}
+    if state in ['absent', 'deleted']:
+        json_output['state'] = 'absent'
+        (changed, terminated_instance_names) = terminate_instances(
+            module, gce, inames, zone)
+
+        # based on what user specified, return the same variable, although
+        # value could be different if an instance could not be destroyed
+        if instance_names:
+            json_output['instance_names'] = terminated_instance_names
+        elif name:
+            json_output['name'] = name
+
+    elif state in ['active', 'present']:
+        json_output['state'] = 'present'
+        (changed, instance_data, instance_name_list) = create_instances(
+            module, gce, inames)
+        json_output['instance_data'] = instance_data
+        if instance_names:
+            json_output['instance_names'] = instance_name_list
+        elif name:
+            json_output['name'] = name
+
+    json_output['changed'] = changed
+    module.exit_json(**json_output)
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.gce import *
+if __name__ == '__main__':
+    main()
diff --git a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml
index e3efd8566..c5c479052 100644
--- a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml
+++ b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml
@@ -17,6 +17,11 @@
       - clusterid-{{ cluster_id }}
       - host-type-{{ type }}
       - sub-host-type-{{ g_sub_host_type }}
+    metadata:
+      startup-script: |
+        #!/bin/bash
+        echo "Defaults:{{ deployment_vars[deployment_type].ssh_user }} !requiretty" > /etc/sudoers.d/99-{{ deployment_vars[deployment_type].ssh_user }}
+
   when: instances |length > 0
   register: gce
diff --git a/playbooks/gce/openshift-cluster/update.yml b/playbooks/gce/openshift-cluster/update.yml
index 9b7a2777a..332f27da7 100644
--- a/playbooks/gce/openshift-cluster/update.yml
+++ b/playbooks/gce/openshift-cluster/update.yml
@@ -1,12 +1,25 @@
 ---
+- hosts: localhost
+  gather_facts: no
+  tasks:
+  - include_vars: vars.yml
+  - include_vars: cluster_hosts.yml
+  - add_host:
+      name: "{{ item }}"
+      groups: l_oo_all_hosts
+    with_items: g_all_hosts
+
+- hosts: l_oo_all_hosts
+  gather_facts: no
+  tasks:
+  - include_vars: vars.yml
+  - include_vars: cluster_hosts.yml
+
 - name: Populate oo_hosts_to_update group
   hosts: localhost
   connection: local
   become: no
   gather_facts: no
-  vars_files:
-  - vars.yml
-  - cluster_hosts.yml
   tasks:
   - name: Evaluate oo_hosts_to_update
     add_host:
@@ -14,7 +27,7 @@
       groups: oo_hosts_to_update
       ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
       ansible_become: "{{ deployment_vars[deployment_type].become }}"
-    with_items: "{{ g_all_hosts | default([]) }}"
+    with_items: g_all_hosts | default([])
 
 - include: ../../common/openshift-cluster/update_repos_and_packages.yml
diff --git a/playbooks/libvirt/openshift-cluster/config.yml b/playbooks/libvirt/openshift-cluster/config.yml
index 81a6fff0d..032d4cf68 100644
--- a/playbooks/libvirt/openshift-cluster/config.yml
+++ b/playbooks/libvirt/openshift-cluster/config.yml
@@ -2,10 +2,23 @@
 # TODO: need to figure out a plan for setting hostname, currently the default
 # is localhost, so no hostname value (or public_hostname) value is getting
 # assigned
+- hosts: localhost
+  gather_facts: no
+  tasks:
+  - include_vars: vars.yml
+  - include_vars: cluster_hosts.yml
+  - add_host:
+      name: "{{ item }}"
+      groups: l_oo_all_hosts
+    with_items: g_all_hosts
+
+- hosts: l_oo_all_hosts
+  gather_facts: no
+  tasks:
+  - include_vars: vars.yml
+  - include_vars: cluster_hosts.yml
+
 - include: ../../common/openshift-cluster/config.yml
-  vars_files:
-  - ../../libvirt/openshift-cluster/vars.yml
-  - ../../libvirt/openshift-cluster/cluster_hosts.yml
   vars:
     g_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
     g_sudo: "{{ deployment_vars[deployment_type].become }}"
@@ -21,3 +34,4 @@
     os_sdn_network_plugin_name: "{{ lookup('oo_option', 'sdn_network_plugin_name') }}"
     openshift_use_flannel: "{{ lookup('oo_option', 'use_flannel') }}"
     openshift_use_fluentd: "{{ lookup('oo_option', 'use_fluentd') }}"
+    openshift_use_dnsmasq: false
diff --git a/playbooks/libvirt/openshift-cluster/update.yml b/playbooks/libvirt/openshift-cluster/update.yml
index 9b7a2777a..28362c984 100644
--- a/playbooks/libvirt/openshift-cluster/update.yml
+++ b/playbooks/libvirt/openshift-cluster/update.yml
@@ -1,4 +1,20 @@
 ---
+- hosts: localhost
+  gather_facts: no
+  tasks:
+  - include_vars: vars.yml
+  - include_vars: cluster_hosts.yml
+  - add_host:
+      name: "{{ item }}"
+      groups: l_oo_all_hosts
+    with_items: g_all_hosts
+
+- hosts: l_oo_all_hosts
+  gather_facts: no
+  tasks:
+  - include_vars: vars.yml
+  - include_vars: cluster_hosts.yml
+
 - name: Populate oo_hosts_to_update group
   hosts: localhost
   connection: local
@@ -14,7 +30,7 @@
       groups: oo_hosts_to_update
       ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
       ansible_become: "{{ deployment_vars[deployment_type].become }}"
-    with_items: "{{ g_all_hosts | default([]) }}"
+    with_items: g_all_hosts | default([])
 
 - include: ../../common/openshift-cluster/update_repos_and_packages.yml
diff --git a/playbooks/openstack/openshift-cluster/config.yml b/playbooks/openstack/openshift-cluster/config.yml
index 9c0ca9af9..6e4f414d6 100644
--- a/playbooks/openstack/openshift-cluster/config.yml
+++ b/playbooks/openstack/openshift-cluster/config.yml
@@ -1,8 +1,21 @@
 ---
+- hosts: localhost
+  gather_facts: no
+  tasks:
+  - include_vars: vars.yml
+  - include_vars: cluster_hosts.yml
+  - add_host:
+      name: "{{ item }}"
+      groups: l_oo_all_hosts
+    with_items: g_all_hosts
+
+- hosts: l_oo_all_hosts
+  gather_facts: no
+  tasks:
+  - include_vars: vars.yml
+  - include_vars: cluster_hosts.yml
+
 - include: ../../common/openshift-cluster/config.yml
-  vars_files:
-  - ../../openstack/openshift-cluster/vars.yml
-  - ../../openstack/openshift-cluster/cluster_hosts.yml
   vars:
     g_nodeonmaster: true
     g_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
diff --git a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml b/playbooks/openstack/openshift-cluster/files/heat_stack.yaml
index 2f05c3adc..1d54a9c39 100644
--- a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml
+++ b/playbooks/openstack/openshift-cluster/files/heat_stack.yaml
@@ -591,11 +591,17 @@ resources:
     type: OS::Heat::MultipartMime
     properties:
       parts:
-        - config: { get_file: user-data }
         - config:
             str_replace:
               template: |
                 #cloud-config
+                disable_root: true
+
+                system_info:
+                  default_user:
+                    name: openshift
+                    sudo: ["ALL=(ALL) NOPASSWD: ALL"]
+
                 write_files:
                   - path: /etc/sudoers.d/00-openshift-no-requiretty
                     permissions: 440
diff --git a/playbooks/openstack/openshift-cluster/launch.yml b/playbooks/openstack/openshift-cluster/launch.yml
index 3d4fe42d0..6429a6755 100644
--- a/playbooks/openstack/openshift-cluster/launch.yml
+++ b/playbooks/openstack/openshift-cluster/launch.yml
@@ -46,7 +46,7 @@
              -P master_flavor={{ openstack_flavor["master"] }}
              -P node_flavor={{ openstack_flavor["node"] }}
              -P infra_flavor={{ openstack_flavor["infra"] }}
-             -P dns_flavor=m1.small
+             -P dns_flavor={{ openstack_flavor["dns"] }}
              openshift-ansible-{{ cluster_id }}-stack'
 
   - name: Wait for OpenStack Stack readiness
diff --git a/playbooks/openstack/openshift-cluster/update.yml b/playbooks/openstack/openshift-cluster/update.yml
index 539af6524..6d4d23963 100644
--- a/playbooks/openstack/openshift-cluster/update.yml
+++ b/playbooks/openstack/openshift-cluster/update.yml
@@ -1,4 +1,20 @@
 ---
+- hosts: localhost
+  gather_facts: no
+  tasks:
+  - include_vars: vars.yml
+  - include_vars: cluster_hosts.yml
+  - add_host:
+      name: "{{ item }}"
+      groups: l_oo_all_hosts
+    with_items: g_all_hosts
+
+- hosts: l_oo_all_hosts
+  gather_facts: no
+  tasks:
+  - include_vars: vars.yml
+  - include_vars: cluster_hosts.yml
+
 - include: dns.yml
 
 - name: Populate oo_hosts_to_update group
@@ -6,9 +22,6 @@
   connection: local
   become: no
   gather_facts: no
-  vars_files:
-  - vars.yml
-  - cluster_hosts.yml
   tasks:
   - name: Evaluate oo_hosts_to_update
     add_host:
@@ -16,7 +29,7 @@
       groups: oo_hosts_to_update
       ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
       ansible_become: "{{ deployment_vars[deployment_type].become }}"
-    with_items: "{{ g_all_hosts | default([]) }}"
+    with_items: g_all_hosts | default([])
 
 - include: ../../common/openshift-cluster/update_repos_and_packages.yml
diff --git a/playbooks/openstack/openshift-cluster/vars.yml b/playbooks/openstack/openshift-cluster/vars.yml
index 84cba0506..bc53a51b0 100644
--- a/playbooks/openstack/openshift-cluster/vars.yml
+++ b/playbooks/openstack/openshift-cluster/vars.yml
@@ -13,6 +13,7 @@ openstack_ssh_public_key:       "{{ lookup('file', lookup('oo_option', 'public_k
 openstack_ssh_access_from:      "{{ lookup('oo_option', 'ssh_from')          |
                                     default('0.0.0.0/0',                     True) }}"
 openstack_flavor:
+  dns:    "{{ lookup('oo_option', 'dns_flavor'       ) | default('m1.small',  True) }}"
   etcd:   "{{ lookup('oo_option', 'etcd_flavor'      ) | default('m1.small',  True) }}"
   master: "{{ lookup('oo_option', 'master_flavor'    ) | default('m1.small',  True) }}"
   infra:  "{{ lookup('oo_option', 'infra_flavor'     ) | default('m1.small',  True) }}"
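
Note on the recurring pattern above: every entry-point playbook drops its vars_files arguments and instead loads vars.yml and cluster_hosts.yml with include_vars on localhost, builds an in-memory l_oo_all_hosts group from g_all_hosts with add_host, then loads the same files on that group so later included plays see the values as host variables. A minimal standalone sketch of that pattern, with group, file, and variable names taken from the diff (the final debug play is illustrative only, and the bare with_items form matches the Ansible 1.x/2.0-era style used in this commit):

---
# Play 1: read the cluster variables on the control host and turn the
# g_all_hosts list into an in-memory inventory group. add_host runs once
# on localhost and changes only the in-memory inventory, nothing on disk.
- hosts: localhost
  gather_facts: no
  tasks:
  - include_vars: vars.yml
  - include_vars: cluster_hosts.yml
  - add_host:
      name: "{{ item }}"
      groups: l_oo_all_hosts
    with_items: g_all_hosts

# Play 2: re-load the same variable files on every member of the new
# group, so plays included afterwards see them as host variables rather
# than the play-level vars_files this commit removes.
- hosts: l_oo_all_hosts
  gather_facts: no
  tasks:
  - include_vars: vars.yml
  - include_vars: cluster_hosts.yml

# Play 3: any later play targeting l_oo_all_hosts now has the variables
# in scope on each host.
- hosts: l_oo_all_hosts
  gather_facts: no
  tasks:
  - debug: var=deployment_type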