---
# TODO: Add support for choosing base image based on deployment_type and os
# wanted (os wanted needs support added in bin/cluster with sane defaults:
# fedora/centos for origin, rhel for online/enterprise)

# TODO: create a role to encapsulate some of this complexity, possibly also
# create a module to manage the storage tasks, network tasks, and possibly
# even handle the libvirt tasks to set metadata in the domain xml and be able
# to create/query data about vms without having to use xml. The python libvirt
# bindings look like a good candidate for this.

- name: Download Base Cloud image
  get_url:
    url: '{{ image_url }}'
    sha256sum: '{{ image_sha256 }}'
    dest: '{{ os_libvirt_storage_pool_path }}/{{ [image_name, image_compression] | join(".") }}'
  when: '{{ ( lookup("oo_option", "skip_image_download") | default("no", True) | lower ) in ["false", "no"] }}'
  register: downloaded_image

- name: Uncompress Base Cloud image
  command: 'unxz -kf {{ os_libvirt_storage_pool_path }}/{{ [image_name, image_compression] | join(".") }}'
  args:
    creates: '{{ os_libvirt_storage_pool_path }}/{{ image_name }}'
  when: image_compression in ["xz"] and downloaded_image.changed

- name: Create the cloud-init config drive path
  file:
    dest: '{{ os_libvirt_storage_pool_path }}/{{ item }}_configdrive/'
    state: directory
  with_items: instances

- name: Create the cloud-init config drive files
  template:
    src: '{{ item[1] }}'
    dest: '{{ os_libvirt_storage_pool_path }}/{{ item[0] }}_configdrive/{{ item[1] }}'
  with_nested:
    - instances
    - [ user-data, meta-data ]

# genisoimage packs user-data and meta-data into an ISO labeled 'cidata',
# which cloud-init recognizes as a NoCloud datasource.
- name: Create the cloud-init config drive
  command: 'genisoimage -output {{ os_libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso -volid cidata -joliet -rock user-data meta-data'
  args:
    chdir: '{{ os_libvirt_storage_pool_path }}/{{ item }}_configdrive/'
    creates: '{{ os_libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso'
  with_items: instances

- name: Refresh the libvirt storage pool for openshift
  command: 'virsh -c {{ libvirt_uri }} pool-refresh {{ libvirt_storage_pool }}'

# Each VM gets a 10G qcow2 volume backed by the base cloud image, so only
# deltas from the base image consume space.
- name: Create VM drives
  command: 'virsh -c {{ libvirt_uri }} vol-create-as {{ os_libvirt_storage_pool }} {{ item }}.qcow2 10G --format qcow2 --backing-vol {{ image_name }} --backing-vol-format qcow2'
  with_items: instances

- name: Create VMs
  virt:
    name: '{{ item }}'
    command: define
    xml: "{{ lookup('template', '../templates/domain.xml') }}"
    uri: '{{ libvirt_uri }}'
  with_items: instances

- name: Start VMs
  virt:
    name: '{{ item }}'
    state: running
    uri: '{{ libvirt_uri }}'
  with_items: instances

- name: Wait for the VMs to get an IP
  shell: 'virsh -c {{ libvirt_uri }} net-dhcp-leases openshift-ansible | egrep -c ''{{ instances | join("|") }}'''
  register: nb_allocated_ips
  until: nb_allocated_ips.stdout == '{{ instances | length }}'
  retries: 60
  delay: 3
  when: instances | length != 0

- name: Collect IP addresses of the VMs
  shell: 'virsh -c {{ libvirt_uri }} net-dhcp-leases openshift-ansible | awk ''$6 == "{{ item }}" {gsub(/\/.*/, "", $5); print $5}'''
  register: scratch_ip
  with_items: instances

- set_fact:
    ips: "{{ scratch_ip.results | default([]) | oo_collect('stdout') }}"

- name: Add new instances
  add_host:
    hostname: '{{ item.0 }}'
    ansible_ssh_host: '{{ item.1 }}'
    ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
    ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
    groups: 'tag_env-{{ cluster }}, tag_host-type-{{ type }}, tag_sub-host-type-{{ g_sub_host_type }}'
  with_together:
    - instances
    - ips

- name: Wait for ssh
  wait_for:
    host: '{{ item }}'
    port: 22
  with_items: ips
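# Port 22 being reachable does not guarantee cloud-init has finished
# provisioning; poll with a no-op ssh command until the 'openshift' user
# (presumably created by the user-data templated above) can log in.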
- name: Wait for openshift user setup
  command: 'ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null openshift@{{ item.1 }} echo openshift user is setup'
  register: result
  until: result.rc == 0
  retries: 30
  delay: 1
  with_together:
    - instances
    - ips
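# A minimal sketch of the module the second TODO above describes (an
# assumption, not an existing module): the python libvirt bindings can list
# domains and read their state directly, avoiding the virsh output parsing
# done in the tasks above. Shown commented out so this file remains a valid
# Ansible tasks file; the name list_domain_names is hypothetical.
#
#   import libvirt
#
#   def list_domain_names(uri='qemu:///system'):
#       """Return the names of all domains known to the hypervisor at uri."""
#       conn = libvirt.open(uri)
#       try:
#           return [dom.name() for dom in conn.listAllDomains()]
#       finally:
#           conn.close()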