author     Jason DeTiberus <jdetiber@redhat.com>  2015-04-01 15:09:19 -0400
committer  Jason DeTiberus <jdetiber@redhat.com>  2015-04-14 23:29:16 -0400
commit     6a4b7a5eb6c4b5e747bab795e2428d7c3992f559 (patch)
tree       2519948f1eb8c372192ed4fd8805adc71da8433d /playbooks/aws/openshift-cluster/tasks
parent     c85e91fdca031eba06481a24f74aa076ae9a4d38 (diff)
Configuration updates for latest builds and major refactor
Configuration updates for latest builds
- Switch to using create-node-config
- Switch sdn services to use etcd over SSL
  - This re-uses the client certificate deployed on each node
- Additional node registration changes
  - Do not assume that the metadata service is available in the openshift_facts module
  - Call systemctl daemon-reload after installing openshift-master, openshift-sdn-master, openshift-node, openshift-sdn-node
- Fix a bug overriding openshift_hostname and openshift_public_hostname in byo playbooks
- Start moving generated configs to /etc/openshift
- Some custom module cleanup
- Add a known issue with ansible-1.9 to README_OSE.md
- Update to genericize the kubernetes_register_node module
  - Default to using kubectl for commands
  - Allow for overriding kubectl_cmd
  - In the openshift_register_node role, override kubectl_cmd to openshift_kube
- Set a default openshift_registry_url for enterprise when deployment_type is enterprise
- Fix openshift_register_node for the client config change
- Ensure that the master certs directory is created
- Add roles and filter_plugins symlinks to playbooks/common/openshift-master and openshift-node
- Allow a non-root user with sudo nopasswd access
  - Updates for README_OSE.md
  - Update the byo inventory to add additional comments
  - Updates for node cert/config sync to work with a non-root user using sudo
  - Move node config/certs to /etc/openshift/node
- Don't use a path for mktemp; addresses https://github.com/openshift/openshift-ansible/issues/154

Create common playbooks
- Create common/openshift-master/config.yml
- Create common/openshift-node/config.yml
- Update playbooks to use the new common playbooks
- Update launch playbooks to call the update playbooks
- Fix openshift_registry and openshift_node_ip usage

Set default deployment type to origin
- openshift_repo updates to enable origin deployments
  - Also separate the repo and gpgkey file structure
  - Remove the kubernetes repo since it isn't currently needed
- Full deployment type support for bin/cluster
  - Honor the OS_DEPLOYMENT_TYPE env variable
  - Add a --deployment-type option, which overrides OS_DEPLOYMENT_TYPE if set
  - If neither OS_DEPLOYMENT_TYPE nor --deployment-type is set, default to origin installs

Additional changes:
- Add a separate config action to bin/cluster that runs the ansible configuration but does not update packages
- Some more duplication reduction in the cluster playbooks
- Rename task files in the playbooks dirs to have "tasks" in their names for clarity
- Update the aws/gce scripts to use a directory for inventory (otherwise, when dynamic inventory returns no hosts, there is an error)

libvirt refactor and update
- Add a libvirt dynamic inventory
- Update to use the dynamic inventory for libvirt
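The launch task file added below reads its per-deployment defaults from a deployment_vars dictionary keyed by deployment type. A minimal sketch of the shape it assumes, limited to the four keys the file actually consumes; all values here are illustrative placeholders, not the repository's real vars:

---
# Hypothetical sketch of deployment_vars: launch_instances.yml only
# reads image, region, ssh_user, and sudo. Real values live in the
# cluster vars files; everything below is a placeholder.
deployment_vars:
  origin:
    image: ami-xxxxxxxx    # placeholder AMI id
    region: us-east-1      # placeholder region
    ssh_user: root         # illustrative user/sudo settings
    sudo: false
  enterprise:
    image: ami-yyyyyyyy    # placeholder AMI id
    region: us-east-1      # placeholder region
    ssh_user: root
    sudo: false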
Diffstat (limited to 'playbooks/aws/openshift-cluster/tasks')
-rw-r--r--  playbooks/aws/openshift-cluster/tasks/launch_instances.yml  69
1 file changed, 69 insertions, 0 deletions
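The new task file tags every instance it launches and then registers the hosts into tag-derived inventory groups, mirroring the tag_<key>_<value> group names that the EC2 dynamic inventory (ec2.py) builds. As a purely illustrative expansion (jdoe, demo, master, and origin are made-up values), the instance_groups fact defined in the file renders as:

# Illustrative expansion of the instance_groups fact for
# created_by=jdoe, cluster=demo, type=master, deployment_type=origin:
instance_groups: tag_created-by_jdoe, tag_env_demo, tag_host-type_master, tag_env-host-type_demo-openshift-master, tag_deployment-type_origin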
diff --git a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml
new file mode 100644
index 000000000..58b4082df
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml
@@ -0,0 +1,69 @@
+---
+# TODO: modify machine_image based on deployment_type
+- set_fact:
+ machine_type: "{{ lookup('env', 'ec2_instance_type') | default('m3.large', true) }}"
+ machine_image: "{{ lookup('env', 'ec2_ami') | default(deployment_vars[deployment_type].image, true) }}"
+ machine_region: "{{ lookup('env', 'ec2_region') | default(deployment_vars[deployment_type].region, true) }}"
+ machine_keypair: "{{ lookup('env', 'ec2_keypair')|default('libra', true) }}"
+ created_by: "{{ lookup('env', 'LOGNAME')|default(cluster, true) }}"
+ security_group: "{{ lookup('env', 'ec2_security_group')|default('public', true) }}"
+ env: "{{ cluster }}"
+ host_type: "{{ type }}"
+ env_host_type: "{{ cluster }}-openshift-{{ type }}"
+
+- name: Launch instance(s)
+ ec2:
+ state: present
+ region: "{{ machine_region }}"
+ keypair: "{{ machine_keypair }}"
+ group: "{{ security_group }}"
+ instance_type: "{{ machine_type }}"
+ image: "{{ machine_image }}"
+ count: "{{ instances | oo_len }}"
+ wait: yes
+ instance_tags:
+ created-by: "{{ created_by }}"
+ env: "{{ env }}"
+ host-type: "{{ host_type }}"
+ env-host-type: "{{ env_host_type }}"
+ deployment-type: "{{ deployment_type }}"
+ register: ec2
+
+- name: Add Name tag to instances
+ ec2_tag: resource={{ item.1.id }} region={{ machine_region }} state=present
+ with_together:
+ - instances
+ - ec2.instances
+ args:
+ tags:
+ Name: "{{ item.0 }}"
+
+- set_fact:
+ instance_groups: tag_created-by_{{ created_by }}, tag_env_{{ env }}, tag_host-type_{{ host_type }}, tag_env-host-type_{{ env_host_type }}, tag_deployment-type_{{ deployment_type }}
+
+- name: Add new instances groups and variables
+ add_host:
+ hostname: "{{ item.0 }}"
+ ansible_ssh_host: "{{ item.1.dns_name }}"
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ groups: "{{ instance_groups }}"
+ ec2_private_ip_address: "{{ item.1.private_ip }}"
+ ec2_ip_address: "{{ item.1.public_ip }}"
+ with_together:
+ - instances
+ - ec2.instances
+
+- name: Wait for ssh
+ wait_for: "port=22 host={{ item.dns_name }}"
+ with_items: ec2.instances
+
+- name: Wait for user setup
+ command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null {{ hostvars[item.0].ansible_ssh_user }}@{{ item.1.dns_name }} echo {{ hostvars[item.0].ansible_ssh_user }} user is setup"
+ register: result
+ until: result.rc == 0
+ retries: 20
+ delay: 10
+ with_together:
+ - instances
+ - ec2.instances
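
For context, a hypothetical sketch of how a launch playbook might consume this task file; master_names and cluster_id are illustrative variable names, not necessarily the repository's. It wires up the instances, cluster, and type variables that the tasks above expect, using the Ansible 1.x include syntax of the period:

---
# Hypothetical caller: include the task file once per host type,
# passing the instance names to create plus the cluster/type tags.
- name: Launch cluster hosts
  hosts: localhost
  connection: local
  gather_facts: no
  vars:
    deployment_type: origin
  tasks:
  - include: tasks/launch_instances.yml
    vars:
      instances: "{{ master_names }}"   # list of instance names to launch
      cluster: "{{ cluster_id }}"
      type: master

Once the include finishes, subsequent plays can target the new hosts immediately through the tag-derived groups registered by add_host, without waiting for the EC2 dynamic inventory to pick up the tags.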