From f929f3f94c7c89e40f9c8e2b85293f496101aebe Mon Sep 17 00:00:00 2001
From: Akram Ben Aissi
Date: Thu, 11 Dec 2014 13:52:16 +0100
Subject: Adding AWS support to openshift-ansible module

- Update the documentation to say that the ssh configuration needs to point to the private key file
- Remove the -p argument when calling time, because it is misinterpreted by ruby
- Make cluster.sh provider-agnostic in its help/error message by replacing the explicit reference to GCE with a variable
- Fix a bug in the playbooks that incorrectly referenced the minion and master fact groups
- Add playbooks for AWS, which are almost a copy/paste of those for GCE
- Add the environment variable OO_PROVIDER to allow selection of the provider; the default is gce
- TODO: implement the terminate.yml playbook
---
 README_AWS.md                             | 16 ++++++-
 cluster.sh                                | 15 +++++--
 lib/ansible_helper.rb                     |  4 +-
 playbooks/aws/openshift-master/config.yml | 40 ++++++++++++++++++
 playbooks/aws/openshift-master/launch.yml | 69 +++++++++++++++++++++++++++++++
 playbooks/aws/openshift-master/vars.yml   |  0
 playbooks/aws/openshift-minion/config.yml | 40 ++++++++++++++++++
 playbooks/aws/openshift-minion/launch.yml | 69 +++++++++++++++++++++++++++++++
 playbooks/aws/openshift-minion/vars.yml   |  0
 9 files changed, 245 insertions(+), 8 deletions(-)
 create mode 100644 playbooks/aws/openshift-master/config.yml
 create mode 100644 playbooks/aws/openshift-master/launch.yml
 create mode 100644 playbooks/aws/openshift-master/vars.yml
 create mode 100644 playbooks/aws/openshift-minion/config.yml
 create mode 100644 playbooks/aws/openshift-minion/launch.yml
 create mode 100644 playbooks/aws/openshift-minion/vars.yml

diff --git a/README_AWS.md b/README_AWS.md
index 2602f9883..c0f2bce75 100644
--- a/README_AWS.md
+++ b/README_AWS.md
@@ -14,13 +14,25 @@ Create a credentials file
    export AWS_ACCESS_KEY_ID='AKIASTUFF'
    export AWS_SECRET_ACCESS_KEY='STUFF'
 ```
-
 1. source this file
 ```
   source ~/.aws_creds
 ```
+Note: You must source this file in each shell in which you want to run cloud.rb
+
+
+(Optional) Set up your $HOME/.ssh/config file
+---------------------------------------------
+When creating a cluster, or in any other case where you don't know the machine hostnames in advance, you can use '.ssh/config'
+to point ansible at a private key file so that it can connect to the newly created hosts.
+
+To do so, add the following entry to your $HOME/.ssh/config file and make it point to the private key file that allows you to log in to AWS.
+```
+Host *.compute-1.amazonaws.com
+  IdentityFile $HOME/.ssh/my_private_key.pem
+```
 
-1. Note: You must source this file in each shell that you want to run cloud.rb
+Alternatively, you can configure your ssh-agent to hold the credentials for connecting to your AWS instances.
 
 Install Dependencies
 --------------------
diff --git a/cluster.sh b/cluster.sh
index 035602620..73c87eb95 100755
--- a/cluster.sh
+++ b/cluster.sh
@@ -2,7 +2,16 @@
 MINIONS=3
 MASTERS=1
-PROVIDER=gce
+
+# If the environment variable OO_PROVIDER is defined, it is used as the provider
+PROVIDER=$OO_PROVIDER
+# Otherwise, the default is gce (Google Compute Engine)
+if [ "x$PROVIDER" == "x" ];then
+  PROVIDER=gce
+fi
+
+UPPER_CASE_PROVIDER=$(echo $PROVIDER | tr '[:lower:]' '[:upper:]')
+
 
 # FIXME: Add options
 MASTER_PLAYBOOK=openshift-master
 
@@ -12,10 +21,10 @@ MINION_PLAYBOOK=openshift-minion
 # @formatter:off
 function usage {
 	cat 1>&2 <<-EOT
-		${0} : [create|terminate|update|list] {GCE environment tag}
+		${0} : [create|terminate|update|list] { ${UPPER_CASE_PROVIDER} environment tag}
 
 		Supported environment tags:
-		$(grep 'SUPPORTED_ENVS.*=' ./lib/gce_command.rb)
+		$(grep 'SUPPORTED_ENVS.*=' ./lib/${PROVIDER}_command.rb)
 	EOT
 }
 # @formatter:on
diff --git a/lib/ansible_helper.rb b/lib/ansible_helper.rb
index 76af73b0d..080c9d00b 100644
--- a/lib/ansible_helper.rb
+++ b/lib/ansible_helper.rb
@@ -25,7 +25,6 @@ module OpenShift
     tmpfile = Tempfile.open('extra_vars') { |f| f.write(@extra_vars.to_json); f}
 
     cmds = []
-    #cmds << 'set -x'
 
     cmds << %Q[export ANSIBLE_FILTER_PLUGINS="#{Dir.pwd}/filter_plugins"]
 
@@ -35,8 +34,7 @@ module OpenShift
     # We need pipelining off so that we can do sudo to enable the root account
     cmds << %Q[export ANSIBLE_SSH_PIPELINING='#{@pipelining.to_s}']
 
-    cmds << %Q[time -p ansible-playbook -i #{@inventory} #{@verbosity} #{playbook} --extra-vars '@#{tmpfile.path}']
-
+    cmds << %Q[time ansible-playbook -i #{@inventory} #{@verbosity} #{playbook} --extra-vars '@#{tmpfile.path}' ]
     cmd = cmds.join(' ; ')
 
     pid = spawn(cmd, :out => $stdout, :err => $stderr, :close_others => true)
diff --git a/playbooks/aws/openshift-master/config.yml b/playbooks/aws/openshift-master/config.yml
new file mode 100644
index 000000000..c8345aa2c
--- /dev/null
+++ b/playbooks/aws/openshift-master/config.yml
@@ -0,0 +1,40 @@
+- name: "populate oo_hosts_to_config host group if needed"
+  hosts: localhost
+  gather_facts: no
+  tasks:
+  - name: Evaluate oo_host_group_exp if it's set
+    add_host: "name={{ item }} groups=oo_hosts_to_config"
+    with_items: "{{ oo_host_group_exp | default('') }}"
+    when: oo_host_group_exp is defined
+
+- name: "Gather facts for minions in {{ oo_env }}"
+  hosts: "tag_env-host-type_{{ oo_env }}-openshift-minion"
+  connection: ssh
+  user: root
+
+- name: "Set Origin specific facts on localhost (for later use)"
+  hosts: localhost
+  gather_facts: no
+  tasks:
+  - name: Setting oo_minion_ips fact on localhost
+    set_fact:
+      oo_minion_ips: "{{ hostvars
+          | oo_select_keys(groups['tag_env-host-type_' + oo_env + '-openshift-minion'])
+          | oo_collect(attribute='ansible_eth0.ipv4.address') }}"
+    when: groups['tag_env-host-type_' + oo_env + '-openshift-minion'] is defined
+
+- name: "Configure instances"
+  hosts: oo_hosts_to_config
+  connection: ssh
+  user: root
+  vars_files:
+    - vars.yml
+  roles:
+    - ../../../roles/base_os
+    - ../../../roles/repos
+    - {
+        role: ../../../roles/openshift_master,
+        oo_minion_ips: "{{ hostvars['localhost'].oo_minion_ips | default(['']) }}",
+        oo_bind_ip: "{{ hostvars[inventory_hostname].ansible_eth0.ipv4.address | default(['']) }}"
+      }
+    - ../../../roles/pods
diff --git a/playbooks/aws/openshift-master/launch.yml b/playbooks/aws/openshift-master/launch.yml
new file mode 100644
index 000000000..a889b93be
--- /dev/null
+++ b/playbooks/aws/openshift-master/launch.yml
@@ -0,0 +1,69 @@
+---
+- name: Launch instance(s)
+  hosts: localhost
+  connection: local
+  gather_facts: no
+
+  vars:
+    inst_region: us-east-1
+    atomic_ami: ami-86781fee
+    user_data_file: user_data.txt
+
+  vars_files:
+    - vars.yml
+
+  tasks:
+    - name: Launch instances
+      ec2:
+        state: present
+        region: "{{ inst_region }}"
+        keypair: libra
+        group: ['public']
+        instance_type: m3.large
+        image: "{{ atomic_ami }}"
+        count: "{{ oo_new_inst_names | oo_len }}"
+        user_data: "{{ lookup('file', user_data_file) }}"
+        wait: yes
+      register: ec2
+
+    - name: Add new instances' public IPs to the atomic proxy host group
+      add_host: "hostname={{ item.public_ip }} groupname=new_ec2_instances"
+      with_items: ec2.instances
+
+    - name: Add Name and environment tags to instances
+      ec2_tag: "resource={{ item.1.id }} region={{ inst_region }} state=present"
+      with_together:
+        - oo_new_inst_names
+        - ec2.instances
+      args:
+        tags:
+          Name: "{{ item.0 }}"
+
+    - name: Add other tags to instances
+      ec2_tag: "resource={{ item.id }} region={{ inst_region }} state=present"
+      with_items: ec2.instances
+      args:
+        tags: "{{ oo_new_inst_tags }}"
+
+    - name: Add new instances' public IPs to oo_hosts_to_config
+      add_host: "hostname={{ item.0 }} ansible_ssh_host={{ item.1.dns_name }} groupname=oo_hosts_to_config"
+      with_together:
+        - oo_new_inst_names
+        - ec2.instances
+
+    - debug: var=ec2
+
+    - name: Wait for ssh
+      wait_for: "port=22 host={{ item.dns_name }}"
+      with_items: ec2.instances
+
+    - name: Wait for root user setup
+      command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null root@{{ item.dns_name }} echo root user is setup"
+      register: result
+      until: result.rc == 0
+      retries: 20
+      delay: 10
+      with_items: ec2.instances
+
+# Apply the configs; separate so that just the configs can be run by themselves
+- include: config.yml
diff --git a/playbooks/aws/openshift-master/vars.yml b/playbooks/aws/openshift-master/vars.yml
new file mode 100644
index 000000000..e69de29bb
diff --git a/playbooks/aws/openshift-minion/config.yml b/playbooks/aws/openshift-minion/config.yml
new file mode 100644
index 000000000..b59ba5a9b
--- /dev/null
+++ b/playbooks/aws/openshift-minion/config.yml
@@ -0,0 +1,40 @@
+- name: "populate oo_hosts_to_config host group if needed"
+  hosts: localhost
+  gather_facts: no
+  tasks:
+  - name: Evaluate oo_host_group_exp
+    add_host: "name={{ item }} groups=oo_hosts_to_config"
+    with_items: "{{ oo_host_group_exp | default('') }}"
+    when: oo_host_group_exp is defined
+
+- name: "Gather facts for masters in {{ oo_env }}"
+  hosts: "tag_env-host-type_{{ oo_env }}-openshift-master"
+  connection: ssh
+  user: root
+
+- name: "Set OO specific facts on localhost (for later use)"
+  hosts: localhost
+  gather_facts: no
+  tasks:
+  - name: Setting oo_master_ips fact on localhost
+    set_fact:
+      oo_master_ips: "{{ hostvars
+          | oo_select_keys(groups['tag_env-host-type_' + oo_env + '-openshift-master'])
+          | oo_collect(attribute='ansible_eth0.ipv4.address') }}"
+    when: groups['tag_env-host-type_' + oo_env + '-openshift-master'] is defined
+
+- name: "Configure instances"
+  hosts: oo_hosts_to_config
+  connection: ssh
+  user: root
+  vars_files:
+    - vars.yml
+  roles:
+    - ../../../roles/base_os
+    - ../../../roles/repos
+    - ../../../roles/docker
+    - {
+        role: ../../../roles/openshift_minion,
+        oo_master_ips: "{{ hostvars['localhost'].oo_master_ips | default(['']) }}",
+        oo_bind_ip: "{{ hostvars[inventory_hostname].ansible_eth0.ipv4.address | default(['']) }}"
+      }
diff --git a/playbooks/aws/openshift-minion/launch.yml b/playbooks/aws/openshift-minion/launch.yml
new file mode 100644
index 000000000..a889b93be
--- /dev/null
+++ b/playbooks/aws/openshift-minion/launch.yml
@@ -0,0 +1,69 @@
+---
+- name: Launch instance(s)
+  hosts: localhost
+  connection: local
+  gather_facts: no
+
+  vars:
+    inst_region: us-east-1
+    atomic_ami: ami-86781fee
+    user_data_file: user_data.txt
+
+  vars_files:
+    - vars.yml
+
+  tasks:
+    - name: Launch instances
+      ec2:
+        state: present
+        region: "{{ inst_region }}"
+        keypair: libra
+        group: ['public']
+        instance_type: m3.large
+        image: "{{ atomic_ami }}"
+        count: "{{ oo_new_inst_names | oo_len }}"
+        user_data: "{{ lookup('file', user_data_file) }}"
+        wait: yes
+      register: ec2
+
+    - name: Add new instances' public IPs to the atomic proxy host group
+      add_host: "hostname={{ item.public_ip }} groupname=new_ec2_instances"
+      with_items: ec2.instances
+
+    - name: Add Name and environment tags to instances
+      ec2_tag: "resource={{ item.1.id }} region={{ inst_region }} state=present"
+      with_together:
+        - oo_new_inst_names
+        - ec2.instances
+      args:
+        tags:
+          Name: "{{ item.0 }}"
+
+    - name: Add other tags to instances
+      ec2_tag: "resource={{ item.id }} region={{ inst_region }} state=present"
+      with_items: ec2.instances
+      args:
+        tags: "{{ oo_new_inst_tags }}"
+
+    - name: Add new instances' public IPs to oo_hosts_to_config
+      add_host: "hostname={{ item.0 }} ansible_ssh_host={{ item.1.dns_name }} groupname=oo_hosts_to_config"
+      with_together:
+        - oo_new_inst_names
+        - ec2.instances
+
+    - debug: var=ec2
+
+    - name: Wait for ssh
+      wait_for: "port=22 host={{ item.dns_name }}"
+      with_items: ec2.instances
+
+    - name: Wait for root user setup
+      command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null root@{{ item.dns_name }} echo root user is setup"
+      register: result
+      until: result.rc == 0
+      retries: 20
+      delay: 10
+      with_items: ec2.instances
+
+# Apply the configs; separate so that just the configs can be run by themselves
+- include: config.yml
diff --git a/playbooks/aws/openshift-minion/vars.yml b/playbooks/aws/openshift-minion/vars.yml
new file mode 100644
index 000000000..e69de29bb
--
cgit v1.2.3
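For reviewers, a minimal usage sketch of the provider switch this patch introduces, assuming the credentials file and ssh configuration from README_AWS.md are already in place; the environment tag below is a placeholder, and the supported tags are whatever lib/aws_command.rb defines:

```sh
# Select the AWS code path; cluster.sh falls back to gce when OO_PROVIDER is unset.
export OO_PROVIDER=aws

# AWS credentials must be sourced in every shell that runs the tooling (see README_AWS.md).
source ~/.aws_creds

# Create a cluster for a supported environment tag (placeholder below);
# terminate, update and list take the same form.
./cluster.sh create <environment tag>
```

Running the same commands with OO_PROVIDER unset keeps the previous GCE behaviour.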