From 8613b70503d2d1cbe57ddebc11919edeb26eaadc Mon Sep 17 00:00:00 2001 From: Jason DeTiberus Date: Wed, 18 Mar 2015 17:15:19 -0400 Subject: Rename repos role to openshift_repos - Rename repos role to openshift_repos - Make openshift_repos a dependency of openshift_common - Add README and metadata for openshift_repos - Playbook updates for role rename - Verify libselinux-python is installed, otherwise some of the built-in modules we use fail --- playbooks/aws/openshift-master/config.yml | 1 - playbooks/aws/openshift-node/config.yml | 3 +-- 2 files changed, 1 insertion(+), 3 deletions(-) (limited to 'playbooks/aws') diff --git a/playbooks/aws/openshift-master/config.yml b/playbooks/aws/openshift-master/config.yml index 454cd6f24..3d6238360 100644 --- a/playbooks/aws/openshift-master/config.yml +++ b/playbooks/aws/openshift-master/config.yml @@ -31,7 +31,6 @@ vars_files: - vars.yml roles: - - repos - { role: openshift_master, openshift_node_ips: "{{ hostvars['localhost'].openshift_node_ips | default(['']) }}", diff --git a/playbooks/aws/openshift-node/config.yml b/playbooks/aws/openshift-node/config.yml index 9662168c4..d39ad781f 100644 --- a/playbooks/aws/openshift-node/config.yml +++ b/playbooks/aws/openshift-node/config.yml @@ -37,8 +37,6 @@ vars_files: - vars.yml roles: - - repos - - docker - { role: openshift_node, openshift_master_ips: "{{ hostvars['localhost'].openshift_master_ips | default(['']) }}", @@ -46,4 +44,5 @@ openshift_env: "{{ oo_env }}" openshift_public_ip: "{{ ec2_ip_address }}" } + - docker - os_env_extras -- cgit v1.2.3 From 3d4144c56731d3efdfd0c34083256e139f8e9571 Mon Sep 17 00:00:00 2001 From: liangxia Date: Thu, 19 Mar 2015 07:35:21 +0000 Subject: minor fix --- playbooks/aws/openshift-master/config.yml | 2 +- playbooks/aws/openshift-node/config.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'playbooks/aws') diff --git a/playbooks/aws/openshift-master/config.yml b/playbooks/aws/openshift-master/config.yml index 454cd6f24..0d0e6d5bf 100644 --- a/playbooks/aws/openshift-master/config.yml +++ b/playbooks/aws/openshift-master/config.yml @@ -35,7 +35,7 @@ - { role: openshift_master, openshift_node_ips: "{{ hostvars['localhost'].openshift_node_ips | default(['']) }}", - openshift_env: "{{ oo_env }}" + openshift_env: "{{ oo_env }}", openshift_public_ip: "{{ ec2_ip_address }}" } - pods diff --git a/playbooks/aws/openshift-node/config.yml b/playbooks/aws/openshift-node/config.yml index 9662168c4..317785c84 100644 --- a/playbooks/aws/openshift-node/config.yml +++ b/playbooks/aws/openshift-node/config.yml @@ -43,7 +43,7 @@ role: openshift_node, openshift_master_ips: "{{ hostvars['localhost'].openshift_master_ips | default(['']) }}", openshift_master_public_ips: "{{ hostvars['localhost'].openshift_master_public_ips | default(['']) }}", - openshift_env: "{{ oo_env }}" + openshift_env: "{{ oo_env }}", openshift_public_ip: "{{ ec2_ip_address }}" } - os_env_extras -- cgit v1.2.3 From 9575258e5a1b8f9ee8ec7ffc7ad74fa5dfeabc00 Mon Sep 17 00:00:00 2001 From: Jason DeTiberus Date: Wed, 18 Mar 2015 13:25:18 -0400 Subject: replace oo_hosts_to_config with oo_nodes_to_config and oo_masters_to_config --- playbooks/aws/openshift-master/config.yml | 6 +++--- playbooks/aws/openshift-master/launch.yml | 4 ++-- playbooks/aws/openshift-node/config.yml | 6 +++--- playbooks/aws/openshift-node/launch.yml | 4 ++-- playbooks/gce/openshift-master/launch.yml | 4 ++-- playbooks/gce/openshift-node/config.yml | 1 - playbooks/gce/openshift-node/launch.yml | 8 ++++---- 7 files changed,
16 insertions(+), 17 deletions(-) (limited to 'playbooks/aws') diff --git a/playbooks/aws/openshift-master/config.yml b/playbooks/aws/openshift-master/config.yml index b3227afa9..bbf1f654a 100644 --- a/playbooks/aws/openshift-master/config.yml +++ b/playbooks/aws/openshift-master/config.yml @@ -1,10 +1,10 @@ --- -- name: "populate oo_hosts_to_config host group if needed" +- name: "populate oo_masters_to_config host group if needed" hosts: localhost gather_facts: no tasks: - name: "Evaluate oo_host_group_exp if it's set" - add_host: "name={{ item }} groups=oo_hosts_to_config" + add_host: "name={{ item }} groups=oo_masters_to_config" with_items: "{{ oo_host_group_exp | default('') }}" when: oo_host_group_exp is defined @@ -25,7 +25,7 @@ when: groups['tag_env-host-type_' + oo_env + '-openshift-node'] is defined - name: "Configure instances" - hosts: oo_hosts_to_config + hosts: oo_masters_to_config connection: ssh user: root vars_files: diff --git a/playbooks/aws/openshift-master/launch.yml b/playbooks/aws/openshift-master/launch.yml index a889b93be..3d5a7f579 100644 --- a/playbooks/aws/openshift-master/launch.yml +++ b/playbooks/aws/openshift-master/launch.yml @@ -45,8 +45,8 @@ args: tags: "{{ oo_new_inst_tags }}" - - name: Add new instances public IPs to oo_hosts_to_config - add_host: "hostname={{ item.0 }} ansible_ssh_host={{ item.1.dns_name }} groupname=oo_hosts_to_config" + - name: Add new instances public IPs to oo_masters_to_config + add_host: "hostname={{ item.0 }} ansible_ssh_host={{ item.1.dns_name }} groupname=oo_masters_to_config" with_together: - oo_new_inst_names - ec2.instances diff --git a/playbooks/aws/openshift-node/config.yml b/playbooks/aws/openshift-node/config.yml index 21807b1cf..822b66464 100644 --- a/playbooks/aws/openshift-node/config.yml +++ b/playbooks/aws/openshift-node/config.yml @@ -1,10 +1,10 @@ --- -- name: "populate oo_hosts_to_config host group if needed" +- name: "populate oo_nodes_to_config host group if needed" hosts: localhost gather_facts: no tasks: - name: Evaluate oo_host_group_exp - add_host: "name={{ item }} groups=oo_hosts_to_config" + add_host: "name={{ item }} groups=oo_nodes_to_config" with_items: "{{ oo_host_group_exp | default('') }}" when: oo_host_group_exp is defined @@ -31,7 +31,7 @@ when: groups['tag_env-host-type-' + oo_env + '-openshift-master'] is defined - name: "Configure instances" - hosts: oo_hosts_to_config + hosts: oo_nodes_to_config connection: ssh user: root vars_files: diff --git a/playbooks/aws/openshift-node/launch.yml b/playbooks/aws/openshift-node/launch.yml index a889b93be..4745fc658 100644 --- a/playbooks/aws/openshift-node/launch.yml +++ b/playbooks/aws/openshift-node/launch.yml @@ -45,8 +45,8 @@ args: tags: "{{ oo_new_inst_tags }}" - - name: Add new instances public IPs to oo_hosts_to_config - add_host: "hostname={{ item.0 }} ansible_ssh_host={{ item.1.dns_name }} groupname=oo_hosts_to_config" + - name: Add new instances public IPs to oo_nodes_to_config + add_host: "hostname={{ item.0 }} ansible_ssh_host={{ item.1.dns_name }} groupname=oo_nodes_to_config" with_together: - oo_new_inst_names - ec2.instances diff --git a/playbooks/gce/openshift-master/launch.yml b/playbooks/gce/openshift-master/launch.yml index f2800b061..3512274cc 100644 --- a/playbooks/gce/openshift-master/launch.yml +++ b/playbooks/gce/openshift-master/launch.yml @@ -24,8 +24,8 @@ tags: "{{ oo_new_inst_tags }}" register: gce - - name: Add new instances public IPs to oo_hosts_to_config - add_host: "hostname={{ item.name }} ansible_ssh_host={{ 
item.public_ip }} groupname=oo_hosts_to_config" + - name: Add new instances public IPs to oo_masters_to_config + add_host: "hostname={{ item.name }} ansible_ssh_host={{ item.public_ip }} groupname=oo_masters_to_config" with_items: gce.instance_data - name: Wait for ssh diff --git a/playbooks/gce/openshift-node/config.yml b/playbooks/gce/openshift-node/config.yml index 9d87c4e8f..d24acb8fa 100644 --- a/playbooks/gce/openshift-node/config.yml +++ b/playbooks/gce/openshift-node/config.yml @@ -121,4 +121,3 @@ vars_files: - openshift_node - os_env_extras - os_env_extras_node - diff --git a/playbooks/gce/openshift-node/launch.yml b/playbooks/gce/openshift-node/launch.yml index 935599efd..ca2914d8a 100644 --- a/playbooks/gce/openshift-node/launch.yml +++ b/playbooks/gce/openshift-node/launch.yml @@ -24,8 +24,8 @@ tags: "{{ oo_new_inst_tags }}" register: gce - - name: Add new instances public IPs to oo_hosts_to_config - add_host: "hostname={{ item.name }} ansible_ssh_host={{ item.public_ip }} groupname=oo_hosts_to_config" + - name: Add new instances public IPs to oo_nodes_to_config + add_host: "hostname={{ item.name }} ansible_ssh_host={{ item.public_ip }} groupname=oo_nodes_to_config" with_items: gce.instance_data - name: Wait for ssh @@ -48,10 +48,10 @@ # Always bounce service to pick up new credentials #- name: "Restart instances" -# hosts: oo_hosts_to_config +# hosts: oo_nodes_to_config # connection: ssh # user: root # tasks: -# - debug: var=groups.oo_hosts_to_config +# - debug: var=groups.oo_nodes_to_config # - name: Restart OpenShift # service: name=openshift-node enabled=yes state=restarted -- cgit v1.2.3 From 8f35aff7245246de4116fcf3c81e7f095cf1be3a Mon Sep 17 00:00:00 2001 From: Jason DeTiberus Date: Sun, 22 Mar 2015 22:11:22 -0400 Subject: Add new role os_env_extras_node that is a subset of the docker role - Does not install or start docker, since the openshift-node role will handle that for us - Only adds root to the dockerroot group and configures the enter-container script. --- playbooks/aws/openshift-node/config.yml | 2 +- roles/os_env_extras_node/README.md | 38 +++++++ roles/os_env_extras_node/files/enter-container.sh | 13 +++ roles/os_env_extras_node/meta/main.yml | 124 ++++++++++++++++++++++ roles/os_env_extras_node/tasks/main.yml | 7 ++ 5 files changed, 183 insertions(+), 1 deletion(-) create mode 100644 roles/os_env_extras_node/README.md create mode 100755 roles/os_env_extras_node/files/enter-container.sh create mode 100644 roles/os_env_extras_node/meta/main.yml create mode 100644 roles/os_env_extras_node/tasks/main.yml (limited to 'playbooks/aws') diff --git a/playbooks/aws/openshift-node/config.yml b/playbooks/aws/openshift-node/config.yml index 822b66464..3cf2c58b2 100644 --- a/playbooks/aws/openshift-node/config.yml +++ b/playbooks/aws/openshift-node/config.yml @@ -44,5 +44,5 @@ openshift_env: "{{ oo_env }}", openshift_public_ip: "{{ ec2_ip_address }}" } - - docker - os_env_extras + - os_env_extras_node diff --git a/roles/os_env_extras_node/README.md b/roles/os_env_extras_node/README.md new file mode 100644 index 000000000..225dd44b9 --- /dev/null +++ b/roles/os_env_extras_node/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
+ +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. + +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. + +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). diff --git a/roles/os_env_extras_node/files/enter-container.sh b/roles/os_env_extras_node/files/enter-container.sh new file mode 100755 index 000000000..7cf5b8d83 --- /dev/null +++ b/roles/os_env_extras_node/files/enter-container.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +if [ $# -ne 1 ] +then + echo + echo "Usage: $(basename $0) " + echo + exit 1 +fi + +PID=$(docker inspect --format '{{.State.Pid}}' $1) + +nsenter --target $PID --mount --uts --ipc --net --pid diff --git a/roles/os_env_extras_node/meta/main.yml b/roles/os_env_extras_node/meta/main.yml new file mode 100644 index 000000000..c5c362c60 --- /dev/null +++ b/roles/os_env_extras_node/meta/main.yml @@ -0,0 +1,124 @@ +--- +galaxy_info: + author: your name + description: + company: your company (optional) + # Some suggested licenses: + # - BSD (default) + # - MIT + # - GPLv2 + # - GPLv3 + # - Apache + # - CC-BY + license: license (GPLv2, CC-BY, etc) + min_ansible_version: 1.2 + # + # Below are all platforms currently available. Just uncomment + # the ones that apply to your role. If you don't see your + # platform on this list, let us know and we'll get it added! + # + #platforms: + #- name: EL + # versions: + # - all + # - 5 + # - 6 + # - 7 + #- name: GenericUNIX + # versions: + # - all + # - any + #- name: Fedora + # versions: + # - all + # - 16 + # - 17 + # - 18 + # - 19 + # - 20 + #- name: opensuse + # versions: + # - all + # - 12.1 + # - 12.2 + # - 12.3 + # - 13.1 + # - 13.2 + #- name: Amazon + # versions: + # - all + # - 2013.03 + # - 2013.09 + #- name: GenericBSD + # versions: + # - all + # - any + #- name: FreeBSD + # versions: + # - all + # - 8.0 + # - 8.1 + # - 8.2 + # - 8.3 + # - 8.4 + # - 9.0 + # - 9.1 + # - 9.1 + # - 9.2 + #- name: Ubuntu + # versions: + # - all + # - lucid + # - maverick + # - natty + # - oneiric + # - precise + # - quantal + # - raring + # - saucy + # - trusty + #- name: SLES + # versions: + # - all + # - 10SP3 + # - 10SP4 + # - 11 + # - 11SP1 + # - 11SP2 + # - 11SP3 + #- name: GenericLinux + # versions: + # - all + # - any + #- name: Debian + # versions: + # - all + # - etch + # - lenny + # - squeeze + # - wheezy + # + # Below are all categories currently available. Just as with + # the platforms above, uncomment those that apply to your role. 
# + #categories: #- cloud #- cloud:ec2 #- cloud:gce #- cloud:rax #- clustering #- database #- database:nosql #- database:sql #- development #- monitoring #- networking #- packaging #- system #- web +dependencies: [] + # List your role dependencies here, one per line. Only + # dependencies available via galaxy should be listed here. + # Be sure to remove the '[]' above if you add dependencies + # to this list. + diff --git a/roles/os_env_extras_node/tasks/main.yml b/roles/os_env_extras_node/tasks/main.yml new file mode 100644 index 000000000..065f71f74 --- /dev/null +++ b/roles/os_env_extras_node/tasks/main.yml @@ -0,0 +1,7 @@ +--- +- copy: src=enter-container.sh dest=/usr/local/bin/enter-container.sh mode=0755 + +# From the origin rpm there exists instructions on how to +# setup origin properly. The following steps come from there +- name: Change root to be in the Docker group + user: name=root groups=dockerroot append=yes -- cgit v1.2.3 From 4712e72c912a1102bff0508c98bd97da3f33ae95 Mon Sep 17 00:00:00 2001 From: Jason DeTiberus Date: Mon, 23 Mar 2015 23:53:17 -0400 Subject: openshift_facts role/module refactor default settings - Add openshift_facts role and module - Created new role openshift_facts that contains an openshift_facts module - Refactor openshift_* roles to use openshift_facts instead of relying on defaults - Refactor playbooks to use openshift_facts - Cleanup inventory group_vars - Update defaults - update openshift_master role firewall defaults - remove etcd peer port, since we will not be supporting clustered embedded etcd - remove 8444 since console now runs on the api port by default - add 8444 and 7001 to disabled services to ensure removal if updating - Add new role os_env_extras_node that is a subset of the docker role - previously, we were starting/enabling docker which was causing issues with some installations - Does not install or start docker, since the openshift-node role will handle that for us - Only adds root to the dockerroot group - Update playbooks to use os_env_extras_node role instead of docker role - os_firewall bug fixes - ignore ip6tables for now, since we are not configuring any ipv6 rules - if installing package do a daemon-reload before starting/enabling service - Add aws support to bin/cluster - Add list action to bin/cluster - Add update action to bin/cluster - cleanup some stray debug statements - some variable renaming for clarity --- README_AWS.md | 26 +- README_GCE.md | 25 +- bin/cluster | 17 +- cluster.sh | 113 ----- inventory/aws/group_vars/all | 2 + inventory/gce/group_vars/all | 5 - inventory/gce/group_vars/tag_host-type-master | 5 - inventory/gce/group_vars/tag_host-type-node | 6 - .../gce/group_vars/tag_host-type-openshift-master | 1 - .../gce/group_vars/tag_host-type-openshift-node | 1 - playbooks/aws/openshift-cluster/filter_plugins | 1 + playbooks/aws/openshift-cluster/launch.yml | 62 +++ .../aws/openshift-cluster/launch_instances.yml | 62 +++ playbooks/aws/openshift-cluster/list.yml | 17 + playbooks/aws/openshift-cluster/roles | 1 + playbooks/aws/openshift-cluster/terminate.yml | 14 + playbooks/aws/openshift-cluster/update.yml | 13 + playbooks/aws/openshift-cluster/vars.yml | 1 + playbooks/aws/openshift-master/config.yml | 37 +- playbooks/aws/openshift-master/launch.yml | 9 +- playbooks/aws/openshift-master/terminate.yml | 52 +++ playbooks/aws/openshift-master/vars.yml | 1 + playbooks/aws/openshift-node/config.yml | 123 ++++-- playbooks/aws/openshift-node/launch.yml | 13 +-
playbooks/aws/openshift-node/terminate.yml | 52 +++ playbooks/aws/openshift-node/vars.yml | 1 + playbooks/gce/openshift-cluster/launch.yml | 9 +- .../gce/openshift-cluster/launch_instances.yml | 7 +- playbooks/gce/openshift-cluster/list.yml | 17 + playbooks/gce/openshift-cluster/update.yml | 13 + playbooks/gce/openshift-master/config.yml | 6 +- playbooks/gce/openshift-master/launch.yml | 12 +- playbooks/gce/openshift-master/terminate.yml | 16 +- playbooks/gce/openshift-master/vars.yml | 1 + playbooks/gce/openshift-node/config.yml | 94 ++-- playbooks/gce/openshift-node/launch.yml | 22 +- playbooks/gce/openshift-node/terminate.yml | 16 +- playbooks/gce/openshift-node/vars.yml | 1 + roles/openshift_common/README.md | 17 +- roles/openshift_common/defaults/main.yml | 1 + roles/openshift_common/meta/main.yml | 1 + roles/openshift_common/tasks/main.yml | 29 +- roles/openshift_common/tasks/set_facts.yml | 9 - roles/openshift_common/vars/main.yml | 5 +- roles/openshift_facts/README.md | 34 ++ roles/openshift_facts/library/openshift_facts.py | 482 +++++++++++++++++++++ roles/openshift_facts/meta/main.yml | 15 + roles/openshift_facts/tasks/main.yml | 3 + roles/openshift_master/README.md | 28 +- roles/openshift_master/defaults/main.yml | 13 +- roles/openshift_master/handlers/main.yml | 1 - roles/openshift_master/tasks/main.yml | 50 ++- roles/openshift_master/vars/main.yml | 2 - roles/openshift_node/README.md | 3 - roles/openshift_node/defaults/main.yml | 2 - roles/openshift_node/handlers/main.yml | 2 +- roles/openshift_node/tasks/main.yml | 27 +- roles/openshift_node/vars/main.yml | 2 - roles/openshift_register_nodes/README.md | 22 +- .../library/kubernetes_register_node.py | 3 +- roles/openshift_register_nodes/meta/main.yml | 141 +----- roles/openshift_register_nodes/tasks/main.yml | 58 ++- roles/openshift_repos/defaults/main.yaml | 2 + roles/openshift_repos/meta/main.yml | 3 +- roles/openshift_repos/tasks/main.yaml | 6 + roles/openshift_sdn_master/defaults/main.yml | 2 - roles/openshift_sdn_master/meta/main.yml | 3 +- roles/openshift_sdn_master/tasks/main.yml | 18 +- roles/openshift_sdn_node/README.md | 6 - roles/openshift_sdn_node/defaults/main.yml | 2 - roles/openshift_sdn_node/meta/main.yml | 3 +- roles/openshift_sdn_node/tasks/main.yml | 23 +- roles/os_env_extras_node/tasks/main.yml | 5 + .../library/os_firewall_manage_iptables.py | 1 + roles/os_firewall/meta/main.yml | 1 + roles/os_firewall/tasks/firewall/firewalld.yml | 5 + roles/os_firewall/tasks/firewall/iptables.yml | 12 +- 77 files changed, 1290 insertions(+), 626 deletions(-) delete mode 100755 cluster.sh create mode 100644 inventory/aws/group_vars/all delete mode 100644 inventory/gce/group_vars/tag_host-type-master delete mode 100644 inventory/gce/group_vars/tag_host-type-node delete mode 120000 inventory/gce/group_vars/tag_host-type-openshift-master delete mode 120000 inventory/gce/group_vars/tag_host-type-openshift-node create mode 120000 playbooks/aws/openshift-cluster/filter_plugins create mode 100644 playbooks/aws/openshift-cluster/launch.yml create mode 100644 playbooks/aws/openshift-cluster/launch_instances.yml create mode 100644 playbooks/aws/openshift-cluster/list.yml create mode 120000 playbooks/aws/openshift-cluster/roles create mode 100644 playbooks/aws/openshift-cluster/terminate.yml create mode 100644 playbooks/aws/openshift-cluster/update.yml create mode 100644 playbooks/aws/openshift-cluster/vars.yml create mode 100644 playbooks/aws/openshift-master/terminate.yml create mode 100644 
playbooks/aws/openshift-node/terminate.yml create mode 100644 playbooks/gce/openshift-cluster/list.yml create mode 100644 playbooks/gce/openshift-cluster/update.yml delete mode 100644 roles/openshift_common/tasks/set_facts.yml create mode 100644 roles/openshift_facts/README.md create mode 100755 roles/openshift_facts/library/openshift_facts.py create mode 100644 roles/openshift_facts/meta/main.yml create mode 100644 roles/openshift_facts/tasks/main.yml delete mode 100644 roles/openshift_master/vars/main.yml delete mode 100644 roles/openshift_node/vars/main.yml mode change 100644 => 100755 roles/openshift_register_nodes/library/kubernetes_register_node.py delete mode 100644 roles/openshift_sdn_master/defaults/main.yml delete mode 100644 roles/openshift_sdn_node/defaults/main.yml create mode 100644 roles/os_env_extras_node/tasks/main.yml mode change 100644 => 100755 roles/os_firewall/library/os_firewall_manage_iptables.py (limited to 'playbooks/aws') diff --git a/README_AWS.md b/README_AWS.md index fb9d0f895..e877f34c6 100644 --- a/README_AWS.md +++ b/README_AWS.md @@ -51,7 +51,29 @@ OSX: Test The Setup -------------- 1. cd openshift-ansible -1. Try to list all instances: +1. Try to list all instances (Passing an empty string as the cluster_id +argument will result in all ec2 instances being listed) ``` - ./cloud.rb aws list + bin/cluster list aws '' +``` + +Creating a cluster +------------------ +1. To create a cluster with one master and two nodes +``` + bin/cluster create aws +``` + +Updating a cluster +--------------------- +1. To update the cluster +``` + bin/cluster update aws +``` + +Terminating a cluster +--------------------- +1. To terminate the cluster +``` + bin/cluster terminate aws ``` diff --git a/README_GCE.md b/README_GCE.md index 209705113..f6c5138c1 100644 --- a/README_GCE.md +++ b/README_GCE.md @@ -65,12 +65,29 @@ Install Dependencies Test The Setup -------------- 1. cd openshift-ansible/ -2. Try to list all instances: +1. Try to list all instances (Passing an empty string as the cluster_id +argument will result in all gce instances being listed) ``` - ./cloud.rb gce list + bin/cluster list gce '' ``` -3. Try to create an instance: +Creating a cluster +------------------ +1. To create a cluster with one master and two nodes ``` - ./cloud.rb gce launch -e int --type openshift-node + bin/cluster create gce +``` + +Updating a cluster +--------------------- +1. To update the cluster +``` + bin/cluster update gce +``` + +Terminating a cluster +--------------------- +1. 
To terminate the cluster +``` + bin/cluster terminate gce ``` diff --git a/bin/cluster b/bin/cluster index b99286b46..36ab1da1b 100755 --- a/bin/cluster +++ b/bin/cluster @@ -32,8 +32,8 @@ class Cluster(object): playbook = "playbooks/{}/openshift-cluster/launch.yml".format(args.provider) inventory = self.setup_provider(args.provider) - env['masters'] = args.masters - env['nodes'] = args.nodes + env['num_masters'] = args.masters + env['num_nodes'] = args.nodes return self.action(args, inventory, env, playbook) @@ -55,16 +55,23 @@ class Cluster(object): :param args: command line arguments provided by user :return: exit status from run command """ - raise NotImplementedError("ACTION [{}] not implemented".format(sys._getframe().f_code.co_name)) + env = {'cluster_id': args.cluster_id} + playbook = "playbooks/{}/openshift-cluster/list.yml".format(args.provider) + inventory = self.setup_provider(args.provider) + + return self.action(args, inventory, env, playbook) def update(self, args): """ - Update OpenShift across clustered VMs + Update to latest OpenShift across clustered VMs :param args: command line arguments provided by user :return: exit status from run command """ - raise NotImplementedError("ACTION [{}] not implemented".format(sys._getframe().f_code.co_name)) + env = {'cluster_id': args.cluster_id} + playbook = "playbooks/{}/openshift-cluster/update.yml".format(args.provider) + inventory = self.setup_provider(args.provider) + return self.action(args, inventory, env, playbook) def setup_provider(self, provider): """ diff --git a/cluster.sh b/cluster.sh deleted file mode 100755 index 9c9aad4d2..000000000 --- a/cluster.sh +++ /dev/null @@ -1,113 +0,0 @@ -#!/bin/bash -eu - -NODES=2 -MASTERS=1 - -# If the environment variable OO_PROVDER is defined, it used for the provider -PROVIDER=${OO_PROVIDER:-''} -# Otherwise, default is gce (Google Compute Engine) -if [ "x$PROVIDER" == "x" ];then - PROVIDER=gce -fi - -UPPER_CASE_PROVIDER=$(echo $PROVIDER | tr '[:lower:]' '[:upper:]') - - -# Use OO_MASTER_PLAYBOOK/OO_NODE_PLAYBOOK environment variables for playbooks if defined, -# otherwise use openshift default values. -MASTER_PLAYBOOK=${OO_MASTER_PLAYBOOK:-'openshift-master'} -NODE_PLAYBOOK=${OO_NODE_PLAYBOOK:-'openshift-node'} - - -# @formatter:off -function usage { - cat 1>&2 <<-EOT - ${0} : [create|terminate|update|list] { ${UPPER_CASE_PROVIDER} environment tag} - - Supported environment tags: - $(grep --no-messages 'SUPPORTED_ENVS.*=' ./lib/${PROVIDER}_command.rb) - $([ $? 
-ne 0 ] && echo "No supported environment tags found for ${PROVIDER}") - - Optional arguments for create: - [-p|--provider, -m|--masters, -n|--nodes, --master-playbook, --node-playbook] - - Optional arguments for terminate|update: - [-p|--provider, --master-playbook, --node-playbook] -EOT -} -# @formatter:on - -function create_cluster { - ./cloud.rb "${PROVIDER}" launch -e "${ENV}" --type=$MASTER_PLAYBOOK -c $MASTERS - - ./cloud.rb "${PROVIDER}" launch -e "${ENV}" --type=$NODE_PLAYBOOK -c $NODES - - update_cluster - - echo -e "\nCreated ${MASTERS}/${MASTER_PLAYBOOK} masters and ${NODES}/${NODE_PLAYBOOK} nodes using ${PROVIDER} provider\n" -} - -function update_cluster { - ./cloud.rb "${PROVIDER}" config -e "${ENV}" --type=$MASTER_PLAYBOOK - ./cloud.rb "${PROVIDER}" config -e "${ENV}" --type=$NODE_PLAYBOOK -} - -function terminate_cluster { - ./cloud.rb "${PROVIDER}" terminate -e "${ENV}" --type=$MASTER_PLAYBOOK - ./cloud.rb "${PROVIDER}" terminate -e "${ENV}" --type=$NODE_PLAYBOOK -} - -[ -f ./cloud.rb ] || (echo 1>&2 'Cannot find ./cloud.rb' && exit 1) - -function check_argval { - if [[ $1 == -* ]]; then - echo "Invalid value: '$1'" - usage - exit 1 - fi -} - -# Using GNU getopt to support both small and long formats -OPTIONS=`getopt -o p:m:n:h --long provider:,masters:,nodes:,master-playbook:,node-playbook:,help \ - -n "$0" -- "$@"` -eval set -- "$OPTIONS" - -while true; do - case "$1" in - -h|--help) (usage; exit 1) ; shift ;; - -p|--provider) PROVIDER="$2" ; check_argval $2 ; shift 2 ;; - -m|--masters) MASTERS="$2" ; check_argval $2 ; shift 2 ;; - -n|--nodes) NODES="$2" ; check_argval $2 ; shift 2 ;; - --master-playbook) MASTER_PLAYBOOK="$2" ; check_argval $2 ; shift 2 ;; - --node-playbook) NODE_PLAYBOOK="$2" ; check_argval $2 ; shift 2 ;; - --) shift ; break ;; - *) break ;; - esac -done - -shift $((OPTIND-1)) - -[ -z "${1:-}" ] && (usage; exit 1) - -case "${1}" in - 'create') - [ -z "${2:-}" ] && (usage; exit 1) - ENV="${2}" - create_cluster ;; - 'update') - [ -z "${2:-}" ] && (usage; exit 1) - ENV="${2}" - update_cluster ;; - 'terminate') - [ -z "${2:-}" ] && (usage; exit 1) - ENV="${2}" - terminate_cluster ;; - 'list') ./cloud.rb "${PROVIDER}" list ;; - 'help') usage; exit 0 ;; - *) - echo -n 1>&2 "${1} is not a supported operation"; - usage; - exit 1 ;; -esac - -exit 0 diff --git a/inventory/aws/group_vars/all b/inventory/aws/group_vars/all new file mode 100644 index 000000000..b22da00de --- /dev/null +++ b/inventory/aws/group_vars/all @@ -0,0 +1,2 @@ +--- +ansible_ssh_user: root diff --git a/inventory/gce/group_vars/all b/inventory/gce/group_vars/all index 3e969df63..b22da00de 100644 --- a/inventory/gce/group_vars/all +++ b/inventory/gce/group_vars/all @@ -1,7 +1,2 @@ --- ansible_ssh_user: root -openshift_hostname: "{{ ansible_default_ipv4.address }}" -openshift_public_hostname: "{{ ansible_default_ipv4.address }}" -openshift_ip: "{{ ansible_default_ipv4.address }}" -openshift_public_ip: "{{ gce_public_ip }}" -openshift_env: "{{ oo_env }}" diff --git a/inventory/gce/group_vars/tag_host-type-master b/inventory/gce/group_vars/tag_host-type-master deleted file mode 100644 index ddbdc650c..000000000 --- a/inventory/gce/group_vars/tag_host-type-master +++ /dev/null @@ -1,5 +0,0 @@ ---- -openshift_api_url: https://{{ openshift_hostname }}:8443 -openshift_api_public_url: https://{{ openshift_public_hostname }}:8443 -openshift_webui_url: https://{{ openshift_hostname }}:8444 -openshift_webui_public_url: https://{{ openshift_public_hostname }}:8444 diff --git 
a/inventory/gce/group_vars/tag_host-type-node b/inventory/gce/group_vars/tag_host-type-node deleted file mode 100644 index bb95a724d..000000000 --- a/inventory/gce/group_vars/tag_host-type-node +++ /dev/null @@ -1,6 +0,0 @@ ---- -openshift_node_cpu: -openshift_node_memory: -openshift_node_pod_cidr: -openshift_node_labels: {} -openshift_node_annotations: {} diff --git a/inventory/gce/group_vars/tag_host-type-openshift-master b/inventory/gce/group_vars/tag_host-type-openshift-master deleted file mode 120000 index c0c4cf370..000000000 --- a/inventory/gce/group_vars/tag_host-type-openshift-master +++ /dev/null @@ -1 +0,0 @@ -tag_host-type-master \ No newline at end of file diff --git a/inventory/gce/group_vars/tag_host-type-openshift-node b/inventory/gce/group_vars/tag_host-type-openshift-node deleted file mode 120000 index ebbce6136..000000000 --- a/inventory/gce/group_vars/tag_host-type-openshift-node +++ /dev/null @@ -1 +0,0 @@ -tag_host-type-node \ No newline at end of file diff --git a/playbooks/aws/openshift-cluster/filter_plugins b/playbooks/aws/openshift-cluster/filter_plugins new file mode 120000 index 000000000..99a95e4ca --- /dev/null +++ b/playbooks/aws/openshift-cluster/filter_plugins @@ -0,0 +1 @@ +../../../filter_plugins \ No newline at end of file diff --git a/playbooks/aws/openshift-cluster/launch.yml b/playbooks/aws/openshift-cluster/launch.yml new file mode 100644 index 000000000..3561c1803 --- /dev/null +++ b/playbooks/aws/openshift-cluster/launch.yml @@ -0,0 +1,62 @@ +--- +- name: Launch instance(s) + hosts: localhost + connection: local + gather_facts: no + vars_files: + - vars.yml + tasks: + - set_fact: k8s_type="master" + + - name: Generate master instance names(s) + set_fact: scratch={{ cluster_id }}-{{ k8s_type }}-{{ '%05x' |format( 1048576 |random) }} + register: master_names_output + with_sequence: start=1 end={{ num_masters }} + + # These set_fact's cannot be combined + - set_fact: + master_names_string: "{% for item in master_names_output.results %}{{ item.ansible_facts.scratch }} {% endfor %}" + + - set_fact: + master_names: "{{ master_names_string.strip().split(' ') }}" + + - include: launch_instances.yml + vars: + instances: "{{ master_names }}" + cluster: "{{ cluster_id }}" + type: "{{ k8s_type }}" + + - set_fact: k8s_type="node" + + - name: Generate node instance names(s) + set_fact: scratch={{ cluster_id }}-{{ k8s_type }}-{{ '%05x' |format( 1048576 |random) }} + register: node_names_output + with_sequence: start=1 end={{ num_nodes }} + + # These set_fact's cannot be combined + - set_fact: + node_names_string: "{% for item in node_names_output.results %}{{ item.ansible_facts.scratch }} {% endfor %}" + + - set_fact: + node_names: "{{ node_names_string.strip().split(' ') }}" + + - include: launch_instances.yml + vars: + instances: "{{ node_names }}" + cluster: "{{ cluster_id }}" + type: "{{ k8s_type }}" + +- hosts: "tag_env_{{ cluster_id }}" + roles: + - openshift_repos + - os_update_latest + +- include: ../openshift-master/config.yml + vars: + oo_host_group_exp: "groups[\"tag_env-host-type_{{ cluster_id }}-openshift-master\"]" + +- include: ../openshift-node/config.yml + vars: + oo_host_group_exp: "groups[\"tag_env-host-type_{{ cluster_id }}-openshift-node\"]" + +- include: list.yml diff --git a/playbooks/aws/openshift-cluster/launch_instances.yml b/playbooks/aws/openshift-cluster/launch_instances.yml new file mode 100644 index 000000000..e4d5952fd --- /dev/null +++ b/playbooks/aws/openshift-cluster/launch_instances.yml @@ -0,0 +1,62 @@ +--- +- set_fact: + 
machine_type: "{{ lookup('env', 'ec2_instance_type')|default('m3.large', true) }}" + machine_image: "{{ lookup('env', 'ec2_ami')|default('ami-307b3658', true) }}" + machine_region: "{{ lookup('env', 'ec2_region')|default('us-east-1', true) }}" + machine_keypair: "{{ lookup('env', 'ec2_keypair')|default('libra', true) }}" + created_by: "{{ lookup('env', 'LOGNAME')|default(cluster, true) }}" + env: "{{ cluster }}" + host_type: "{{ type }}" + env_host_type: "{{ cluster }}-openshift-{{ type }}" + +- name: Launch instance(s) + ec2: + state: present + region: "{{ machine_region }}" + keypair: "{{ machine_keypair }}" + group: ['public'] + instance_type: "{{ machine_type }}" + image: "{{ machine_image }}" + count: "{{ instances | oo_len }}" + wait: yes + instance_tags: + created-by: "{{ created_by }}" + env: "{{ env }}" + host-type: "{{ host_type }}" + env-host-type: "{{ env_host_type }}" + register: ec2 + +- name: Add Name tag to instances + ec2_tag: resource={{ item.1.id }} region={{ machine_region }} state=present + with_together: + - instances + - ec2.instances + args: + tags: + Name: "{{ item.0 }}" + +- set_fact: + instance_groups: tag_created-by_{{ created_by }}, tag_env_{{ env }}, tag_host-type_{{ host_type }}, tag_env-host-type_{{ env_host_type }} + +- name: Add new instances groups and variables + add_host: + hostname: "{{ item.0 }}" + ansible_ssh_host: "{{ item.1.dns_name }}" + groups: "{{ instance_groups }}" + ec2_private_ip_address: "{{ item.1.private_ip }}" + ec2_ip_address: "{{ item.1.public_ip }}" + with_together: + - instances + - ec2.instances + +- name: Wait for ssh + wait_for: "port=22 host={{ item.dns_name }}" + with_items: ec2.instances + +- name: Wait for root user setup + command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null root@{{ item.dns_name }} echo root user is setup" + register: result + until: result.rc == 0 + retries: 20 + delay: 10 + with_items: ec2.instances diff --git a/playbooks/aws/openshift-cluster/list.yml b/playbooks/aws/openshift-cluster/list.yml new file mode 100644 index 000000000..08e9e2df4 --- /dev/null +++ b/playbooks/aws/openshift-cluster/list.yml @@ -0,0 +1,17 @@ +--- +- name: Generate oo_list_hosts group + hosts: localhost + gather_facts: no + tasks: + - set_fact: scratch_group=tag_env_{{ cluster_id }} + when: cluster_id != '' + - set_fact: scratch_group=all + when: scratch_group is not defined + - add_host: name={{ item }} groups=oo_list_hosts + with_items: groups[scratch_group] | difference(['localhost']) + +- name: List Hosts + hosts: oo_list_hosts + gather_facts: no + tasks: + - debug: msg="public:{{hostvars[inventory_hostname].ec2_ip_address}} private:{{hostvars[inventory_hostname].ec2_private_ip_address}}" diff --git a/playbooks/aws/openshift-cluster/roles b/playbooks/aws/openshift-cluster/roles new file mode 120000 index 000000000..20c4c58cf --- /dev/null +++ b/playbooks/aws/openshift-cluster/roles @@ -0,0 +1 @@ +../../../roles \ No newline at end of file diff --git a/playbooks/aws/openshift-cluster/terminate.yml b/playbooks/aws/openshift-cluster/terminate.yml new file mode 100644 index 000000000..39607633a --- /dev/null +++ b/playbooks/aws/openshift-cluster/terminate.yml @@ -0,0 +1,14 @@ +--- +- name: Terminate instance(s) + hosts: localhost + + vars_files: + - vars.yml + +- include: ../openshift-node/terminate.yml + vars: + oo_host_group_exp: 'groups["tag_env-host-type_{{ cluster_id }}-openshift-node"]' + +- include: ../openshift-master/terminate.yml + vars: + 
oo_host_group_exp: 'groups["tag_env-host-type_{{ cluster_id }}-openshift-master"]' diff --git a/playbooks/aws/openshift-cluster/update.yml b/playbooks/aws/openshift-cluster/update.yml new file mode 100644 index 000000000..90ecdc6ab --- /dev/null +++ b/playbooks/aws/openshift-cluster/update.yml @@ -0,0 +1,13 @@ +--- +- hosts: "tag_env_{{ cluster_id }}" + roles: + - openshift_repos + - os_update_latest + +- include: ../openshift-master/config.yml + vars: + oo_host_group_exp: "groups[\"tag_env-host-type_{{ cluster_id }}-openshift-master\"]" + +- include: ../openshift-node/config.yml + vars: + oo_host_group_exp: "groups[\"tag_env-host-type_{{ cluster_id }}-openshift-node\"]" diff --git a/playbooks/aws/openshift-cluster/vars.yml b/playbooks/aws/openshift-cluster/vars.yml new file mode 100644 index 000000000..ed97d539c --- /dev/null +++ b/playbooks/aws/openshift-cluster/vars.yml @@ -0,0 +1 @@ +--- diff --git a/playbooks/aws/openshift-master/config.yml b/playbooks/aws/openshift-master/config.yml index bbf1f654a..1c4060eee 100644 --- a/playbooks/aws/openshift-master/config.yml +++ b/playbooks/aws/openshift-master/config.yml @@ -1,5 +1,5 @@ --- -- name: "populate oo_masters_to_config host group if needed" +- name: Populate oo_masters_to_config host group if needed hosts: localhost gather_facts: no tasks: @@ -8,34 +8,17 @@ with_items: "{{ oo_host_group_exp | default('') }}" when: oo_host_group_exp is defined -- name: "Gather facts for nodes in {{ oo_env }}" - hosts: "tag_env-host-type_{{ oo_env }}-openshift-node" - connection: ssh - user: root - -- name: "Set Origin specific facts on localhost (for later use)" - hosts: localhost - gather_facts: no - tasks: - - name: Setting openshift_node_ips fact on localhost - set_fact: - openshift_node_ips: "{{ hostvars - | oo_select_keys(groups['tag_env-host-type_' + oo_env + '-openshift-node']) - | oo_collect(attribute='ansible_default_ipv4.address') }}" - when: groups['tag_env-host-type_' + oo_env + '-openshift-node'] is defined - -- name: "Configure instances" +- name: Configure instances hosts: oo_masters_to_config - connection: ssh - user: root + vars: + openshift_hostname: "{{ ec2_private_ip_address }}" + openshift_public_hostname: "{{ ec2_ip_address }}" + # TODO: this should be removed once openshift-sdn packages are available + openshift_use_openshift_sdn: False vars_files: - - vars.yml + - vars.yml roles: - - { - role: openshift_master, - openshift_node_ips: "{{ hostvars['localhost'].openshift_node_ips | default(['']) }}", - openshift_env: "{{ oo_env }}", - openshift_public_ip: "{{ ec2_ip_address }}" - } + - openshift_master + #- openshift_sdn_master - pods - os_env_extras diff --git a/playbooks/aws/openshift-master/launch.yml b/playbooks/aws/openshift-master/launch.yml index 3d5a7f579..3d87879a0 100644 --- a/playbooks/aws/openshift-master/launch.yml +++ b/playbooks/aws/openshift-master/launch.yml @@ -46,13 +46,16 @@ tags: "{{ oo_new_inst_tags }}" - name: Add new instances public IPs to oo_masters_to_config - add_host: "hostname={{ item.0 }} ansible_ssh_host={{ item.1.dns_name }} groupname=oo_masters_to_config" + add_host: + hostname: "{{ item.0 }}" + ansible_ssh_host: "{{ item.1.dns_name }}" + groupname: oo_masters_to_config + ec2_private_ip_address: "{{ item.1.private_ip }}" + ec2_ip_address: "{{ item.1.public_ip }}" with_together: - oo_new_inst_names - ec2.instances - - debug: var=ec2 - - name: Wait for ssh wait_for: "port=22 host={{ item.dns_name }}" with_items: ec2.instances diff --git a/playbooks/aws/openshift-master/terminate.yml 
b/playbooks/aws/openshift-master/terminate.yml new file mode 100644 index 000000000..fd15cf00f --- /dev/null +++ b/playbooks/aws/openshift-master/terminate.yml @@ -0,0 +1,52 @@ +--- +- name: Populate oo_masters_to_terminate host group if needed + hosts: localhost + gather_facts: no + tasks: + - name: Evaluate oo_host_group_exp if it's set + add_host: "name={{ item }} groups=oo_masters_to_terminate" + with_items: "{{ oo_host_group_exp | default('') }}" + when: oo_host_group_exp is defined + +- name: Gather facts for instances to terminate + hosts: oo_masters_to_terminate + +- name: Terminate instances + hosts: localhost + connection: local + gather_facts: no + vars: + host_vars: "{{ hostvars + | oo_select_keys(groups['oo_masters_to_terminate']) }}" + tasks: + - name: Terminate instances + ec2: + state: absent + instance_ids: ["{{ item.ec2_id }}"] + region: "{{ item.ec2_region }}" + ignore_errors: yes + register: ec2_term + with_items: host_vars + + # Fail if any of the instances failed to terminate with an error other + # than 403 Forbidden + - fail: msg=Terminating instance {{ item.item.ec2_id }} failed with message {{ item.msg }} + when: "item.failed and not item.msg | search(\"error: EC2ResponseError: 403 Forbidden\")" + with_items: ec2_term.results + + - name: Stop instance if termination failed + ec2: + state: stopped + instance_ids: ["{{ item.item.ec2_id }}"] + region: "{{ item.item.ec2_region }}" + register: ec2_stop + when: item.failed + with_items: ec2_term.results + + - name: Rename stopped instances + ec2_tag: resource={{ item.item.item.ec2_id }} region={{ item.item.item.ec2_region }} state=present + args: + tags: + Name: "{{ item.item.item.ec2_tag_Name }}-terminate" + with_items: ec2_stop.results + diff --git a/playbooks/aws/openshift-master/vars.yml b/playbooks/aws/openshift-master/vars.yml index fb5f4ea42..c196b2fca 100644 --- a/playbooks/aws/openshift-master/vars.yml +++ b/playbooks/aws/openshift-master/vars.yml @@ -1,2 +1,3 @@ --- openshift_debug_level: 4 +openshift_cluster_id: "{{ cluster_id }}" diff --git a/playbooks/aws/openshift-node/config.yml b/playbooks/aws/openshift-node/config.yml index 3cf2c58b2..b08ed7571 100644 --- a/playbooks/aws/openshift-node/config.yml +++ b/playbooks/aws/openshift-node/config.yml @@ -1,5 +1,5 @@ --- -- name: "populate oo_nodes_to_config host group if needed" +- name: Populate oo_nodes_to_config host group if needed hosts: localhost gather_facts: no tasks: @@ -7,42 +7,101 @@ add_host: "name={{ item }} groups=oo_nodes_to_config" with_items: "{{ oo_host_group_exp | default('') }}" when: oo_host_group_exp is defined + - add_host: + name: "{{ groups['tag_env-host-type_' ~ cluster_id ~ '-openshift-master'][0] }}" + groups: oo_first_master + when: oo_host_group_exp is defined -- name: "Gather facts for masters in {{ oo_env }}" - hosts: "tag_env-host-type_{{ oo_env }}-openshift-master" - connection: ssh - user: root -- name: "Set OO sepcific facts on localhost (for later use)" - hosts: localhost - gather_facts: no +- name: Gather and set facts for hosts to configure + hosts: oo_nodes_to_config + roles: + - openshift_facts tasks: - - name: Setting openshift_master_ips fact on localhost - set_fact: - openshift_master_ips: "{{ hostvars - | oo_select_keys(groups['tag_env-host-type_' + oo_env + '-openshift-master']) - | oo_collect(attribute='ansible_default_ipv4.address') }}" - when: groups['tag_env-host-type_' + oo_env + '-openshift-master'] is defined - - name: Setting openshift_master_public_ips fact on localhost - set_fact: - 
openshift_master_public_ips: "{{ hostvars - | oo_select_keys(groups['tag_env-host-type-' + oo_env + '-openshift-master']) - | oo_collect(attribute='ec2_ip_address') }}" - when: groups['tag_env-host-type-' + oo_env + '-openshift-master'] is defined - -- name: "Configure instances" + # Since the master is registering the nodes before they are configured, we + # need to make sure to set the node properties beforehand if we do not want + # the defaults + - openshift_facts: + role: "{{ item.role }}" + local_facts: "{{ item.local_facts }}" + with_items: + - role: common + local_facts: + hostname: "{{ ec2_private_ip_address }}" + public_hostname: "{{ ec2_ip_address }}" + # TODO: this should be removed once openshift-sdn packages are available + use_openshift_sdn: False + - role: node + local_facts: + external_id: "{{ openshift_node_external_id | default(None) }}" + resources_cpu: "{{ openshfit_node_resources_cpu | default(None) }}" + resources_memory: "{{ openshfit_node_resources_memory | default(None) }}" + pod_cidr: "{{ openshfit_node_pod_cidr | default(None) }}" + labels: "{{ openshfit_node_labels | default(None) }}" + annotations: "{{ openshfit_node_annotations | default(None) }}" + + +- name: Register nodes + hosts: oo_first_master + vars: + openshift_nodes: "{{ hostvars + | oo_select_keys(groups['oo_nodes_to_config']) }}" + roles: + - openshift_register_nodes + tasks: + - name: Create local temp directory for syncing certs + local_action: command /usr/bin/mktemp -d /tmp/openshift-ansible-XXXXXXX + register: mktemp + + - name: Sync master certs to localhost + synchronize: + mode: pull + checksum: yes + src: /var/lib/openshift/openshift.local.certificates + dest: "{{ mktemp.stdout }}" + + +- name: Configure instances hosts: oo_nodes_to_config - connection: ssh - user: root vars_files: - - vars.yml + - vars.yml + vars: + openshift_hostname: "{{ ec2_private_ip_address }}" + openshift_public_hostname: "{{ ec2_ip_address }}" + sync_tmpdir: "{{ hostvars[groups['oo_first_master'][0]].mktemp.stdout }}" + cert_parent_rel_path: openshift.local.certificates + cert_rel_path: "{{ cert_parent_rel_path }}/node-{{ openshift.common.hostname }}" + cert_base_path: /var/lib/openshift + cert_parent_path: "{{ cert_base_path }}/{{ cert_parent_rel_path }}" + cert_path: "{{ cert_base_path }}/{{ cert_rel_path }}" + pre_tasks: + - name: Ensure certificate directories exists + file: + path: "{{ item }}" + state: directory + with_items: + - "{{ cert_path }}" + - "{{ cert_parent_path }}/ca" + + # TODO: notify restart openshift-node and/or restart openshift-sdn-node, + # possibly test service started time against certificate/config file + # timestamps in openshift-node or openshift-sdn-node to trigger notify + - name: Sync certs to nodes + synchronize: + checksum: yes + src: "{{ item.src }}" + dest: "{{ item.dest }}" + owner: no + group: no + with_items: + - src: "{{ sync_tmpdir }}/{{ cert_rel_path }}" + dest: "{{ cert_parent_path }}" + - src: "{{ sync_tmpdir }}/{{ cert_parent_rel_path }}/ca/cert.crt" + dest: "{{ cert_parent_path }}/ca/cert.crt" + - local_action: file name={{ sync_tmpdir }} state=absent + run_once: true roles: - - { - role: openshift_node, - openshift_master_ips: "{{ hostvars['localhost'].openshift_master_ips | default(['']) }}", - openshift_master_public_ips: "{{ hostvars['localhost'].openshift_master_public_ips | default(['']) }}", - openshift_env: "{{ oo_env }}", - openshift_public_ip: "{{ ec2_ip_address }}" - } + - openshift_node + #- openshift_sdn_node - os_env_extras - os_env_extras_node diff --git 
a/playbooks/aws/openshift-node/launch.yml b/playbooks/aws/openshift-node/launch.yml index 4745fc658..b7ef593e7 100644 --- a/playbooks/aws/openshift-node/launch.yml +++ b/playbooks/aws/openshift-node/launch.yml @@ -27,7 +27,9 @@ register: ec2 - name: Add new instances public IPs to the atomic proxy host group - add_host: "hostname={{ item.public_ip }} groupname=new_ec2_instances" + add_host: + hostname: "{{ item.public_ip }}" + groupname: new_ec2_instances" with_items: ec2.instances - name: Add Name and environment tags to instances @@ -46,13 +48,16 @@ tags: "{{ oo_new_inst_tags }}" - name: Add new instances public IPs to oo_nodes_to_config - add_host: "hostname={{ item.0 }} ansible_ssh_host={{ item.1.dns_name }} groupname=oo_nodes_to_config" + add_host: + hostname: "{{ item.0 }}" + ansible_ssh_host: "{{ item.1.dns_name }}" + groupname: oo_nodes_to_config + ec2_private_ip_address: "{{ item.1.private_ip }}" + ec2_ip_address: "{{ item.1.public_ip }}" with_together: - oo_new_inst_names - ec2.instances - - debug: var=ec2 - - name: Wait for ssh wait_for: "port=22 host={{ item.dns_name }}" with_items: ec2.instances diff --git a/playbooks/aws/openshift-node/terminate.yml b/playbooks/aws/openshift-node/terminate.yml new file mode 100644 index 000000000..1c0c77eb7 --- /dev/null +++ b/playbooks/aws/openshift-node/terminate.yml @@ -0,0 +1,52 @@ +--- +- name: Populate oo_nodes_to_terminate host group if needed + hosts: localhost + gather_facts: no + tasks: + - name: Evaluate oo_host_group_exp if it's set + add_host: "name={{ item }} groups=oo_nodes_to_terminate" + with_items: "{{ oo_host_group_exp | default('') }}" + when: oo_host_group_exp is defined + +- name: Gather facts for instances to terminate + hosts: oo_nodes_to_terminate + +- name: Terminate instances + hosts: localhost + connection: local + gather_facts: no + vars: + host_vars: "{{ hostvars + | oo_select_keys(groups['oo_nodes_to_terminate']) }}" + tasks: + - name: Terminate instances + ec2: + state: absent + instance_ids: ["{{ item.ec2_id }}"] + region: "{{ item.ec2_region }}" + ignore_errors: yes + register: ec2_term + with_items: host_vars + + # Fail if any of the instances failed to terminate with an error other + # than 403 Forbidden + - fail: msg=Terminating instance {{ item.item.ec2_id }} failed with message {{ item.msg }} + when: "item.failed and not item.msg | search(\"error: EC2ResponseError: 403 Forbidden\")" + with_items: ec2_term.results + + - name: Stop instance if termination failed + ec2: + state: stopped + instance_ids: ["{{ item.item.ec2_id }}"] + region: "{{ item.item.ec2_region }}" + register: ec2_stop + when: item.failed + with_items: ec2_term.results + + - name: Rename stopped instances + ec2_tag: resource={{ item.item.item.ec2_id }} region={{ item.item.item.ec2_region }} state=present + args: + tags: + Name: "{{ item.item.item.ec2_tag_Name }}-terminate" + with_items: ec2_stop.results + diff --git a/playbooks/aws/openshift-node/vars.yml b/playbooks/aws/openshift-node/vars.yml index fb5f4ea42..c196b2fca 100644 --- a/playbooks/aws/openshift-node/vars.yml +++ b/playbooks/aws/openshift-node/vars.yml @@ -1,2 +1,3 @@ --- openshift_debug_level: 4 +openshift_cluster_id: "{{ cluster_id }}" diff --git a/playbooks/gce/openshift-cluster/launch.yml b/playbooks/gce/openshift-cluster/launch.yml index 889d92d40..14cdd2537 100644 --- a/playbooks/gce/openshift-cluster/launch.yml +++ b/playbooks/gce/openshift-cluster/launch.yml @@ -11,7 +11,7 @@ - name: Generate master instance names(s) set_fact: scratch={{ cluster_id }}-{{ k8s_type }}-{{ 
'%05x' |format( 1048576 |random) }} register: master_names_output - with_sequence: start=1 end={{ masters }} + with_sequence: start=1 end={{ num_masters }} # These set_fact's cannot be combined - set_fact: @@ -25,14 +25,13 @@ instances: "{{ master_names }}" cluster: "{{ cluster_id }}" type: "{{ k8s_type }}" - group_name: "tag_env-host-type-{{ cluster_id }}-openshift-master" - set_fact: k8s_type="node" - name: Generate node instance names(s) set_fact: scratch={{ cluster_id }}-{{ k8s_type }}-{{ '%05x' |format( 1048576 |random) }} register: node_names_output - with_sequence: start=1 end={{ nodes }} + with_sequence: start=1 end={{ num_nodes }} # These set_fact's cannot be combined - set_fact: @@ -55,9 +54,9 @@ - include: ../openshift-master/config.yml vars: oo_host_group_exp: "groups[\"tag_env-host-type-{{ cluster_id }}-openshift-master\"]" - oo_env: "{{ cluster_id }}" - include: ../openshift-node/config.yml vars: oo_host_group_exp: "groups[\"tag_env-host-type-{{ cluster_id }}-openshift-node\"]" - oo_env: "{{ cluster_id }}" + +- include: list.yml diff --git a/playbooks/gce/openshift-cluster/launch_instances.yml b/playbooks/gce/openshift-cluster/launch_instances.yml index 20e31d990..b4f33bd87 100644 --- a/playbooks/gce/openshift-cluster/launch_instances.yml +++ b/playbooks/gce/openshift-cluster/launch_instances.yml @@ -1,3 +1,7 @@ +--- +# TODO: when we are ready to go to ansible 1.9+ support only, we can update to +# the gce task to use the disk_auto_delete parameter to avoid having to delete +# the disk as a separate step on termination - set_fact: machine_type: "{{ lookup('env', 'gce_machine_type') |default('n1-standard-1', true) }}" @@ -18,12 +22,13 @@ - "env-host-type-{{ cluster }}-openshift-{{ type }}" register: gce -- name: Add new instances public IPs +- name: Add new instances to groups and set variables needed add_host: hostname: "{{ item.name }}" ansible_ssh_host: "{{ item.public_ip }}" groups: "{{ item.tags | oo_prepend_strings_in_list('tag_') | join(',') }}" gce_public_ip: "{{ item.public_ip }}" + gce_private_ip: "{{ item.private_ip }}" with_items: gce.instance_data - name: Wait for ssh diff --git a/playbooks/gce/openshift-cluster/list.yml b/playbooks/gce/openshift-cluster/list.yml new file mode 100644 index 000000000..1124b0ea3 --- /dev/null +++ b/playbooks/gce/openshift-cluster/list.yml @@ -0,0 +1,17 @@ +--- +- name: Generate oo_list_hosts group + hosts: localhost + gather_facts: no + tasks: + - set_fact: scratch_group=tag_env-{{ cluster_id }} + when: cluster_id != '' + - set_fact: scratch_group=all + when: scratch_group is not defined + - add_host: name={{ item }} groups=oo_list_hosts + with_items: groups[scratch_group] | difference(['localhost']) | difference(groups.status_terminated) + +- name: List Hosts + hosts: oo_list_hosts + gather_facts: no + tasks: + - debug: msg="public:{{hostvars[inventory_hostname].gce_public_ip}} private:{{hostvars[inventory_hostname].gce_private_ip}}" diff --git a/playbooks/gce/openshift-cluster/update.yml b/playbooks/gce/openshift-cluster/update.yml new file mode 100644 index 000000000..973e4c3ef --- /dev/null +++ b/playbooks/gce/openshift-cluster/update.yml @@ -0,0 +1,13 @@ +--- +- hosts: "tag_env-{{ cluster_id }}" + roles: + - openshift_repos + - os_update_latest + +- include: ../openshift-master/config.yml + vars: + oo_host_group_exp: "groups[\"tag_env-host-type-{{ cluster_id }}-openshift-master\"]" + +- include: ../openshift-node/config.yml + vars: + oo_host_group_exp: "groups[\"tag_env-host-type-{{ cluster_id }}-openshift-node\"]" diff --git 
a/playbooks/gce/openshift-master/config.yml b/playbooks/gce/openshift-master/config.yml index e405e2fb4..857da0763 100644 --- a/playbooks/gce/openshift-master/config.yml +++ b/playbooks/gce/openshift-master/config.yml @@ -1,3 +1,4 @@ +--- - name: master/config.yml, populate oo_masters_to_config host group if needed hosts: localhost gather_facts: no @@ -7,11 +8,10 @@ with_items: "{{ oo_host_group_exp | default('') }}" when: oo_host_group_exp is defined -- name: Gather facts for nodes in {{ oo_env }} - hosts: "tag_env-host-type-{{ oo_env }}-openshift-node" - - name: "Configure instances" hosts: oo_masters_to_config + vars: + openshift_hostname: "{{ gce_private_ip }}" vars_files: - vars.yml roles: diff --git a/playbooks/gce/openshift-master/launch.yml b/playbooks/gce/openshift-master/launch.yml index 3512274cc..287596002 100644 --- a/playbooks/gce/openshift-master/launch.yml +++ b/playbooks/gce/openshift-master/launch.yml @@ -1,4 +1,8 @@ --- +# TODO: when we are ready to go to ansible 1.9+ support only, we can update to +# the gce task to use the disk_auto_delete parameter to avoid having to delete +# the disk as a separate step on termination + - name: Launch instance(s) hosts: localhost connection: local @@ -25,15 +29,17 @@ register: gce - name: Add new instances public IPs to oo_masters_to_config - add_host: "hostname={{ item.name }} ansible_ssh_host={{ item.public_ip }} groupname=oo_masters_to_config" + add_host: + hostname: "{{ item.name }}" + ansible_ssh_host: "{{ item.public_ip }}" + groupname: oo_masters_to_config + gce_private_ip: "{{ item.private_ip }}" with_items: gce.instance_data - name: Wait for ssh wait_for: "port=22 host={{ item.public_ip }}" with_items: gce.instance_data - - debug: var=gce - - name: Wait for root user setup command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null root@{{ item.public_ip }} echo root user is setup" register: result diff --git a/playbooks/gce/openshift-master/terminate.yml b/playbooks/gce/openshift-master/terminate.yml index 9e027cf41..8319774f8 100644 --- a/playbooks/gce/openshift-master/terminate.yml +++ b/playbooks/gce/openshift-master/terminate.yml @@ -1,17 +1,13 @@ -- name: "populate oo_hosts_to_terminate host group if needed" +--- +- name: Populate oo_masters_to_terminate host group if needed hosts: localhost gather_facts: no tasks: - - debug: var=oo_host_group_exp - - name: Evaluate oo_host_group_exp if it's set - add_host: "name={{ item }} groups=oo_hosts_to_terminate" + add_host: "name={{ item }} groups=oo_masters_to_terminate" with_items: "{{ oo_host_group_exp | default('') }}" when: oo_host_group_exp is defined - - debug: msg="{{ groups['oo_hosts_to_terminate'] }}" - - - name: Terminate master instances hosts: localhost connection: local @@ -23,12 +19,10 @@ pem_file: "{{ gce_pem_file }}" project_id: "{{ gce_project_id }}" state: 'absent' - instance_names: "{{ groups['oo_hosts_to_terminate'] }}" - disks: "{{ groups['oo_hosts_to_terminate'] }}" + instance_names: "{{ groups['oo_masters_to_terminate'] }}" + disks: "{{ groups['oo_masters_to_terminate'] }}" register: gce - - debug: var=gce - - name: Remove disks of instances gce_pd: service_account_email: "{{ gce_service_account_email }}" diff --git a/playbooks/gce/openshift-master/vars.yml b/playbooks/gce/openshift-master/vars.yml index fb5f4ea42..c196b2fca 100644 --- a/playbooks/gce/openshift-master/vars.yml +++ b/playbooks/gce/openshift-master/vars.yml @@ -1,2 +1,3 @@ --- openshift_debug_level: 4 +openshift_cluster_id: 
"{{ cluster_id }}" diff --git a/playbooks/gce/openshift-node/config.yml b/playbooks/gce/openshift-node/config.yml index e0d074572..771cc3a94 100644 --- a/playbooks/gce/openshift-node/config.yml +++ b/playbooks/gce/openshift-node/config.yml @@ -1,3 +1,4 @@ +--- - name: node/config.yml, populate oo_nodes_to_config host group if needed hosts: localhost gather_facts: no @@ -6,50 +7,42 @@ add_host: "name={{ item }} groups=oo_nodes_to_config" with_items: "{{ oo_host_group_exp | default('') }}" when: oo_host_group_exp is defined - - name: Find masters for env - add_host: "name={{ item }} groups=oo_masters_for_node_config" - with_items: groups['tag_env-host-type-' + oo_env + '-openshift-master'] + - add_host: + name: "{{ groups['tag_env-host-type-' ~ cluster_id ~ '-openshift-master'][0] }}" + groups: oo_first_master + when: oo_host_group_exp is defined -- name: Gather facts for masters in {{ oo_env }} - hosts: tag_env-host-type-{{ oo_env }}-openshift-master - tasks: - - set_fact: - openshift_master_ip: "{{ openshift_ip }}" - openshift_master_api_url: "{{ openshift_api_url }}" - openshift_master_webui_url: "{{ openshift_webui_url }}" - openshift_master_hostname: "{{ openshift_hostname }}" - openshift_master_public_ip: "{{ openshift_public_ip }}" - openshift_master_api_public_url: "{{ openshift_api_public_url }}" - openshift_master_webui_public_url: "{{ openshift_webui_public_url }}" - openshift_master_public_hostnames: "{{ openshift_public_hostname }}" -- name: Gather facts for hosts to configure - hosts: tag_env-host-type-{{ oo_env }}-openshift-node +- name: Gather and set facts for hosts to configure + hosts: oo_nodes_to_config + roles: + - openshift_facts tasks: - - set_fact: - openshift_node_hostname: "{{ openshift_hostname }}" - openshift_node_name: "{{ openshift_hostname }}" - openshift_node_cpu: "{{ openshift_node_cpu if openshift_node_cpu else ansible_processor_cores }}" - openshift_node_memory: "{{ openshift_node_memory if openshift_node_memory else (ansible_memtotal_mb|int * 1024 * 1024 * 0.75)|int }}" - openshift_node_pod_cidr: "{{ openshift_node_pod_cidr if openshift_node_pod_cidr else None }}" - openshift_node_host_ip: "{{ openshift_ip }}" - openshift_node_labels: "{{ openshift_node_labels if openshift_node_labels else {} }}" - openshift_node_annotations: "{{ openshift_node_annotations if openshift_node_annotations else {} }}" + # Since the master is registering the nodes before they are configured, we + # need to make sure to set the node properties beforehand if we do not want + # the defaults + - openshift_facts: + role: "{{ item.role }}" + local_facts: "{{ item.local_facts }}" + with_items: + - role: common + local_facts: + hostname: "{{ gce_private_ip }}" + - role: node + local_facts: + external_id: "{{ openshift_node_external_id | default(None) }}" + resources_cpu: "{{ openshift_node_resources_cpu | default(None) }}" + resources_memory: "{{ openshift_node_resources_memory | default(None) }}" + pod_cidr: "{{ openshift_node_pod_cidr | default(None) }}" + labels: "{{ openshift_node_labels | default(None) }}" + annotations: "{{ openshift_node_annotations | default(None) }}" + - name: Register nodes - hosts: tag_env-host-type-{{ oo_env }}-openshift-master[0] + hosts: oo_first_master vars: - openshift_node_group: tag_env-host-type-{{ oo_env }}-openshift-node openshift_nodes: "{{ hostvars - | oo_select_keys(groups[openshift_node_group]) }}" - openshift_master_group: tag_env-host-type-{{ oo_env }}-openshift-master - openshift_master_urls: "{{ hostvars - |
oo_select_keys(groups[openshift_master_group]) - | oo_collect(attribute='openshift_master_api_url') }}" - openshift_master_public_urls: "{{ hostvars - | oo_select_keys(groups[openshift_master_group]) - | oo_collect(attribute='openshift_master_api_public_url') }}" - pre_tasks: + | oo_select_keys(groups['oo_nodes_to_config']) }}" roles: - openshift_register_nodes tasks: @@ -64,28 +57,14 @@ src: /var/lib/openshift/openshift.local.certificates dest: "{{ mktemp.stdout }}" -# TODO: sync generated certs between masters -# - name: Configure instances hosts: oo_nodes_to_config vars_files: - vars.yml vars: - openshift_master_group: tag_env-host-type-{{ oo_env }}-openshift-master - openshift_master_ips: "{{ hostvars - | oo_select_keys(groups[openshift_master_group]) - | oo_collect(attribute='openshift_master_ip') }}" - openshift_master_hostnames: "{{ hostvars - | oo_select_keys(groups[openshift_master_group]) - | oo_collect(attribute='openshift_master_hostname') }}" - openshift_master_public_ips: "{{ hostvars - | oo_select_keys(groups[openshift_master_group]) - | oo_collect(attribute='openshift_master_public_ip') }}" - openshift_master_public_hostnames: "{{ hostvars - | oo_select_keys(groups[openshift_master_group]) - | oo_collect(attribute='openshift_master_public_hostname') }}" + sync_tmpdir: "{{ hostvars[groups['oo_first_master'][0]].mktemp.stdout }}" cert_parent_rel_path: openshift.local.certificates - cert_rel_path: "{{ cert_parent_rel_path }}/node-{{ openshift_node_name }}" + cert_rel_path: "{{ cert_parent_rel_path }}/node-{{ openshift.common.hostname }}" cert_base_path: /var/lib/openshift cert_parent_path: "{{ cert_base_path }}/{{ cert_parent_rel_path }}" cert_path: "{{ cert_base_path }}/{{ cert_rel_path }}" @@ -98,11 +77,9 @@ - "{{ cert_path }}" - "{{ cert_parent_path }}/ca" - # TODO: only sync to a node if it's certs have been updated # TODO: notify restart openshift-node and/or restart openshift-sdn-node, # possibly test service started time against certificate/config file # timestamps in openshift-node or openshift-sdn-node to trigger notify - # TODO: also copy ca cert: /var/lib/openshift/openshift.local.certificates/ca/cert.crt - name: Sync certs to nodes synchronize: checksum: yes @@ -111,12 +88,13 @@ owner: no group: no with_items: - - src: "{{ hostvars[groups[openshift_master_group][0]].mktemp.stdout }}/{{ cert_rel_path }}" + - src: "{{ sync_tmpdir }}/{{ cert_rel_path }}" dest: "{{ cert_parent_path }}" - - src: "{{ hostvars[groups[openshift_master_group][0]].mktemp.stdout }}/{{ cert_parent_rel_path }}/ca/cert.crt" + - src: "{{ sync_tmpdir }}/{{ cert_parent_rel_path }}/ca/cert.crt" dest: "{{ cert_parent_path }}/ca/cert.crt" - - local_action: file name={{ hostvars[groups[openshift_master_group][0]].mktemp.stdout }} state=absent + - local_action: file name={{ sync_tmpdir }} state=absent run_once: true roles: - openshift_node - os_env_extras + - os_env_extras_node diff --git a/playbooks/gce/openshift-node/launch.yml b/playbooks/gce/openshift-node/launch.yml index ca2914d8a..73d0478ab 100644 --- a/playbooks/gce/openshift-node/launch.yml +++ b/playbooks/gce/openshift-node/launch.yml @@ -1,4 +1,8 @@ --- +# TODO: when we are ready to go to ansible 1.9+ support only, we can update to +# the gce task to use the disk_auto_delete parameter to avoid having to delete +# the disk as a separate step on termination + - name: Launch instance(s) hosts: localhost connection: local @@ -25,15 +29,17 @@ register: gce - name: Add new instances public IPs to oo_nodes_to_config - add_host: "hostname={{ 
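Editor's note: the `openshift_nodes` and `openshift_master_*` vars above are built with the repo's `oo_select_keys` and `oo_collect` lookup filters. A simplified Python sketch of their behavior — the real filter plugins live elsewhere in the repo and may differ in detail, so treat this as illustrative:

```
def oo_select_keys(data, keys):
    # Keep only the hostvars entries whose host name is in `keys`.
    return [data[key] for key in keys if key in data]

def oo_collect(data, attribute):
    # Pull a single attribute out of each selected hostvars dict.
    return [item[attribute] for item in data if attribute in item]

hostvars = {
    'master1': {'openshift_master_api_url': 'https://master1:8443'},
    'node1': {'openshift_node_name': 'node1'},
}
masters = oo_select_keys(hostvars, ['master1'])
print(oo_collect(masters, 'openshift_master_api_url'))
# ['https://master1:8443']
```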
item.name }} ansible_ssh_host={{ item.public_ip }} groupname=oo_nodes_to_config" + add_host: + hostname: "{{ item.name }}" + ansible_ssh_host: "{{ item.public_ip }}" + groupname: oo_nodes_to_config + gce_private_ip: "{{ item.private_ip }}" with_items: gce.instance_data - name: Wait for ssh wait_for: "port=22 host={{ item.public_ip }}" with_items: gce.instance_data - - debug: var=gce - - name: Wait for root user setup command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null root@{{ item.public_ip }} echo root user is setup" register: result @@ -45,13 +51,3 @@ # Apply the configs, separate so that just the configs can be run by themselves - include: config.yml - -# Always bounce service to pick up new credentials -#- name: "Restart instances" -# hosts: oo_nodes_to_config -# connection: ssh -# user: root -# tasks: -# - debug: var=groups.oo_nodes_to_config -# - name: Restart OpenShift -# service: name=openshift-node enabled=yes state=restarted diff --git a/playbooks/gce/openshift-node/terminate.yml b/playbooks/gce/openshift-node/terminate.yml index 9aa8a48c1..7d71dfcab 100644 --- a/playbooks/gce/openshift-node/terminate.yml +++ b/playbooks/gce/openshift-node/terminate.yml @@ -1,17 +1,13 @@ -- name: "populate oo_hosts_to_terminate host group if needed" +--- +- name: Populate oo_nodes_to_terminate host group if needed hosts: localhost gather_facts: no tasks: - - debug: var=oo_host_group_exp - - name: Evaluate oo_host_group_exp if it's set - add_host: "name={{ item }} groups=oo_hosts_to_terminate" + add_host: "name={{ item }} groups=oo_nodes_to_terminate" with_items: "{{ oo_host_group_exp | default('') }}" when: oo_host_group_exp is defined - - debug: msg="{{ groups['oo_hosts_to_terminate'] }}" - - - name: Terminate node instances hosts: localhost connection: local @@ -23,12 +19,10 @@ pem_file: "{{ gce_pem_file }}" project_id: "{{ gce_project_id }}" state: 'absent' - instance_names: "{{ groups['oo_hosts_to_terminate'] }}" - disks: "{{ groups['oo_hosts_to_terminate'] }}" + instance_names: "{{ groups['oo_nodes_to_terminate'] }}" + disks: "{{ groups['oo_nodes_to_terminate'] }}" register: gce - - debug: var=gce - - name: Remove disks of instances gce_pd: service_account_email: "{{ gce_service_account_email }}" diff --git a/playbooks/gce/openshift-node/vars.yml b/playbooks/gce/openshift-node/vars.yml index fb5f4ea42..c196b2fca 100644 --- a/playbooks/gce/openshift-node/vars.yml +++ b/playbooks/gce/openshift-node/vars.yml @@ -1,2 +1,3 @@ --- openshift_debug_level: 4 +openshift_cluster_id: "{{ cluster_id }}" diff --git a/roles/openshift_common/README.md b/roles/openshift_common/README.md index 880d66e2c..14c2037e4 100644 --- a/roles/openshift_common/README.md +++ b/roles/openshift_common/README.md @@ -12,17 +12,20 @@ rhel-7-server-extra-rpms, and rhel-7-server-ose-beta-rpms repos. 
Role Variables -------------- -| Name | Default value | | -|-------------------------------|------------------------------|----------------------------------------| -| openshift_debug_level | 0 | Global openshift debug log verbosity | -| openshift_hostname | UNDEF (Required) | hostname to use for this instance | -| openshift_public_ip | UNDEF (Required) | Public IP address to use for this host | -| openshift_env | default | Envrionment name if multiple OpenShift instances | +| Name | Default value | | +|---------------------------|-------------------|---------------------------------------------| +| openshift_cluster_id | default | Cluster name if multiple OpenShift clusters | +| openshift_debug_level | 0 | Global openshift debug log verbosity | +| openshift_hostname | UNDEF | Internal hostname to use for this host (this value will set the hostname on the system) | +| openshift_ip | UNDEF | Internal IP address to use for this host | +| openshift_public_hostname | UNDEF | Public hostname to use for this host | +| openshift_public_ip | UNDEF | Public IP address to use for this host | Dependencies ------------ os_firewall +openshift_facts openshift_repos Example Playbook @@ -38,4 +41,4 @@ Apache License, Version 2.0 Author Information ------------------ -TODO +Jason DeTiberus (jdetiber@redhat.com) diff --git a/roles/openshift_common/defaults/main.yml b/roles/openshift_common/defaults/main.yml index 22b2c6ffd..4d3e0fe9e 100644 --- a/roles/openshift_common/defaults/main.yml +++ b/roles/openshift_common/defaults/main.yml @@ -1,2 +1,3 @@ --- +openshift_cluster_id: 'default' openshift_debug_level: 0 diff --git a/roles/openshift_common/meta/main.yml b/roles/openshift_common/meta/main.yml index cee4dd337..81363ec68 100644 --- a/roles/openshift_common/meta/main.yml +++ b/roles/openshift_common/meta/main.yml @@ -13,4 +13,5 @@ galaxy_info: - cloud dependencies: - { role: os_firewall } +- { role: openshift_facts } - { role: openshift_repos } diff --git a/roles/openshift_common/tasks/main.yml b/roles/openshift_common/tasks/main.yml index 07737a71f..941190534 100644 --- a/roles/openshift_common/tasks/main.yml +++ b/roles/openshift_common/tasks/main.yml @@ -1,19 +1,16 @@ --- -- name: Set hostname - hostname: name={{ openshift_hostname }} +- name: Set common OpenShift facts + openshift_facts: + role: 'common' + local_facts: + cluster_id: "{{ openshift_cluster_id | default('default') }}" + debug_level: "{{ openshift_debug_level | default(0) }}" + hostname: "{{ openshift_hostname | default(None) }}" + ip: "{{ openshift_ip | default(None) }}" + public_hostname: "{{ openshift_public_hostname | default(None) }}" + public_ip: "{{ openshift_public_ip | default(None) }}" + use_openshift_sdn: "{{ openshift_use_openshift_sdn | default(None) }}" -- name: Configure local facts file - file: path=/etc/ansible/facts.d/ state=directory mode=0750 +- name: Set hostname + hostname: name={{ openshift.common.hostname }} -- name: Set common OpenShift facts - include: set_facts.yml - facts: - - section: common - option: env - value: "{{ openshift_env | default('default') }}" - - section: common - option: host_type - value: "{{ openshift_host_type }}" - - section: common - option: debug_level - value: "{{ openshift_debug_level }}" diff --git a/roles/openshift_common/tasks/set_facts.yml b/roles/openshift_common/tasks/set_facts.yml deleted file mode 100644 index 349eecd1d..000000000 --- a/roles/openshift_common/tasks/set_facts.yml +++ /dev/null @@ -1,9 +0,0 @@ ---- -- name: "Setting local_facts" - ini_file: - dest: 
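Editor's note: with set_facts.yml gone, per-role settings are persisted by the openshift_facts module and surfaced under a single `openshift` namespace, which is what references like `{{ openshift.common.hostname }}` resolve against. An illustrative Python view of that structure (the keys mirror the module's local_facts; the values here are hypothetical):

```
# Shape of the namespaced facts the module returns (illustrative values):
openshift = {
    'common': {
        'cluster_id': 'default',
        'debug_level': '0',
        'hostname': 'master1.example.com',
        'ip': '10.0.0.5',
        'public_hostname': 'master1.example.com',
        'public_ip': '203.0.113.10',
        'use_openshift_sdn': True,
    },
}
print(openshift['common']['hostname'])
# what {{ openshift.common.hostname }} resolves to in a play
```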
/etc/ansible/facts.d/openshift.fact - mode: 0640 - section: "{{ item.section }}" - option: "{{ item.option }}" - value: "{{ item.value }}" - with_items: facts diff --git a/roles/openshift_common/vars/main.yml b/roles/openshift_common/vars/main.yml index 623aed9bf..50816d319 100644 --- a/roles/openshift_common/vars/main.yml +++ b/roles/openshift_common/vars/main.yml @@ -1,6 +1,7 @@ --- -openshift_master_credentials_dir: /var/lib/openshift/openshift.local.certificates/admin/ - # TODO: Upstream kubernetes only supports iptables currently, if this changes, # then these variable should be moved to defaults +# TODO: it might be possible to still use firewalld if we wire up the created +# chains with the public zone (or the zone associated with the correct +# interfaces) os_firewall_use_firewalld: False diff --git a/roles/openshift_facts/README.md b/roles/openshift_facts/README.md new file mode 100644 index 000000000..2fd50e236 --- /dev/null +++ b/roles/openshift_facts/README.md @@ -0,0 +1,34 @@ +OpenShift Facts +=============== + +Provides the openshift_facts module + +Requirements +------------ + +None + +Role Variables +-------------- + +None + +Dependencies +------------ + +None + +Example Playbook +---------------- + +TODO + +License +------- + +Apache License, Version 2.0 + +Author Information +------------------ + +Jason DeTiberus (jdetiber@redhat.com) diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py new file mode 100755 index 000000000..0dd343443 --- /dev/null +++ b/roles/openshift_facts/library/openshift_facts.py @@ -0,0 +1,482 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# vim: expandtab:tabstop=4:shiftwidth=4 + +DOCUMENTATION = ''' +--- +module: openshift_facts +short_description: OpenShift Facts +author: Jason DeTiberus +requirements: [ ] +''' +EXAMPLES = ''' +''' + +import ConfigParser +import copy + +class OpenShiftFactsUnsupportedRoleError(Exception): + pass + +class OpenShiftFactsFileWriteError(Exception): + pass + +class OpenShiftFacts(): + known_roles = ['common', 'master', 'node', 'master_sdn', 'node_sdn'] + + def __init__(self, role, filename, local_facts): + self.changed = False + self.filename = filename + if role not in self.known_roles: + raise OpenShiftFactsUnsupportedRoleError("Role %s is not supported by this module" % role) + self.role = role + self.facts = self.generate_facts(local_facts) + + def generate_facts(self, local_facts): + local_facts = self.init_local_facts(local_facts) + roles = local_facts.keys() + + defaults = self.get_defaults(roles) + provider_facts = self.init_provider_facts() + facts = self.apply_provider_facts(defaults, provider_facts, roles) + + facts = self.merge_facts(facts, local_facts) + facts['current_config'] = self.current_config(facts) + self.set_url_facts_if_unset(facts) + return dict(openshift=facts) + + + def set_url_facts_if_unset(self, facts): + if 'master' in facts: + for (url_var, use_ssl, port, default) in [ + ('api_url', + facts['master']['api_use_ssl'], + facts['master']['api_port'], + facts['common']['hostname']), + ('public_api_url', + facts['master']['api_use_ssl'], + facts['master']['api_port'], + facts['common']['public_hostname']), + ('console_url', + facts['master']['console_use_ssl'], + facts['master']['console_port'], + facts['common']['hostname']), + ('public_console_url', + facts['master']['console_use_ssl'], + facts['master']['console_port'], + facts['common']['public_hostname'])]: + if url_var not in facts['master']: + scheme = 
'https' if use_ssl else 'http' + netloc = default + if (scheme == 'https' and port != '443') or (scheme == 'http' and port != '80'): + netloc = "%s:%s" % (netloc, port) + facts['master'][url_var] = urlparse.urlunparse((scheme, netloc, '', '', '', '')) + + + # Query current OpenShift config and return a dictionary containing + # settings that may be valuable for determining actions that need to be + # taken in the playbooks/roles + def current_config(self, facts): + current_config=dict() + roles = [ role for role in facts if role not in ['common','provider'] ] + for role in roles: + if 'roles' in current_config: + current_config['roles'].append(role) + else: + current_config['roles'] = [role] + + # TODO: parse the /etc/sysconfig/openshift-{master,node} config to + # determine the location of files. + + # Query kubeconfig settings + kubeconfig_dir = '/var/lib/openshift/openshift.local.certificates' + if role == 'node': + kubeconfig_dir = os.path.join(kubeconfig_dir, "node-%s" % facts['common']['hostname']) + + kubeconfig_path = os.path.join(kubeconfig_dir, '.kubeconfig') + if os.path.isfile('/usr/bin/openshift') and os.path.isfile(kubeconfig_path): + try: + _, output, error = module.run_command(["/usr/bin/openshift", "ex", + "config", "view", "-o", + "json", + "--kubeconfig=%s" % kubeconfig_path], + check_rc=False) + config = json.loads(output) + + try: + for cluster in config['clusters']: + config['clusters'][cluster]['certificate-authority-data'] = 'masked' + except KeyError: + pass + try: + for user in config['users']: + config['users'][user]['client-certificate-data'] = 'masked' + config['users'][user]['client-key-data'] = 'masked' + except KeyError: + pass + + current_config['kubeconfig'] = config + except Exception: + pass + + return current_config + + + def apply_provider_facts(self, facts, provider_facts, roles): + if not provider_facts: + return facts + + use_openshift_sdn = provider_facts.get('use_openshift_sdn') + if isinstance(use_openshift_sdn, bool): + facts['common']['use_openshift_sdn'] = use_openshift_sdn + + common_vars = [('hostname', 'ip'), ('public_hostname', 'public_ip')] + for h_var, ip_var in common_vars: + ip_value = provider_facts['network'].get(ip_var) + if ip_value: + facts['common'][ip_var] = ip_value + + facts['common'][h_var] = self.choose_hostname([provider_facts['network'].get(h_var)], facts['common'][ip_var]) + + if 'node' in roles: + ext_id = provider_facts.get('external_id') + if ext_id: + facts['node']['external_id'] = ext_id + + facts['provider'] = provider_facts + return facts + + def hostname_valid(self, hostname): + if (not hostname or + hostname.startswith('localhost') or + hostname.endswith('localdomain') or + len(hostname.split('.')) < 2): + return False + + return True + + def choose_hostname(self, hostnames=[], fallback=''): + hostname = fallback + + ips = [ i for i in hostnames if i is not None and re.match(r'\A\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\Z', i) ] + hosts = [ i for i in hostnames if i is not None and i not in set(ips) ] + + for host_list in (hosts, ips): + for h in host_list: + if self.hostname_valid(h): + return h + + return hostname + + def get_defaults(self, roles): + hardware_facts = self.get_hardware_facts() + net_facts = self.get_net_facts() + base_facts = self.get_base_facts() + + defaults = dict() + + common = dict(use_openshift_sdn=True) + ip = net_facts['default_ipv4']['address'] + common['ip'] = ip + common['public_ip'] = ip + + rc, output, error = module.run_command(['hostname', '-f']) + hostname_f = output.strip() if rc == 0 
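Editor's note: a standalone usage sketch of the hostname-selection heuristic above — valid DNS names are preferred over bare IPs, and localhost-style or single-label names are rejected. Simplified from hostname_valid/choose_hostname; function bodies restate the module's rules for illustration only:

```
import re

def hostname_valid(hostname):
    # Same heuristic as the module: reject empty, localhost*,
    # *.localdomain, and single-label names.
    return bool(hostname) and not (
        hostname.startswith('localhost') or
        hostname.endswith('localdomain') or
        len(hostname.split('.')) < 2)

def choose_hostname(hostnames, fallback=''):
    ips = [h for h in hostnames
           if h and re.match(r'\A\d{1,3}(\.\d{1,3}){3}\Z', h)]
    names = [h for h in hostnames if h and h not in set(ips)]
    for candidate in names + ips:   # prefer real names over IPs
        if hostname_valid(candidate):
            return candidate
    return fallback

print(choose_hostname(['localhost', '10.0.0.5', 'master1.example.com'],
                      '10.0.0.5'))
# master1.example.com
```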
else '' + hostname_values = [hostname_f, base_facts['nodename'], base_facts['fqdn']] + hostname = self.choose_hostname(hostname_values) + + common['hostname'] = hostname + common['public_hostname'] = hostname + defaults['common'] = common + + if 'master' in roles: + # TODO: provide for a better way to override just the port, or just + # the urls, instead of forcing both, also to override the hostname + # without having to re-generate these urls later + master = dict(api_use_ssl=True, api_port='8443', + console_use_ssl=True, console_path='/console', + console_port='8443', etcd_use_ssl=False, + etcd_port='4001') + defaults['master'] = master + + if 'node' in roles: + node = dict(external_id=common['hostname'], pod_cidr='', + labels={}, annotations={}) + node['resources_cpu'] = hardware_facts['processor_cores'] + node['resources_memory'] = int(int(hardware_facts['memtotal_mb']) * 1024 * 1024 * 0.75) + defaults['node'] = node + + return defaults + + def merge_facts(self, orig, new): + facts = dict() + for key, value in orig.iteritems(): + if key in new: + if isinstance(value, dict): + facts[key] = self.merge_facts(value, new[key]) + else: + facts[key] = copy.copy(new[key]) + else: + facts[key] = copy.deepcopy(value) + new_keys = set(new.keys()) - set(orig.keys()) + for key in new_keys: + facts[key] = copy.deepcopy(new[key]) + return facts + + def query_metadata(self, metadata_url, headers=None, expect_json=False): + r, info = fetch_url(module, metadata_url, headers=headers) + if info['status'] != 200: + module.fail_json(msg='Failed to query metadata', result=r, + info=info) + if expect_json: + return module.from_json(r.read()) + else: + return [line.strip() for line in r.readlines()] + + def walk_metadata(self, metadata_url, headers=None, expect_json=False): + metadata = dict() + + for line in self.query_metadata(metadata_url, headers, expect_json): + if line.endswith('/') and not line == 'public-keys/': + key = line[:-1] + metadata[key]=self.walk_metadata(metadata_url + line, headers, + expect_json) + else: + results = self.query_metadata(metadata_url + line, headers, + expect_json) + if len(results) == 1: + metadata[line] = results.pop() + else: + metadata[line] = results + return metadata + + def get_provider_metadata(self, metadata_url, supports_recursive=False, + headers=None, expect_json=False): + if supports_recursive: + metadata = self.query_metadata(metadata_url, headers, expect_json) + else: + metadata = self.walk_metadata(metadata_url, headers, expect_json) + return metadata + + def get_hardware_facts(self): + if not hasattr(self, 'hardware_facts'): + self.hardware_facts = Hardware().populate() + return self.hardware_facts + + def get_base_facts(self): + if not hasattr(self, 'base_facts'): + self.base_facts = Facts().populate() + return self.base_facts + + def get_virt_facts(self): + if not hasattr(self, 'virt_facts'): + self.virt_facts = Virtual().populate() + return self.virt_facts + + def get_net_facts(self): + if not hasattr(self, 'net_facts'): + self.net_facts = Network(module).populate() + return self.net_facts + + def guess_host_provider(self): + # TODO: cloud provider facts should probably be submitted upstream + virt_facts = self.get_virt_facts() + hardware_facts = self.get_hardware_facts() + product_name = hardware_facts['product_name'] + product_version = hardware_facts['product_version'] + virt_type = virt_facts['virtualization_type'] + virt_role = virt_facts['virtualization_role'] + provider = None + metadata = None + + # TODO: this is not exposed through 
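Editor's note: merge_facts gives local facts precedence over computed defaults by recursing through nested dicts. A small standalone demo of that merge semantics, simplified from the method above:

```
def merge_facts(orig, new):
    # Recurse into nested dicts; values from `new` win on conflicts.
    facts = dict(orig)
    for key, value in new.items():
        if (key in facts and isinstance(facts[key], dict)
                and isinstance(value, dict)):
            facts[key] = merge_facts(facts[key], value)
        else:
            facts[key] = value
    return facts

defaults = {'common': {'ip': '10.0.0.5', 'use_openshift_sdn': True}}
local = {'common': {'ip': '192.168.1.5'}}
print(merge_facts(defaults, local))
# {'common': {'ip': '192.168.1.5', 'use_openshift_sdn': True}}
```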
module_utils/facts.py in ansible, + # need to create PR for ansible to expose it + bios_vendor = get_file_content('/sys/devices/virtual/dmi/id/bios_vendor') + if bios_vendor == 'Google': + provider = 'gce' + metadata_url = 'http://metadata.google.internal/computeMetadata/v1/?recursive=true' + headers = {'Metadata-Flavor': 'Google'} + metadata = self.get_provider_metadata(metadata_url, True, headers, + True) + + # Filter sshKeys and serviceAccounts from gce metadata + metadata['project']['attributes'].pop('sshKeys', None) + metadata['instance'].pop('serviceAccounts', None) + elif virt_type == 'xen' and virt_role == 'guest' and re.match(r'.*\.amazon$', product_version): + provider = 'ec2' + metadata_url = 'http://169.254.169.254/latest/meta-data/' + metadata = self.get_provider_metadata(metadata_url) + elif re.search(r'OpenStack', product_name): + provider = 'openstack' + metadata_url = 'http://169.254.169.254/openstack/latest/meta_data.json' + metadata = self.get_provider_metadata(metadata_url, True, None, True) + ec2_compat_url = 'http://169.254.169.254/latest/meta-data/' + metadata['ec2_compat'] = self.get_provider_metadata(ec2_compat_url) + + # Filter public_keys and random_seed from openstack metadata + metadata.pop('public_keys', None) + metadata.pop('random_seed', None) + return dict(name=provider, metadata=metadata) + + def normalize_provider_facts(self, provider, metadata): + if provider is None or metadata is None: + return {} + + # TODO: test for ipv6_enabled where possible (gce, aws do not support) + # and configure ipv6 facts if available + + # TODO: add support for setting user_data if available + + facts = dict(name=provider, metadata=metadata) + network = dict(interfaces=[], ipv6_enabled=False) + if provider == 'gce': + for interface in metadata['instance']['networkInterfaces']: + int_info = dict(ips=[interface['ip']], network_type=provider) + int_info['public_ips'] = [ ac['externalIp'] for ac in interface['accessConfigs'] ] + int_info['public_ips'].extend(interface['forwardedIps']) + _, _, network_id = interface['network'].rpartition('/') + int_info['network_id'] = network_id + network['interfaces'].append(int_info) + _, _, zone = metadata['instance']['zone'].rpartition('/') + facts['zone'] = zone + facts['external_id'] = metadata['instance']['id'] + + # Default to no sdn for GCE deployments + facts['use_openshift_sdn'] = False + + # GCE currently only supports a single interface + network['ip'] = network['interfaces'][0]['ips'][0] + network['public_ip'] = network['interfaces'][0]['public_ips'][0] + network['hostname'] = metadata['instance']['hostname'] + + # TODO: attempt to resolve public_hostname + network['public_hostname'] = network['public_ip'] + elif provider == 'ec2': + for interface in sorted(metadata['network']['interfaces']['macs'].values(), + key=lambda x: x['device-number']): + int_info = dict() + var_map = {'ips': 'local-ipv4s', 'public_ips': 'public-ipv4s'} + for ips_var, int_var in var_map.iteritems(): + ips = interface[int_var] + int_info[ips_var] = [ips] if isinstance(ips, basestring) else ips + int_info['network_type'] = 'vpc' if 'vpc-id' in interface else 'classic' + int_info['network_id'] = interface['subnet-id'] if int_info['network_type'] == 'vpc' else None + network['interfaces'].append(int_info) + facts['zone'] = metadata['placement']['availability-zone'] + facts['external_id'] = metadata['instance-id'] + + # TODO: actually attempt to determine default local and public ips + # by using the ansible default ip fact and the ipv4-associations + # form 
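Editor's note: the provider detection above keys off DMI and virtualization facts. Condensed into a standalone function — same rules as guess_host_provider, minus the metadata fetching; the function name and argument order are illustrative:

```
import re

def guess_provider(bios_vendor, virt_type, virt_role,
                   product_name, product_version):
    # Inputs come from DMI (/sys/devices/virtual/dmi/id/bios_vendor)
    # and the gathered hardware/virtualization facts.
    if bios_vendor == 'Google':
        return 'gce'
    if (virt_type == 'xen' and virt_role == 'guest'
            and re.match(r'.*\.amazon$', product_version)):
        return 'ec2'
    if re.search(r'OpenStack', product_name):
        return 'openstack'
    return None

print(guess_provider('Google', 'kvm', 'guest', '', ''))  # gce
```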
the ec2 metadata + network['ip'] = metadata['local-ipv4'] + network['public_ip'] = metadata['public-ipv4'] + + # TODO: verify that local hostname makes sense and is resolvable + network['hostname'] = metadata['local-hostname'] + + # TODO: verify that public hostname makes sense and is resolvable + network['public_hostname'] = metadata['public-hostname'] + elif provider == 'openstack': + # openstack ec2 compat api does not support network interfaces and + # the version tested on did not include the info in the openstack + # metadata api, should be updated if neutron exposes this. + + facts['zone'] = metadata['availability_zone'] + facts['external_id'] = metadata['uuid'] + network['ip'] = metadata['ec2_compat']['local-ipv4'] + network['public_ip'] = metadata['ec2_compat']['public-ipv4'] + + # TODO: verify local hostname makes sense and is resolvable + network['hostname'] = metadata['hostname'] + + # TODO: verify that public hostname makes sense and is resolvable + network['public_hostname'] = metadata['ec2_compat']['public-hostname'] + + facts['network'] = network + return facts + + def init_provider_facts(self): + provider_info = self.guess_host_provider() + provider_facts = self.normalize_provider_facts( + provider_info.get('name'), + provider_info.get('metadata') + ) + return provider_facts + + def get_facts(self): + # TODO: transform facts into cleaner format (openshift_ instead + # of openshift. + return self.facts + + def init_local_facts(self, facts={}): + changed = False + + local_facts = ConfigParser.SafeConfigParser() + local_facts.read(self.filename) + + section = self.role + if not local_facts.has_section(section): + local_facts.add_section(section) + changed = True + + for key, value in facts.iteritems(): + if isinstance(value, bool): + value = str(value) + if not value: + continue + if not local_facts.has_option(section, key) or local_facts.get(section, key) != value: + local_facts.set(section, key, value) + changed = True + + if changed and not module.check_mode: + try: + fact_dir = os.path.dirname(self.filename) + if not os.path.exists(fact_dir): + os.makedirs(fact_dir) + with open(self.filename, 'w') as fact_file: + local_facts.write(fact_file) + except (IOError, OSError) as e: + raise OpenShiftFactsFileWriteError("Could not create fact file: %s, error: %s" % (self.filename, e)) + self.changed = changed + + role_facts = dict() + for section in local_facts.sections(): + role_facts[section] = dict() + for opt, val in local_facts.items(section): + role_facts[section][opt] = val + return role_facts + + +def main(): + global module + module = AnsibleModule( + argument_spec = dict( + role=dict(default='common', + choices=OpenShiftFacts.known_roles, + required=False), + local_facts=dict(default={}, type='dict', required=False), + ), + supports_check_mode=True, + add_file_common_args=True, + ) + + role = module.params['role'] + local_facts = module.params['local_facts'] + fact_file = '/etc/ansible/facts.d/openshift.fact' + + openshift_facts = OpenShiftFacts(role, fact_file, local_facts) + + file_params = module.params.copy() + file_params['path'] = fact_file + file_args = module.load_file_common_arguments(file_params) + changed = module.set_fs_attributes_if_different(file_args, + openshift_facts.changed) + + return module.exit_json(changed=changed, + ansible_facts=openshift_facts.get_facts()) + +# import module snippets +from ansible.module_utils.basic import * +from ansible.module_utils.facts import * +from ansible.module_utils.urls import * +main() diff --git 
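Editor's note: init_local_facts only rewrites /etc/ansible/facts.d/openshift.fact when a key is new or its stored value differs, which is what keeps repeated playbook runs idempotent. A trimmed-down sketch of that change detection (helper name is illustrative):

```
import ConfigParser  # configparser on Python 3

def update_local_facts(parser, section, facts):
    # Flag a change only when a key is missing or its value differs,
    # so the fact file is rewritten only when needed.
    changed = False
    if not parser.has_section(section):
        parser.add_section(section)
        changed = True
    for key, value in facts.items():
        value = str(value)
        if (not parser.has_option(section, key)
                or parser.get(section, key) != value):
            parser.set(section, key, value)
            changed = True
    return changed

parser = ConfigParser.SafeConfigParser()
parser.read('/etc/ansible/facts.d/openshift.fact')
print(update_local_facts(parser, 'common', {'cluster_id': 'default'}))
```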
a/roles/openshift_facts/meta/main.yml b/roles/openshift_facts/meta/main.yml new file mode 100644 index 000000000..0be3afd24 --- /dev/null +++ b/roles/openshift_facts/meta/main.yml @@ -0,0 +1,15 @@ +--- +galaxy_info: + author: Jason DeTiberus + description: + company: Red Hat, Inc. + license: Apache License, Version 2.0 + min_ansible_version: 1.8 + platforms: + - name: EL + versions: + - 7 + categories: + - cloud + - system +dependencies: [] diff --git a/roles/openshift_facts/tasks/main.yml b/roles/openshift_facts/tasks/main.yml new file mode 100644 index 000000000..5a7d10d25 --- /dev/null +++ b/roles/openshift_facts/tasks/main.yml @@ -0,0 +1,3 @@ +--- +- name: Gather OpenShift facts + openshift_facts: diff --git a/roles/openshift_master/README.md b/roles/openshift_master/README.md index 2d898bc3b..9f9d0a613 100644 --- a/roles/openshift_master/README.md +++ b/roles/openshift_master/README.md @@ -13,20 +13,24 @@ Role Variables -------------- From this role: -| Name | Default value | -| -|------------------------------------------|-----------------------|----------------------------------------| -| openshift_master_manage_service_externally | False | Should the openshift-master role manage the openshift-master service? | -| openshift_master_debug_level | openshift_debug_level | Verbosity of the debug logs for openshift-master | -| openshift_node_ips | [] | List of the openshift node ip addresses, that we want to pre-register to the system when openshift-master starts up | -| openshift_registry_url | UNDEF (Optional) | Default docker registry to use | +| Name | Default value | | +|-------------------------------------|-----------------------|--------------------------------------------------| +| openshift_master_debug_level | openshift_debug_level | Verbosity of the debug logs for openshift-master | +| openshift_node_ips | [] | List of the openshift node ip addresses to pre-register when openshift-master starts up | +| openshift_registry_url | UNDEF | Default docker registry to use | +| openshift_master_api_port | UNDEF | | +| openshift_master_console_port | UNDEF | | +| openshift_master_api_url | UNDEF | | +| openshift_master_console_url | UNDEF | | +| openshift_master_public_api_url | UNDEF | | +| openshift_master_public_console_url | UNDEF | | From openshift_common: -| Name | Default Value | | -|-------------------------------|---------------------|---------------------| -| openshift_debug_level | 0 | Global openshift debug log verbosity | -| openshift_public_ip | UNDEF (Required) | Public IP address to use for this host | -| openshift_hostname | UNDEF (Required) | hostname to use for this instance | +| Name | Default Value | | +|-------------------------------|----------------|----------------------------------------| +| openshift_debug_level | 0 | Global openshift debug log verbosity | +| openshift_public_ip | UNDEF | Public IP address to use for this host | +| openshift_hostname | UNDEF | hostname to use for this instance | Dependencies ------------ diff --git a/roles/openshift_master/defaults/main.yml b/roles/openshift_master/defaults/main.yml index 0159afbb5..87fb347a8 100644 --- a/roles/openshift_master/defaults/main.yml +++ b/roles/openshift_master/defaults/main.yml @@ -1,16 +1,17 @@ --- -openshift_master_manage_service_externally: false -openshift_master_debug_level: "{{ openshift_debug_level | default(0) }}" openshift_node_ips: [] + +# TODO: update setting these values based on the facts +# TODO: update for console port change os_firewall_allow: - service: etcd embedded port: 
4001/tcp -- service: etcd peer - port: 7001/tcp - service: OpenShift api https port: 8443/tcp -- service: OpenShift web console https - port: 8444/tcp os_firewall_deny: - service: OpenShift api http port: 8080/tcp +- service: former OpenShift web console port + port: 8444/tcp +- service: former etcd peer port + port: 7001/tcp diff --git a/roles/openshift_master/handlers/main.yml b/roles/openshift_master/handlers/main.yml index 503d08d41..6fd4dfb51 100644 --- a/roles/openshift_master/handlers/main.yml +++ b/roles/openshift_master/handlers/main.yml @@ -1,4 +1,3 @@ --- - name: restart openshift-master service: name=openshift-master state=restarted - when: not openshift_master_manage_service_externally diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml index 52f5f694c..aa615df39 100644 --- a/roles/openshift_master/tasks/main.yml +++ b/roles/openshift_master/tasks/main.yml @@ -1,19 +1,37 @@ --- -# TODO: allow for overriding default ports where possible -# TODO: if setting up multiple masters, will need to predistribute the certs -# to the additional masters before starting openshift-master +# TODO: actually have api_port, api_use_ssl, console_port, console_use_ssl, +# etcd_use_ssl actually change the master config. + +- name: Set master OpenShift facts + openshift_facts: + role: 'master' + local_facts: + debug_level: "{{ openshift_master_debug_level | default(openshift.common.debug_level) }}" + api_port: "{{ openshift_master_api_port | default(None) }}" + api_url: "{{ openshift_master_api_url | default(None) }}" + api_use_ssl: "{{ openshift_master_api_use_ssl | default(None) }}" + public_api_url: "{{ openshift_master_public_api_url | default(None) }}" + console_port: "{{ openshift_master_console_port | default(None) }}" + console_url: "{{ openshift_master_console_url | default(None) }}" + console_use_ssl: "{{ openshift_master_console_use_ssl | default(None) }}" + public_console_url: "{{ openshift_master_public_console_url | default(None) }}" + etcd_use_ssl: "{{ openshift_master_etcd_use_ssl | default(None) }}" - name: Install OpenShift Master package yum: pkg=openshift-master state=installed +# TODO: We should pre-generate the master config and point to the generated +# config rather than setting command line flags here - name: Configure OpenShift settings lineinfile: dest: /etc/sysconfig/openshift-master regexp: '^OPTIONS=' - line: "OPTIONS=\"--public-master={{ openshift_hostname }} {% if openshift_node_ips %} --nodes={{ openshift_node_ips | join(',') }} {% endif %} --loglevel={{ openshift_master_debug_level }}\"" + line: "OPTIONS=\"--master={{ openshift.common.hostname }} --public-master={{ openshift.common.public_hostname }} {% if openshift_node_ips %} --nodes={{ openshift_node_ips | join(',') }} {% endif %} --loglevel={{ openshift.master.debug_level }}\"" notify: - restart openshift-master +# TODO: should this be populated by a fact based on the deployment type +# (origin, online, enterprise)? 
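Editor's note: for reference, the OPTIONS line templated by the lineinfile task above expands as in this Python sketch. The helper name and sample values are illustrative; the flag layout mirrors the Jinja2 template:

```
def master_options(facts, node_ips):
    # Builds the OPTIONS string written to /etc/sysconfig/openshift-master.
    parts = ["--master=%s" % facts['common']['hostname'],
             "--public-master=%s" % facts['common']['public_hostname']]
    if node_ips:
        parts.append("--nodes=%s" % ",".join(node_ips))
    parts.append("--loglevel=%s" % facts['master']['debug_level'])
    return 'OPTIONS="%s"' % " ".join(parts)

print(master_options(
    {'common': {'hostname': 'master1.example.com',
                'public_hostname': 'master1.example.com'},
     'master': {'debug_level': 4}},
    ['10.0.0.11', '10.0.0.12']))
# OPTIONS="--master=master1.example.com --public-master=master1.example.com
#          --nodes=10.0.0.11,10.0.0.12 --loglevel=4"
```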
- name: Set default registry url lineinfile: dest: /etc/sysconfig/openshift-master @@ -23,34 +41,18 @@ notify: - restart openshift-master -- name: Set master OpenShift facts - include: "{{ role_path | dirname }}/openshift_common/tasks/set_facts.yml" - facts: - - section: master - option: debug_level - value: "{{ openshift_master_debug_level }}" - - section: master - option: public_ip - value: "{{ openshift_public_ip }}" - - section: master - option: externally_managed - value: "{{ openshift_master_manage_service_externally }}" - - name: Start and enable openshift-master service: name=openshift-master enabled=yes state=started - when: not openshift_master_manage_service_externally - register: result - -- name: Disable openshift-master if openshift-master is managed externally - service: name=openshift-master enabled=false - when: openshift_master_manage_service_externally - name: Create .kube directory file: path: /root/.kube state: directory mode: 0700 + +# TODO: Update this file if the contents of the source file are not present in +# the dest file, will need to make sure to ignore things that could be added - name: Configure root user kubeconfig - command: cp /var/lib/openshift/openshift.local.certificates/admin/.kubeconfig /root/.kube/.kubeconfig + command: cp /var/lib/openshift/openshift.local.certificates/openshift-client/.kubeconfig /root/.kube/.kubeconfig args: creates: /root/.kube/.kubeconfig diff --git a/roles/openshift_master/vars/main.yml b/roles/openshift_master/vars/main.yml deleted file mode 100644 index 9a8c4bba2..000000000 --- a/roles/openshift_master/vars/main.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -openshift_host_type: master diff --git a/roles/openshift_node/README.md b/roles/openshift_node/README.md index c9b4eab34..83359f164 100644 --- a/roles/openshift_node/README.md +++ b/roles/openshift_node/README.md @@ -16,10 +16,7 @@ Role Variables From this role: | Name | Default value | | |------------------------------------------|-----------------------|----------------------------------------| -| openshift_node_manage_service_externally | False | Should the openshift-node role manage the openshift-node service? 
| | openshift_node_debug_level | openshift_debug_level | Verbosity of the debug logs for openshift-node | -| openshift_master_public_ips | UNDEF (Required) | List of the public IPs for the openhift-master hosts | -| openshift_master_ips | UNDEF (Required) | List of IP addresses for the openshift-master hosts to be used for node -> master communication | | openshift_registry_url | UNDEF (Optional) | Default docker registry to use | From openshift_common: diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml index 6dc73a96e..df7ec41b6 100644 --- a/roles/openshift_node/defaults/main.yml +++ b/roles/openshift_node/defaults/main.yml @@ -1,6 +1,4 @@ --- -openshift_node_manage_service_externally: false -openshift_node_debug_level: "{{ openshift_debug_level | default(0) }}" os_firewall_allow: - service: OpenShift kubelet port: 10250/tcp diff --git a/roles/openshift_node/handlers/main.yml b/roles/openshift_node/handlers/main.yml index f7aa36d88..ca2992637 100644 --- a/roles/openshift_node/handlers/main.yml +++ b/roles/openshift_node/handlers/main.yml @@ -1,4 +1,4 @@ --- - name: restart openshift-node service: name=openshift-node state=restarted - when: not openshift_node_manage_service_externally + when: not openshift.common.use_openshift_sdn|bool diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml index c039e3f05..8cfef0e15 100644 --- a/roles/openshift_node/tasks/main.yml +++ b/roles/openshift_node/tasks/main.yml @@ -1,4 +1,12 @@ --- +# TODO: allow for overriding default ports where possible +# TODO: trigger the external service when restart is needed +- name: Set node OpenShift facts + openshift_facts: + role: 'node' + local_facts: + debug_level: "{{ openshift_node_debug_level | default(openshift.common.debug_level) }}" + - name: Test if node certs and config exist stat: path={{ item }} failed_when: not result.stat.exists @@ -23,7 +31,7 @@ lineinfile: dest: /etc/sysconfig/openshift-node regexp: '^OPTIONS=' - line: "OPTIONS=\"--hostname={{ openshift_hostname }} --loglevel={{ openshift_node_debug_level }} --create-certs=false\"" + line: "OPTIONS=\"--hostname={{ openshift.common.hostname }} --loglevel={{ openshift.node.debug_level }} --create-certs=false\"" notify: - restart openshift-node @@ -36,23 +44,10 @@ notify: - restart openshift-node -- name: Set OpenShift node facts - include: "{{ role_path | dirname }}/openshift_common/tasks/set_facts.yml" - facts: - - section: node - option: debug_level - value: "{{ openshift_node_debug_level }}" - - section: node - option: public_ip - value: "{{ openshift_public_ip }}" - - section: node - option: externally_managed - value: "{{ openshift_node_manage_service_externally }}" - - name: Start and enable openshift-node service: name=openshift-node enabled=yes state=started - when: not openshift_node_manage_service_externally + when: not openshift.common.use_openshift_sdn|bool - name: Disable openshift-node if openshift-node is managed externally service: name=openshift-node enabled=false - when: openshift_node_manage_service_externally + when: openshift.common.use_openshift_sdn|bool diff --git a/roles/openshift_node/vars/main.yml b/roles/openshift_node/vars/main.yml deleted file mode 100644 index 9841d52f9..000000000 --- a/roles/openshift_node/vars/main.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -openshift_host_type: node diff --git a/roles/openshift_register_nodes/README.md b/roles/openshift_register_nodes/README.md index 225dd44b9..b96faa044 100644 --- 
a/roles/openshift_register_nodes/README.md +++ b/roles/openshift_register_nodes/README.md @@ -1,38 +1,34 @@ -Role Name -========= +OpenShift Register Nodes +======================== -A brief description of the role goes here. +TODO Requirements ------------ -Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. +TODO Role Variables -------------- -A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. +TODO Dependencies ------------ -A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. +TODO Example Playbook ---------------- -Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: - - - hosts: servers - roles: - - { role: username.rolename, x: 42 } +TODO License ------- -BSD +Apache License Version 2.0 Author Information ------------------ -An optional section for the role authors to include contact information, or a website (HTML is not allowed). +Jason DeTiberus (jdetiber@redhat.com) diff --git a/roles/openshift_register_nodes/library/kubernetes_register_node.py b/roles/openshift_register_nodes/library/kubernetes_register_node.py old mode 100644 new mode 100755 index 409215616..8ebeb087a --- a/roles/openshift_register_nodes/library/kubernetes_register_node.py +++ b/roles/openshift_register_nodes/library/kubernetes_register_node.py @@ -214,7 +214,8 @@ class Node: resources = NodeResources(version, cpu, memory), cidr = podCIDR, labels = labels, - annotations = annotations + annotations = annotations, + externalID = externalID ) elif version == 'v1beta3': metadata = dict(name = name, diff --git a/roles/openshift_register_nodes/meta/main.yml b/roles/openshift_register_nodes/meta/main.yml index 7b1f0ef0a..e40a152c1 100644 --- a/roles/openshift_register_nodes/meta/main.yml +++ b/roles/openshift_register_nodes/meta/main.yml @@ -1,128 +1,17 @@ --- galaxy_info: - author: your name - description: - company: your company (optional) - # Some suggested licenses: - # - BSD (default) - # - MIT - # - GPLv2 - # - GPLv3 - # - Apache - # - CC-BY - license: license (GPLv2, CC-BY, etc) - min_ansible_version: 1.2 - # - # Below are all platforms currently available. Just uncomment - # the ones that apply to your role. If you don't see your - # platform on this list, let us know and we'll get it added! 
- # - #platforms: - #- name: EL - # versions: - # - all - # - 5 - # - 6 - # - 7 - #- name: GenericUNIX - # versions: - # - all - # - any - #- name: Fedora - # versions: - # - all - # - 16 - # - 17 - # - 18 - # - 19 - # - 20 - #- name: SmartOS - # versions: - # - all - # - any - #- name: opensuse - # versions: - # - all - # - 12.1 - # - 12.2 - # - 12.3 - # - 13.1 - # - 13.2 - #- name: Amazon - # versions: - # - all - # - 2013.03 - # - 2013.09 - #- name: GenericBSD - # versions: - # - all - # - any - #- name: FreeBSD - # versions: - # - all - # - 8.0 - # - 8.1 - # - 8.2 - # - 8.3 - # - 8.4 - # - 9.0 - # - 9.1 - # - 9.1 - # - 9.2 - #- name: Ubuntu - # versions: - # - all - # - lucid - # - maverick - # - natty - # - oneiric - # - precise - # - quantal - # - raring - # - saucy - # - trusty - #- name: SLES - # versions: - # - all - # - 10SP3 - # - 10SP4 - # - 11 - # - 11SP1 - # - 11SP2 - # - 11SP3 - #- name: GenericLinux - # versions: - # - all - # - any - #- name: Debian - # versions: - # - all - # - etch - # - lenny - # - squeeze - # - wheezy - # - # Below are all categories currently available. Just as with - # the platforms above, uncomment those that apply to your role. - # - #categories: - #- cloud - #- cloud:ec2 - #- cloud:gce - #- cloud:rax - #- clustering - #- database - #- database:nosql - #- database:sql - #- development - #- monitoring - #- networking - #- packaging - #- system - #- web -dependencies: [] - # List your role dependencies here, one per line. Only - # dependencies available via galaxy should be listed here. - # Be sure to remove the '[]' above if you add dependencies - # to this list. - + author: Jason DeTiberus + description: + company: Red Hat, Inc. + license: Apache License, Version 2.0 + min_ansible_version: 1.8 + platforms: + - name: EL + versions: + - 7 + categories: + - cloud + - system +dependencies: +- { role: openshift_facts } + diff --git a/roles/openshift_register_nodes/tasks/main.yml b/roles/openshift_register_nodes/tasks/main.yml index 59216fc87..7319b88b1 100644 --- a/roles/openshift_register_nodes/tasks/main.yml +++ b/roles/openshift_register_nodes/tasks/main.yml @@ -1,18 +1,20 @@ --- -# TODO: support configuration for multiple masters, currently hardcoding -# the info from the first master +# TODO: support new create-config command to generate node certs and config +# TODO: recreate master/node configs if settings that affect the configs +# change (hostname, public_hostname, ip, public_ip, etc) # TODO: create a failed_when condition - name: Create node server certificates command: > /usr/bin/openshift admin create-server-cert --overwrite=false - --cert={{ openshift_cert_dir }}/node-{{ item.openshift_node_hostname }}/server.crt - --key={{ openshift_cert_dir }}/node-{{ item.openshift_node_hostname }}/server.key - --hostnames={{ [openshift_hostname, openshift_public_hostname, openshift_ip, openshift_public_ip]|join(",") }} + --cert={{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}/server.crt + --key={{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}/server.key + --hostnames={{ [item.openshift.common.hostname, + item.openshift.common.public_hostname]|unique|join(",") }} args: chdir: "{{ openshift_cert_dir_parent }}" - creates: "{{ openshift_cert_dir_abs }}/node-{{ item.openshift_node_hostname }}/server.crt" + creates: "{{ openshift_cert_dir_abs }}/node-{{ item.openshift.common.hostname }}/server.crt" with_items: openshift_nodes register: server_cert_result @@ -21,48 +23,42 @@ command: > /usr/bin/openshift admin 
create-node-cert --overwrite=false - --cert={{ openshift_cert_dir }}/node-{{ item.openshift_node_hostname }}/cert.crt - --key={{ openshift_cert_dir }}/node-{{ item.openshift_node_hostname }}/key.key - --node-name={{ item.openshift_node_hostname }} + --cert={{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}/cert.crt + --key={{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}/key.key + --node-name={{ item.openshift.common.hostname }} args: chdir: "{{ openshift_cert_dir_parent }}" - creates: "{{ openshift_cert_dir_abs }}/node-{{ item.openshift_node_hostname }}/cert.crt" + creates: "{{ openshift_cert_dir_abs }}/node-{{ item.openshift.common.hostname }}/cert.crt" with_items: openshift_nodes register: node_cert_result -# TODO: re-create kubeconfig if certs were regenerated, not just if -# .kubeconfig doesn't exist # TODO: create a failed_when condition - name: Create kubeconfigs for nodes command: > /usr/bin/openshift admin create-kubeconfig - --client-certificate={{ openshift_cert_dir }}/node-{{ item.openshift_node_hostname }}/cert.crt - --client-key={{ openshift_cert_dir }}/node-{{ item.openshift_node_hostname }}/key.key - --kubeconfig={{ openshift_cert_dir }}/node-{{ item.openshift_node_hostname }}/.kubeconfig - --master={{ openshift_master_urls[0] }} - --public-master={{ openshift_master_public_urls[0] }} + --client-certificate={{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}/cert.crt + --client-key={{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}/key.key + --kubeconfig={{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}/.kubeconfig + --master={{ openshift.master.api_url }} + --public-master={{ openshift.master.public_api_url }} args: chdir: "{{ openshift_cert_dir_parent }}" - creates: "{{ openshift_cert_dir_abs }}/node-{{ item.openshift_node_hostname }}/.kubeconfig" + creates: "{{ openshift_cert_dir_abs }}/node-{{ item.openshift.common.hostname }}/.kubeconfig" with_items: openshift_nodes register: kubeconfig_result -# TODO: generate the node configs (openshift start node --write-config -# --config='{{ openshift_cert_dir }}/node-{{ item.openshift_node_hostname }}/node.yaml' -# --kubeconfig='{{ openshift_cert_dir }}/node-{{ item.openshift_node_hostname }}/.kubeconfig' -# will need to modify the generated node config as needed -# (servingInfo.{certFile,clientCA,keyFile}) - - name: Register unregistered nodes kubernetes_register_node: - name: "{{ item.openshift_node_name }}" + client_user: openshift-client + name: "{{ item.openshift.common.hostname }}" api_version: "{{ openshift_kube_api_version }}" - cpu: "{{ item.openshift_node_cpu if item.openshift_node_cpu else None }}" - memory: "{{ item.openshift_node_memory if item.openshift_node_memory else None }}" - pod_cidr: "{{ item.openshift_node_pod_cidr if item.openshift_node_pod_cidr else None }}" - host_ip: "{{ item.openshift_node_host_ip }}" - labels: "{{ item.openshift_node_labels if item.openshift_node_labels else {} }}" - annotations: "{{ item.openshift_node_annotations if item.openshift_node_annotations else {} }}" + cpu: "{{ item.openshift.node.resources_cpu | default(None) }}" + memory: "{{ item.openshift.node.resources_memory | default(None) }}" + pod_cidr: "{{ item.openshift.node.pod_cidr | default(None) }}" + host_ip: "{{ item.openshift.common.ip }}" + labels: "{{ item.openshift.node.labels | default({}) }}" + annotations: "{{ item.openshift.node.annotations | default({}) }}" + external_id: "{{ item.openshift.node.external_id }}" # TODO: support 
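Editor's note: each node's certificate artifacts end up in a per-host directory keyed by `openshift.common.hostname`. A sketch of the resulting layout — the paths mirror the create-server-cert / create-node-cert / create-kubeconfig destinations above, but the helper function itself is hypothetical:

```
import os

CERT_DIR = '/var/lib/openshift/openshift.local.certificates'

def node_cert_paths(hostname):
    # Per-node directory produced by the tasks above.
    base = os.path.join(CERT_DIR, 'node-%s' % hostname)
    return {
        'server_cert': os.path.join(base, 'server.crt'),
        'server_key': os.path.join(base, 'server.key'),
        'client_cert': os.path.join(base, 'cert.crt'),
        'client_key': os.path.join(base, 'key.key'),
        'kubeconfig': os.path.join(base, '.kubeconfig'),
    }

print(node_cert_paths('node1.example.com')['kubeconfig'])
# /var/lib/openshift/openshift.local.certificates/node-node1.example.com/.kubeconfig
```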
customizing other attributes such as: client_config, # client_cluster, client_context, client_user # TODO: update for v1beta3 changes after rebase: hostnames, external_ips, diff --git a/roles/openshift_repos/defaults/main.yaml b/roles/openshift_repos/defaults/main.yaml index 6fe2bf621..1730207f4 100644 --- a/roles/openshift_repos/defaults/main.yaml +++ b/roles/openshift_repos/defaults/main.yaml @@ -1,5 +1,7 @@ --- # TODO: once we are able to configure/deploy origin using the openshift roles, # then we should default to origin + +# TODO: push the defaulting of these values to the openshift_facts module openshift_deployment_type: online openshift_additional_repos: {} diff --git a/roles/openshift_repos/meta/main.yml b/roles/openshift_repos/meta/main.yml index cc18c453c..0558b822c 100644 --- a/roles/openshift_repos/meta/main.yml +++ b/roles/openshift_repos/meta/main.yml @@ -11,4 +11,5 @@ galaxy_info: - 7 categories: - cloud -dependencies: [] +dependencies: +- { role: openshift_facts } diff --git a/roles/openshift_repos/tasks/main.yaml b/roles/openshift_repos/tasks/main.yaml index 6219c4906..bb1551d37 100644 --- a/roles/openshift_repos/tasks/main.yaml +++ b/roles/openshift_repos/tasks/main.yaml @@ -1,6 +1,12 @@ --- # TODO: Add flag for enabling EPEL repo, default to false +# TODO: Add subscription-management config, with parameters +# for username, password, poolid(name), and official repos to +# enable/disable. Might need to make a module that extends the +# subscription management module to take a poolid and enable/disable the +# proper repos correctly. + - assert: that: openshift_deployment_type in known_openshift_deployment_types diff --git a/roles/openshift_sdn_master/defaults/main.yml b/roles/openshift_sdn_master/defaults/main.yml deleted file mode 100644 index da7655546..000000000 --- a/roles/openshift_sdn_master/defaults/main.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -openshift_sdn_master_debug_level: "{{ openshift_debug_level | default(0) }}" diff --git a/roles/openshift_sdn_master/meta/main.yml b/roles/openshift_sdn_master/meta/main.yml index e6e5514d1..5de32cc13 100644 --- a/roles/openshift_sdn_master/meta/main.yml +++ b/roles/openshift_sdn_master/meta/main.yml @@ -11,4 +11,5 @@ galaxy_info: - 7 categories: - cloud -dependencies: [] +dependencies: +- { role: openshift_common } diff --git a/roles/openshift_sdn_master/tasks/main.yml b/roles/openshift_sdn_master/tasks/main.yml index e1761afdc..f2d61043b 100644 --- a/roles/openshift_sdn_master/tasks/main.yml +++ b/roles/openshift_sdn_master/tasks/main.yml @@ -1,4 +1,13 @@ --- +# TODO: add task to set the sdn subnet if openshift-sdn-master hasn't been +# started yet + +- name: Set master sdn OpenShift facts + openshift_facts: + role: 'master_sdn' + local_facts: + debug_level: "{{ openshift_master_sdn_debug_level | default(openshift.common.debug_level) }}" + - name: Install openshift-sdn-master yum: pkg: openshift-sdn-master @@ -8,17 +17,10 @@ lineinfile: dest: /etc/sysconfig/openshift-sdn-master regexp: '^OPTIONS=' - line: "OPTIONS=\"-v={{ openshift_sdn_master_debug_level }}\"" + line: "OPTIONS=\"-v={{ openshift.master_sdn.debug_level }}\"" notify: - restart openshift-sdn-master -- name: Set openshift-sdn-master facts - include: "{{ role_path | dirname }}/openshift_common/tasks/set_facts.yml" - facts: - - section: sdn-master - option: debug_level - value: "{{ openshift_sdn_master_debug_level }}" - - name: Enable openshift-sdn-master service: name: openshift-sdn-master diff --git a/roles/openshift_sdn_node/README.md 
b/roles/openshift_sdn_node/README.md index 2da2d74eb..e6b6a9503 100644 --- a/roles/openshift_sdn_node/README.md +++ b/roles/openshift_sdn_node/README.md @@ -17,12 +17,6 @@ From this role: | openshift_sdn_node_debug_level | openshift_debug_level | Verbosity of the debug logs for openshift-master | -From openshift_node: -| Name | Default value | | -|-----------------------|------------------|--------------------------------------| -| openshift_master_ips | UNDEF (Required) | List of IP addresses for the openshift-master hosts to be used for node -> master communication | - - From openshift_common: | Name | Default value | | |-------------------------------|---------------------|----------------------------------------| diff --git a/roles/openshift_sdn_node/defaults/main.yml b/roles/openshift_sdn_node/defaults/main.yml deleted file mode 100644 index 9612d9d91..000000000 --- a/roles/openshift_sdn_node/defaults/main.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -openshift_sdn_node_debug_level: "{{ openshift_debug_level | default(0) }}" diff --git a/roles/openshift_sdn_node/meta/main.yml b/roles/openshift_sdn_node/meta/main.yml index ab45ff51e..ffe10f836 100644 --- a/roles/openshift_sdn_node/meta/main.yml +++ b/roles/openshift_sdn_node/meta/main.yml @@ -11,4 +11,5 @@ galaxy_info: - 7 categories: - cloud -dependencies: [] +dependencies: +- { role: openshift_common } diff --git a/roles/openshift_sdn_node/tasks/main.yml b/roles/openshift_sdn_node/tasks/main.yml index ff05a6972..729c28879 100644 --- a/roles/openshift_sdn_node/tasks/main.yml +++ b/roles/openshift_sdn_node/tasks/main.yml @@ -1,4 +1,10 @@ --- +- name: Set node sdn OpenShift facts + openshift_facts: + role: 'node_sdn' + local_facts: + debug_level: "{{ openshift_node_sdn_debug_level | default(openshift.common.debug_level) }}" + - name: Install openshift-sdn-node yum: pkg: openshift-sdn-node @@ -14,28 +20,19 @@ backrefs: yes with_items: - regex: '^(OPTIONS=)' - line: '\1"-v={{ openshift_sdn_node_debug_level }} -hostname={{ openshift_hostname }}"' + line: '\1"-v={{ openshift.node_sdn.debug_level }} -hostname={{ openshift.common.hostname }}"' - regex: '^(MASTER_URL=)' - line: '\1"http://{{ openshift_master_ips | first }}:4001"' + line: '\1"{{ openshift_sdn_master_url }}"' - regex: '^(MINION_IP=)' - line: '\1"{{ openshift_public_ip }}"' + line: '\1"{{ openshift.common.ip }}"' # TODO lock down the insecure-registry config to a more sane value than # 0.0.0.0/0 - regex: '^(DOCKER_OPTIONS=)' line: '\1"--insecure-registry=0.0.0.0/0 -b=lbr0 --mtu=1450 --selinux-enabled"' notify: restart openshift-sdn-node -- name: Set openshift-sdn-node facts - include: "{{ role_path | dirname }}/openshift_common/tasks/set_facts.yml" - facts: - - section: sdn-node - option: debug_level - value: "{{ openshift_sdn_node_debug_level }}" - -# fixme: Once the openshift_cluster playbook is published state should be started -# Always bounce service to pick up new credentials - name: Start and enable openshift-sdn-node service: name: openshift-sdn-node enabled: yes - state: restarted + state: started diff --git a/roles/os_env_extras_node/tasks/main.yml b/roles/os_env_extras_node/tasks/main.yml new file mode 100644 index 000000000..208065df2 --- /dev/null +++ b/roles/os_env_extras_node/tasks/main.yml @@ -0,0 +1,5 @@ +--- +# From the origin rpm there exists instructions on how to +# setup origin properly. 
The following steps come from there +- name: Change root to be in the Docker group + user: name=root groups=dockerroot append=yes diff --git a/roles/os_firewall/library/os_firewall_manage_iptables.py b/roles/os_firewall/library/os_firewall_manage_iptables.py old mode 100644 new mode 100755 index 6a018d022..90588d2ae --- a/roles/os_firewall/library/os_firewall_manage_iptables.py +++ b/roles/os_firewall/library/os_firewall_manage_iptables.py @@ -1,5 +1,6 @@ #!/usr/bin/python # -*- coding: utf-8 -*- +# vim: expandtab:tabstop=4:shiftwidth=4 from subprocess import call, check_output diff --git a/roles/os_firewall/meta/main.yml b/roles/os_firewall/meta/main.yml index 7a8cef6c5..8592371e8 100644 --- a/roles/os_firewall/meta/main.yml +++ b/roles/os_firewall/meta/main.yml @@ -1,3 +1,4 @@ +--- galaxy_info: author: Jason DeTiberus description: os_firewall diff --git a/roles/os_firewall/tasks/firewall/firewalld.yml b/roles/os_firewall/tasks/firewall/firewalld.yml index 469cfab6f..b6bddd5c5 100644 --- a/roles/os_firewall/tasks/firewall/firewalld.yml +++ b/roles/os_firewall/tasks/firewall/firewalld.yml @@ -3,6 +3,7 @@ yum: name: firewalld state: present + register: install_result - name: Check if iptables-services is installed command: rpm -q iptables-services @@ -20,6 +21,10 @@ - ip6tables when: pkg_check.rc == 0 +- name: Reload systemd units + command: systemctl daemon-reload + when: install_result | changed + - name: Start and enable firewalld service service: name: firewalld diff --git a/roles/os_firewall/tasks/firewall/iptables.yml b/roles/os_firewall/tasks/firewall/iptables.yml index 87e77c083..7b5c00a9b 100644 --- a/roles/os_firewall/tasks/firewall/iptables.yml +++ b/roles/os_firewall/tasks/firewall/iptables.yml @@ -6,6 +6,7 @@ with_items: - iptables - iptables-services + register: install_result - name: Check if firewalld is installed command: rpm -q firewalld @@ -20,14 +21,15 @@ enabled: no when: pkg_check.rc == 0 -- name: Start and enable iptables services +- name: Reload systemd units + command: systemctl daemon-reload + when: install_result | changed + +- name: Start and enable iptables service service: - name: "{{ item }}" + name: iptables state: started enabled: yes - with_items: - - iptables - - ip6tables register: result - name: need to pause here, otherwise the iptables service starting can sometimes cause ssh to fail -- cgit v1.2.3 From 187e11209d0b7494ffacbabde569c14a8d0ebe2f Mon Sep 17 00:00:00 2001 From: Ricardo Bernardeli Date: Mon, 13 Apr 2015 09:20:38 +1000 Subject: Add extra information for AWS README Make security group an environment variable with default to ‘public’ MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- README_AWS.md | 22 ++++++++++++++++++++-- .../aws/openshift-cluster/launch_instances.yml | 3 ++- 2 files changed, 22 insertions(+), 3 deletions(-) (limited to 'playbooks/aws') diff --git a/README_AWS.md b/README_AWS.md index e877f34c6..37f4c5f51 100644 --- a/README_AWS.md +++ b/README_AWS.md @@ -14,7 +14,7 @@ Create a credentials file export AWS_ACCESS_KEY_ID='AKIASTUFF' export AWS_SECRET_ACCESS_KEY='STUFF' ``` -1. source this file +2. 
source this file ``` source ~/.aws_creds ``` Note: You must source this file in each shell that you want to run cloud.rb (Optional) Setup your $HOME/.ssh/config file ------------------------------------------- -In case of a cluster creation, or any other case where you don't know the machine hostname in advance, you can use '.ssh/config' +In case of a cluster creation, or any other case where you don't know the machine hostname in advance, you can use '.ssh/config' to set up a private key file to allow ansible to connect to the created hosts. To do so, add the following entry to your $HOME/.ssh/config file and make it point to the private key file which allows you to log in on AWS. @@ -34,6 +34,24 @@ Host *.compute-1.amazonaws.com Alternatively, you can configure your ssh-agent to hold the credentials to connect to your AWS instances. +(Optional) Choose where the cluster will be launched +---------------------------------------------------- + +By default, a cluster is launched with the following configuration: + +- Instance type: m3.large +- AMI: ami-307b3658 +- Region: us-east-1 +- Keypair name: libra +- Security group: public + +If needed, these values can be changed by setting environment variables on your system. + +- export ec2_instance_type='m3.large' +- export ec2_ami='ami-307b3658' +- export ec2_region='us-east-1' +- export ec2_keypair='libra' +- export ec2_security_group='public' Install Dependencies -------------------- diff --git a/playbooks/aws/openshift-cluster/launch_instances.yml b/playbooks/aws/openshift-cluster/launch_instances.yml index e4d5952fd..9d645fbe5 100644 --- a/playbooks/aws/openshift-cluster/launch_instances.yml +++ b/playbooks/aws/openshift-cluster/launch_instances.yml @@ -5,6 +5,7 @@ machine_region: "{{ lookup('env', 'ec2_region')|default('us-east-1', true) }}" machine_keypair: "{{ lookup('env', 'ec2_keypair')|default('libra', true) }}" created_by: "{{ lookup('env', 'LOGNAME')|default(cluster, true) }}" + security_group: "{{ lookup('env', 'ec2_security_group')|default('public', true) }}" env: "{{ cluster }}" host_type: "{{ type }}" env_host_type: "{{ cluster }}-openshift-{{ type }}" @@ -14,7 +15,7 @@ state: present region: "{{ machine_region }}" keypair: "{{ machine_keypair }}" - group: ['public'] + group: "{{ security_group }}" instance_type: "{{ machine_type }}" image: "{{ machine_image }}" count: "{{ instances | oo_len }}" -- cgit v1.2.3 From 6a4b7a5eb6c4b5e747bab795e2428d7c3992f559 Mon Sep 17 00:00:00 2001 From: Jason DeTiberus Date: Wed, 1 Apr 2015 15:09:19 -0400 Subject: Configuration updates for latest builds and major refactor Configuration updates for latest builds - Switch to using create-node-config - Switch sdn services to use etcd over SSL - This re-uses the client certificate deployed on each node - Additional node registration changes - Do not assume that metadata service is available in openshift_facts module - Call systemctl daemon-reload after installing openshift-master, openshift-sdn-master, openshift-node, openshift-sdn-node - Fix bug overriding openshift_hostname and openshift_public_hostname in byo playbooks - Start moving generated configs to /etc/openshift - Some custom module cleanup - Add known issue with ansible-1.9 to README_OSE.md - Update to genericize the kubernetes_register_node module - Default to use kubectl for commands - Allow for overriding kubectl_cmd - In openshift_register_node role, override kubectl_cmd to openshift_kube - Set default openshift_registry_url for enterprise when deployment_type is
enterprise - Fix openshift_register_node for client config change - Ensure that master certs directory is created - Add roles and filter_plugin symlinks to playbooks/common/openshift-master and node - Allow non-root user with sudo nopasswd access - Updates for README_OSE.md - Update byo inventory for adding additional comments - Updates for node cert/config sync to work with non-root user using sudo - Move node config/certs to /etc/openshift/node - Don't use path for mktemp. addresses: https://github.com/openshift/openshift-ansible/issues/154 Create common playbooks - create common/openshift-master/config.yml - create common/openshift-node/config.yml - update playbooks to use new common playbooks - update launch playbooks to call update playbooks - fix openshift_registry and openshift_node_ip usage Set default deployment type to origin - openshift_repo updates for enabling origin deployments - also separate repo and gpgkey file structure - remove kubernetes repo since it isn't currently needed - full deployment type support for bin/cluster - honor OS_DEPLOYMENT_TYPE env variable - add --deployment-type option, which will override OS_DEPLOYMENT_TYPE if set - if neither OS_DEPLOYMENT_TYPE or --deployment-type is set, defaults to origin installs Additional changes: - Add separate config action to bin/cluster that runs ansible config but does not update packages - Some more duplication reduction in cluster playbooks. - Rename task files in playbooks dirs to have tasks in their name for clarity. - update aws/gce scripts to use a directory for inventory (otherwise when there are no hosts returned from dynamic inventory there is an error) libvirt refactor and update - add libvirt dynamic inventory - updates to use dynamic inventory for libvirt --- README_OSE.md | 191 +++-- README_libvirt.md | 78 +- bin/cluster | 91 ++- filter_plugins/oo_filters.py | 16 +- inventory/aws/ec2.ini | 62 -- inventory/aws/ec2.py | 798 --------------------- inventory/aws/group_vars/all | 2 - inventory/aws/hosts/ec2.ini | 62 ++ inventory/aws/hosts/ec2.py | 798 +++++++++++++++++++++ inventory/aws/hosts/hosts | 1 + inventory/byo/group_vars/all | 28 - inventory/byo/hosts | 26 +- inventory/gce/gce.py | 287 -------- inventory/gce/group_vars/all | 2 - inventory/gce/hosts/gce.py | 287 ++++++++ inventory/gce/hosts/hosts | 1 + inventory/libvirt/group_vars/all | 2 - inventory/libvirt/hosts | 2 - inventory/libvirt/hosts/hosts | 1 + inventory/libvirt/hosts/libvirt.ini | 20 + inventory/libvirt/hosts/libvirt_generic.py | 179 +++++ playbooks/aws/openshift-cluster/config.yml | 36 + playbooks/aws/openshift-cluster/launch.yml | 73 +- .../aws/openshift-cluster/launch_instances.yml | 63 -- playbooks/aws/openshift-cluster/list.yml | 15 +- .../openshift-cluster/tasks/launch_instances.yml | 69 ++ playbooks/aws/openshift-cluster/terminate.yml | 24 +- playbooks/aws/openshift-cluster/update.yml | 25 +- playbooks/aws/openshift-cluster/vars.yml | 19 + playbooks/aws/openshift-master/config.yml | 27 +- playbooks/aws/openshift-master/launch.yml | 8 +- playbooks/aws/openshift-master/terminate.yml | 17 +- playbooks/aws/openshift-master/vars.yml | 3 - playbooks/aws/openshift-node/config.yml | 110 +-- playbooks/aws/openshift-node/launch.yml | 10 +- playbooks/aws/openshift-node/terminate.yml | 17 +- playbooks/aws/openshift-node/vars.yml | 3 - playbooks/byo/openshift-master/config.yml | 20 +- playbooks/byo/openshift-node/config.yml | 90 +-- playbooks/byo/openshift_facts.yml | 10 + playbooks/common/openshift-cluster/config.yml | 4 + 
playbooks/common/openshift-cluster/filter_plugins | 1 + playbooks/common/openshift-cluster/roles | 1 + .../set_master_launch_facts_tasks.yml | 11 + .../set_node_launch_facts_tasks.yml | 11 + .../update_repos_and_packages.yml | 7 + playbooks/common/openshift-master/config.yml | 19 + playbooks/common/openshift-master/filter_plugins | 1 + playbooks/common/openshift-master/roles | 1 + playbooks/common/openshift-node/config.yml | 121 ++++ playbooks/common/openshift-node/filter_plugins | 1 + playbooks/common/openshift-node/roles | 1 + playbooks/gce/openshift-cluster/config.yml | 37 + playbooks/gce/openshift-cluster/launch.yml | 72 +- .../gce/openshift-cluster/launch_instances.yml | 44 -- playbooks/gce/openshift-cluster/list.yml | 15 +- .../openshift-cluster/tasks/launch_instances.yml | 42 ++ playbooks/gce/openshift-cluster/terminate.yml | 22 +- playbooks/gce/openshift-cluster/update.yml | 25 +- playbooks/gce/openshift-cluster/vars.yml | 14 + playbooks/gce/openshift-master/config.yml | 24 +- playbooks/gce/openshift-master/launch.yml | 6 +- playbooks/gce/openshift-master/terminate.yml | 11 +- playbooks/gce/openshift-master/vars.yml | 3 - playbooks/gce/openshift-node/config.yml | 106 +-- playbooks/gce/openshift-node/launch.yml | 6 +- playbooks/gce/openshift-node/terminate.yml | 11 +- playbooks/gce/openshift-node/vars.yml | 3 - playbooks/libvirt/openshift-cluster/config.yml | 38 + playbooks/libvirt/openshift-cluster/launch.yml | 81 +-- .../libvirt/openshift-cluster/launch_instances.yml | 102 --- playbooks/libvirt/openshift-cluster/list.yml | 50 +- .../openshift-cluster/tasks/configure_libvirt.yml | 6 + .../tasks/configure_libvirt_network.yml | 27 + .../tasks/configure_libvirt_storage_pool.yml | 27 + .../openshift-cluster/tasks/launch_instances.yml | 104 +++ .../libvirt/openshift-cluster/templates/domain.xml | 67 ++ .../libvirt/openshift-cluster/templates/meta-data | 3 + .../openshift-cluster/templates/network.xml | 23 + .../libvirt/openshift-cluster/templates/user-data | 23 + playbooks/libvirt/openshift-cluster/terminate.yml | 69 +- playbooks/libvirt/openshift-cluster/update.yml | 18 + playbooks/libvirt/openshift-cluster/vars.yml | 38 +- playbooks/libvirt/openshift-master/config.yml | 21 - playbooks/libvirt/openshift-master/filter_plugins | 1 - playbooks/libvirt/openshift-master/roles | 1 - playbooks/libvirt/openshift-master/vars.yml | 1 - playbooks/libvirt/openshift-node/config.yml | 102 --- playbooks/libvirt/openshift-node/filter_plugins | 1 - playbooks/libvirt/openshift-node/roles | 1 - playbooks/libvirt/openshift-node/vars.yml | 1 - playbooks/libvirt/templates/domain.xml | 62 -- playbooks/libvirt/templates/meta-data | 2 - playbooks/libvirt/templates/user-data | 10 - roles/openshift_common/tasks/main.yml | 4 +- roles/openshift_common/vars/main.yml | 4 + roles/openshift_facts/library/openshift_facts.py | 92 ++- roles/openshift_master/tasks/main.yml | 64 +- roles/openshift_master/vars/main.yml | 5 + roles/openshift_node/tasks/main.yml | 32 +- roles/openshift_node/vars/main.yml | 2 + roles/openshift_register_nodes/defaults/main.yml | 3 - .../library/kubernetes_register_node.py | 63 +- roles/openshift_register_nodes/tasks/main.yml | 64 +- roles/openshift_register_nodes/vars/main.yml | 7 + roles/openshift_repos/README.md | 2 +- roles/openshift_repos/defaults/main.yaml | 5 - .../files/online/RPM-GPG-KEY-redhat-beta | 61 -- .../files/online/RPM-GPG-KEY-redhat-release | 63 -- .../files/online/epel7-kubernetes.repo | 6 - .../files/online/epel7-openshift.repo | 6 - 
.../files/online/gpg_keys/RPM-GPG-KEY-redhat-beta | 61 ++ .../online/gpg_keys/RPM-GPG-KEY-redhat-release | 63 ++ .../files/online/oso-rhui-rhel-7-extras.repo | 23 - .../files/online/oso-rhui-rhel-7-server.repo | 21 - .../files/online/repos/epel7-openshift.repo | 6 + .../files/online/repos/oso-rhui-rhel-7-extras.repo | 23 + .../files/online/repos/oso-rhui-rhel-7-server.repo | 21 + .../files/online/repos/rhel-7-libra-candidate.repo | 11 + .../files/online/rhel-7-libra-candidate.repo | 11 - .../repos/maxamillion-origin-next-epel-7.repo | 7 + roles/openshift_repos/tasks/main.yaml | 14 +- roles/openshift_repos/templates/yum_repo.j2 | 1 - roles/openshift_sdn_master/tasks/main.yml | 11 +- roles/openshift_sdn_node/tasks/main.yml | 11 +- .../library/os_firewall_manage_iptables.py | 3 +- 126 files changed, 3165 insertions(+), 2677 deletions(-) delete mode 100644 inventory/aws/ec2.ini delete mode 100755 inventory/aws/ec2.py delete mode 100644 inventory/aws/group_vars/all create mode 100644 inventory/aws/hosts/ec2.ini create mode 100755 inventory/aws/hosts/ec2.py create mode 100644 inventory/aws/hosts/hosts delete mode 100644 inventory/byo/group_vars/all delete mode 100755 inventory/gce/gce.py delete mode 100644 inventory/gce/group_vars/all create mode 100755 inventory/gce/hosts/gce.py create mode 100644 inventory/gce/hosts/hosts delete mode 100644 inventory/libvirt/group_vars/all delete mode 100644 inventory/libvirt/hosts create mode 100644 inventory/libvirt/hosts/hosts create mode 100644 inventory/libvirt/hosts/libvirt.ini create mode 100755 inventory/libvirt/hosts/libvirt_generic.py create mode 100644 playbooks/aws/openshift-cluster/config.yml delete mode 100644 playbooks/aws/openshift-cluster/launch_instances.yml create mode 100644 playbooks/aws/openshift-cluster/tasks/launch_instances.yml delete mode 100644 playbooks/aws/openshift-master/vars.yml delete mode 100644 playbooks/aws/openshift-node/vars.yml create mode 100644 playbooks/byo/openshift_facts.yml create mode 100644 playbooks/common/openshift-cluster/config.yml create mode 120000 playbooks/common/openshift-cluster/filter_plugins create mode 120000 playbooks/common/openshift-cluster/roles create mode 100644 playbooks/common/openshift-cluster/set_master_launch_facts_tasks.yml create mode 100644 playbooks/common/openshift-cluster/set_node_launch_facts_tasks.yml create mode 100644 playbooks/common/openshift-cluster/update_repos_and_packages.yml create mode 100644 playbooks/common/openshift-master/config.yml create mode 120000 playbooks/common/openshift-master/filter_plugins create mode 120000 playbooks/common/openshift-master/roles create mode 100644 playbooks/common/openshift-node/config.yml create mode 120000 playbooks/common/openshift-node/filter_plugins create mode 120000 playbooks/common/openshift-node/roles create mode 100644 playbooks/gce/openshift-cluster/config.yml delete mode 100644 playbooks/gce/openshift-cluster/launch_instances.yml create mode 100644 playbooks/gce/openshift-cluster/tasks/launch_instances.yml delete mode 100644 playbooks/gce/openshift-master/vars.yml delete mode 100644 playbooks/gce/openshift-node/vars.yml create mode 100644 playbooks/libvirt/openshift-cluster/config.yml delete mode 100644 playbooks/libvirt/openshift-cluster/launch_instances.yml create mode 100644 playbooks/libvirt/openshift-cluster/tasks/configure_libvirt.yml create mode 100644 playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml create mode 100644 playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml 
create mode 100644 playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml create mode 100644 playbooks/libvirt/openshift-cluster/templates/domain.xml create mode 100644 playbooks/libvirt/openshift-cluster/templates/meta-data create mode 100644 playbooks/libvirt/openshift-cluster/templates/network.xml create mode 100644 playbooks/libvirt/openshift-cluster/templates/user-data create mode 100644 playbooks/libvirt/openshift-cluster/update.yml delete mode 100644 playbooks/libvirt/openshift-master/config.yml delete mode 120000 playbooks/libvirt/openshift-master/filter_plugins delete mode 120000 playbooks/libvirt/openshift-master/roles delete mode 100644 playbooks/libvirt/openshift-master/vars.yml delete mode 100644 playbooks/libvirt/openshift-node/config.yml delete mode 120000 playbooks/libvirt/openshift-node/filter_plugins delete mode 120000 playbooks/libvirt/openshift-node/roles delete mode 100644 playbooks/libvirt/openshift-node/vars.yml delete mode 100644 playbooks/libvirt/templates/domain.xml delete mode 100644 playbooks/libvirt/templates/meta-data delete mode 100644 playbooks/libvirt/templates/user-data create mode 100644 roles/openshift_master/vars/main.yml create mode 100644 roles/openshift_node/vars/main.yml create mode 100644 roles/openshift_register_nodes/vars/main.yml delete mode 100644 roles/openshift_repos/files/online/RPM-GPG-KEY-redhat-beta delete mode 100644 roles/openshift_repos/files/online/RPM-GPG-KEY-redhat-release delete mode 100644 roles/openshift_repos/files/online/epel7-kubernetes.repo delete mode 100644 roles/openshift_repos/files/online/epel7-openshift.repo create mode 100644 roles/openshift_repos/files/online/gpg_keys/RPM-GPG-KEY-redhat-beta create mode 100644 roles/openshift_repos/files/online/gpg_keys/RPM-GPG-KEY-redhat-release delete mode 100644 roles/openshift_repos/files/online/oso-rhui-rhel-7-extras.repo delete mode 100644 roles/openshift_repos/files/online/oso-rhui-rhel-7-server.repo create mode 100644 roles/openshift_repos/files/online/repos/epel7-openshift.repo create mode 100644 roles/openshift_repos/files/online/repos/oso-rhui-rhel-7-extras.repo create mode 100644 roles/openshift_repos/files/online/repos/oso-rhui-rhel-7-server.repo create mode 100644 roles/openshift_repos/files/online/repos/rhel-7-libra-candidate.repo delete mode 100644 roles/openshift_repos/files/online/rhel-7-libra-candidate.repo create mode 100644 roles/openshift_repos/files/origin/repos/maxamillion-origin-next-epel-7.repo (limited to 'playbooks/aws') diff --git a/README_OSE.md b/README_OSE.md index 6ebdb7f99..6d4a9ba92 100644 --- a/README_OSE.md +++ b/README_OSE.md @@ -7,15 +7,17 @@ * [Creating the default variables for the hosts and host groups](#creating-the-default-variables-for-the-hosts-and-host-groups) * [Running the ansible playbooks](#running-the-ansible-playbooks) * [Post-ansible steps](#post-ansible-steps) +* [Overriding detected ip addresses and hostnames](#overriding-detected-ip-addresses-and-hostnames) ## Requirements * ansible - * Tested using ansible-1.8.2-1.fc20.noarch, but should work with version 1.8+ + * Tested using ansible-1.8.4-1.fc20.noarch, but should work with version 1.8+ + * There is currently a known issue with ansible-1.9.0; you can downgrade to 1.8.4 on Fedora by installing one of the builds from Koji: http://koji.fedoraproject.org/koji/packageinfo?packageID=13842 * Available in Fedora channels * Available for EL with EPEL and Optional channel * One or more RHEL 7.1 VMs -* ssh key based auth for the root user needs to be pre-configured from the host
- running ansible to the remote hosts +* Either ssh key based auth for the root user or ssh key based auth for a user + with sudo access (no password) * A checkout of openshift-ansible from https://github.com/openshift/openshift-ansible/ ```sh @@ -48,9 +50,6 @@ subscription-manager repos \ ``` * Configuration of router is not automated yet * Configuration of docker-registry is not automated yet -* End-to-end testing has not been completed yet using this module -* root user is used for all ansible actions; eventually we will support using - a non-root user with sudo. ## Configuring the host inventory [Ansible docs](http://docs.ansible.com/intro_inventory.html) @@ -64,6 +63,38 @@ option to ansible-playbook. ```ini # This is an example of a bring your own (byo) host inventory +# Create an OSEv3 group that contains the masters and nodes groups +[OSEv3:children] +masters +nodes + +# Set variables common for all OSEv3 hosts +[OSEv3:vars] +# SSH user, this user should allow ssh based auth without requiring a password +ansible_ssh_user=root + +# If ansible_ssh_user is not root, ansible_sudo must be set to true +#ansible_sudo=true + +# To deploy origin, change deployment_type to origin +deployment_type=enterprise + +# Pre-release registry URL +openshift_registry_url=docker-buildvm-rhose.usersys.redhat.com:5000/openshift3_beta/ose-${component}:${version} + +# Pre-release additional repo +openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', +'baseurl': +'http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterprise/3.0/latest/RH7-RHOSE-3.0/$basearch/os', +'enabled': 1, 'gpgcheck': 0}] + +# Origin copr repo +#openshift_additional_repos=[{'id': 'openshift-origin-copr', 'name': +'OpenShift Origin COPR', 'baseurl': +'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/', +'enabled': 1, 'gpgcheck': 1, gpgkey: +'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg'}] + # host group for masters [masters] ose3-master.example.com @@ -76,51 +107,13 @@ ose3-node[1:2].example.com The hostnames above should resolve both from the hosts themselves and the host where ansible is running (if different).
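As a quick sanity check of an inventory shaped like the example above, ansible's ping module can confirm that every host in the OSEv3 group is reachable before any playbook runs. This is a minimal sketch; the inventory path ~/osev3-hosts is hypothetical, substitute wherever the file was actually saved:

```sh
# Ping all masters and nodes through the OSEv3 group from the example inventory.
# ~/osev3-hosts is a hypothetical path; adjust to your actual inventory file.
ansible -i ~/osev3-hosts OSEv3 -m ping
```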
-## Creating the default variables for the hosts and host groups -[Ansible docs](http://docs.ansible.com/intro_inventory.html#id9) - -#### Group vars for all hosts -/etc/ansible/group_vars/all: -```yaml ---- -# Assume that we want to use the root as the ssh user for all hosts -ansible_ssh_user: root - -# Default debug level for all OpenShift hosts -openshift_debug_level: 4 - -# Set the OpenShift deployment type for all hosts -openshift_deployment_type: enterprise - -# Override the default registry for development -openshift_registry_url: docker-buildvm-rhose.usersys.redhat.com:5000/openshift3_beta/ose-${component}:${version} - -# To use the latest OpenShift Enterprise Errata puddle: -#openshift_additional_repos: -#- id: ose-devel -# name: ose-devel -# baseurl: http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterpriseErrata/3.0/latest/RH7-RHOSE-3.0/$basearch/os -# enabled: 1 -# gpgcheck: 0 -# To use the latest OpenShift Enterprise Whitelist puddle: -openshift_additional_repos: -- id: ose-devel - name: ose-devel - baseurl: http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterprise/3.0/latest/RH7-RHOSE-3.0/$basearch/os - enabled: 1 - gpgcheck: 0 - -``` - ## Running the ansible playbooks From the openshift-ansible checkout run: ```sh ansible-playbook playbooks/byo/config.yml ``` -**Note:** this assumes that the host inventory is /etc/ansible/hosts and the -group_vars are defined in /etc/ansible/group_vars, if using a different -inventory file (and a group_vars directory that is in the same directory as -the directory as the inventory) use the -i option for ansible-playbook. +**Note:** this assumes that the host inventory is /etc/ansible/hosts, if using a different +inventory file use the -i option for ansible-playbook. ## Post-ansible steps #### Create the default router @@ -140,3 +133,109 @@ openshift ex registry --create=true \ --images='docker-buildvm-rhose.usersys.redhat.com:5000/openshift3_beta/ose-${component}:${version}' \ --mount-host=/var/lib/openshift/docker-registry ``` + +## Overriding detected ip addresses and hostnames +Some deployments will require that the user override the detected hostnames +and ip addresses for the hosts. To see what the default values will be you can +run the openshift_facts playbook: +```sh +ansible-playbook playbooks/byo/openshift_facts.yml +``` +The output will be similar to: +``` +ok: [10.3.9.45] => { + "result": { + "ansible_facts": { + "openshift": { + "common": { + "hostname": "jdetiber-osev3-ansible-005dcfa6-27c6-463d-9b95-ef059579befd.os1.phx2.redhat.com", + "ip": "172.16.4.79", + "public_hostname": "jdetiber-osev3-ansible-005dcfa6-27c6-463d-9b95-ef059579befd.os1.phx2.redhat.com", + "public_ip": "10.3.9.45", + "use_openshift_sdn": true + }, + "provider": { + ... ... + } + } + }, + "changed": false, + "invocation": { + "module_args": "", + "module_name": "openshift_facts" + } + } +} +ok: [10.3.9.42] => { + "result": { + "ansible_facts": { + "openshift": { + "common": { + "hostname": "jdetiber-osev3-ansible-c6ae8cdc-ba0b-4a81-bb37-14549893f9d3.os1.phx2.redhat.com", + "ip": "172.16.4.75", + "public_hostname": "jdetiber-osev3-ansible-c6ae8cdc-ba0b-4a81-bb37-14549893f9d3.os1.phx2.redhat.com", + "public_ip": "10.3.9.42", + "use_openshift_sdn": true + }, + "provider": { + ...... 
+ } + } + }, + "changed": false, + "invocation": { + "module_args": "", + "module_name": "openshift_facts" + } + } +} +ok: [10.3.9.36] => { + "result": { + "ansible_facts": { + "openshift": { + "common": { + "hostname": "jdetiber-osev3-ansible-bc39a3d3-cdd7-42fe-9c12-9fac9b0ec320.os1.phx2.redhat.com", + "ip": "172.16.4.73", + "public_hostname": "jdetiber-osev3-ansible-bc39a3d3-cdd7-42fe-9c12-9fac9b0ec320.os1.phx2.redhat.com", + "public_ip": "10.3.9.36", + "use_openshift_sdn": true + }, + "provider": { + ...... + } + } + }, + "changed": false, + "invocation": { + "module_args": "", + "module_name": "openshift_facts" + } + } +} +``` +Now, we want to verify that the detected common settings are +what we expect them to be (if not, we can override them). + +* hostname + * Should resolve to the internal ip from the instances themselves. + * openshift_hostname will override. +* ip + * Should be the internal ip of the instance. + * openshift_ip will override. +* public hostname + * Should resolve to the external ip from hosts outside of the cloud provider. + * openshift_public_hostname will override. +* public_ip + * Should be the externally accessible ip associated with the instance + * openshift_public_ip will override +* use_openshift_sdn + * Should be true unless the cloud is GCE. + * openshift_use_openshift_sdn overrides + +To override the defaults, you can set the variables in your inventory: +``` +...snip... +[masters] +ose3-master.example.com openshift_ip=1.1.1.1 openshift_hostname=ose3-master.example.com openshift_public_ip=2.2.2.2 openshift_public_hostname=ose3-master.public.example.com +...snip... +``` diff --git a/README_libvirt.md b/README_libvirt.md index fd2eb57f6..bcbaf4bd5 100644 --- a/README_libvirt.md +++ b/README_libvirt.md @@ -1,4 +1,3 @@ - LIBVIRT Setup instructions ========================== @@ -9,19 +8,21 @@ This makes `libvirt` useful to develop, test and debug Openshift and openshift-a Install dependencies -------------------- -1. Install [dnsmasq](http://www.thekelleys.org.uk/dnsmasq/doc.html) -2. Install [ebtables](http://ebtables.netfilter.org/) -3. Install [qemu](http://wiki.qemu.org/Main_Page) -4. Install [libvirt](http://libvirt.org/) -5. Enable and start the libvirt daemon, e.g: - * ``systemctl enable libvirtd`` - * ``systemctl start libvirtd`` -6. [Grant libvirt access to your user¹](https://libvirt.org/aclpolkit.html) -7. Check that your `$HOME` is accessible to the qemu user² +1. Install [dnsmasq](http://www.thekelleys.org.uk/dnsmasq/doc.html) +2. Install [ebtables](http://ebtables.netfilter.org/) +3. Install [qemu](http://wiki.qemu.org/Main_Page) +4. Install [libvirt](http://libvirt.org/) +5. Enable and start the libvirt daemon, e.g: + - `systemctl enable libvirtd` + - `systemctl start libvirtd` +6. [Grant libvirt access to your user¹](https://libvirt.org/aclpolkit.html) +7. Check that your `$HOME` is accessible to the qemu user² +8. Configure dns resolution on the host³ #### ¹ Depending on your distribution, libvirt access may be denied by default or may require a password at each access. You can test it with the following command: + ``` virsh -c qemu:///system pool-list ``` @@ -67,12 +68,7 @@ If your `$HOME` is world readable, everything is fine. If your `$HOME` is private, you will get an error like: ```
If your `$HOME` is privat error: Cannot access storage file '$HOME/libvirt-storage-pool-openshift/lenaic-master-216d8.qcow2' (as uid:99, gid:78): Permission denied ``` -In order to fix that issue, you have several possibilities: -* set `libvirt_storage_pool_path` inside `playbooks/libvirt/openshift-cluster/launch.yml` and `playbooks/libvirt/openshift-cluster/terminate.yml` to a directory: - * backed by a filesystem with a lot of free disk space - * writable by your user; - * accessible by the qemu user. -* Grant the qemu user access to the storage pool. +In order to fix that issue, you have several possibilities:* set `libvirt_storage_pool_path` inside `playbooks/libvirt/openshift-cluster/launch.yml` and `playbooks/libvirt/openshift-cluster/terminate.yml` to a directory: * backed by a filesystem with a lot of free disk space * writable by your user; * accessible by the qemu user.* Grant the qemu user access to the storage pool. On Arch: @@ -80,13 +76,55 @@ On Arch: setfacl -m g:kvm:--x ~ ``` -Test the setup +#### ³ Enabling DNS resolution to your guest VMs with NetworkManager + +- Verify NetworkManager is configured to use dnsmasq: + +```sh +$ sudo vi /etc/NetworkManager/NetworkManager.conf +[main] +dns=dnsmasq +``` + +- Configure dnsmasq to use the Virtual Network router for example.com: + +```sh +sudo vi /etc/NetworkManager/dnsmasq.d/libvirt_dnsmasq.conf server=/example.com/192.168.55.1 +``` + +Test The Setup -------------- +1. cd openshift-ansible/ +2. Try to list all instances (Passing an empty string as the cluster_id argument will result in all libvirt instances being listed) + +``` + bin/cluster list libvirt '' ``` -cd openshift-ansible -bin/cluster create -m 1 -n 3 libvirt lenaic +Creating a cluster +------------------ + +1. To create a cluster with one master and two nodes -bin/cluster terminate libvirt lenaic +``` + bin/cluster create libvirt lenaic +``` + +Updating a cluster +------------------ + +1. To update the cluster + +``` + bin/cluster update libvirt lenaic +``` + +Terminating a cluster +--------------------- + +1. 
To terminate the cluster + +``` + bin/cluster terminate libvirt lenaic ``` diff --git a/bin/cluster b/bin/cluster index ca227721e..79f1f988f 100755 --- a/bin/cluster +++ b/bin/cluster @@ -22,13 +22,28 @@ class Cluster(object): '-o ControlPersist=600s ' ) + def get_deployment_type(self, args): + """ + Get the deployment_type based on the environment variables and the + command line arguments + :param args: command line arguments provided by the user + :return: string representing the deployment type + """ + deployment_type = 'origin' + if args.deployment_type: + deployment_type = args.deployment_type + elif 'OS_DEPLOYMENT_TYPE' in os.environ: + deployment_type = os.environ['OS_DEPLOYMENT_TYPE'] + return deployment_type + def create(self, args): """ Create an OpenShift cluster for given provider :param args: command line arguments provided by user :return: exit status from run command """ - env = {'cluster_id': args.cluster_id} + env = {'cluster_id': args.cluster_id, + 'deployment_type': self.get_deployment_type(args)} playbook = "playbooks/{}/openshift-cluster/launch.yml".format(args.provider) inventory = self.setup_provider(args.provider) @@ -43,7 +58,8 @@ class Cluster(object): :param args: command line arguments provided by user :return: exit status from run command """ - env = {'cluster_id': args.cluster_id} + env = {'cluster_id': args.cluster_id, + 'deployment_type': self.get_deployment_type(args)} playbook = "playbooks/{}/openshift-cluster/terminate.yml".format(args.provider) inventory = self.setup_provider(args.provider) @@ -55,19 +71,34 @@ class Cluster(object): :param args: command line arguments provided by user :return: exit status from run command """ - env = {'cluster_id': args.cluster_id} + env = {'cluster_id': args.cluster_id, + 'deployment_type': self.get_deployment_type(args)} playbook = "playbooks/{}/openshift-cluster/list.yml".format(args.provider) inventory = self.setup_provider(args.provider) return self.action(args, inventory, env, playbook) + def config(self, args): + """ + Configure or reconfigure OpenShift across clustered VMs + :param args: command line arguments provided by user + :return: exit status from run command + """ + env = {'cluster_id': args.cluster_id, + 'deployment_type': self.get_deployment_type(args)} + playbook = "playbooks/{}/openshift-cluster/config.yml".format(args.provider) + inventory = self.setup_provider(args.provider) + + return self.action(args, inventory, env, playbook) + def update(self, args): """ Update to latest OpenShift across clustered VMs :param args: command line arguments provided by user :return: exit status from run command """ - env = {'cluster_id': args.cluster_id} + env = {'cluster_id': args.cluster_id, + 'deployment_type': self.get_deployment_type(args)} playbook = "playbooks/{}/openshift-cluster/update.yml".format(args.provider) inventory = self.setup_provider(args.provider) @@ -81,19 +112,19 @@ class Cluster(object): """ config = ConfigParser.ConfigParser() if 'gce' == provider: - config.readfp(open('inventory/gce/gce.ini')) + config.readfp(open('inventory/gce/hosts/gce.ini')) for key in config.options('gce'): os.environ[key] = config.get('gce', key) - inventory = '-i inventory/gce/gce.py' + inventory = '-i inventory/gce/hosts' elif 'aws' == provider: - config.readfp(open('inventory/aws/ec2.ini')) + config.readfp(open('inventory/aws/hosts/ec2.ini')) for key in config.options('ec2'): os.environ[key] = config.get('ec2', key) - inventory = '-i inventory/aws/ec2.py' + inventory = '-i inventory/aws/hosts' elif 'libvirt' == 
provider: inventory = '-i inventory/libvirt/hosts' else: @@ -145,29 +176,49 @@ if __name__ == '__main__': parser = argparse.ArgumentParser( description='Python wrapper to ensure proper environment for OpenShift ansible playbooks', ) - parser.add_argument('-v', '--verbose', action='count', help='Multiple -v options increase the verbosity') + parser.add_argument('-v', '--verbose', action='count', + help='Multiple -v options increase the verbosity') parser.add_argument('--version', action='version', version='%(prog)s 0.2') meta_parser = argparse.ArgumentParser(add_help=False) meta_parser.add_argument('provider', choices=providers, help='provider') meta_parser.add_argument('cluster_id', help='prefix for cluster VM names') - - action_parser = parser.add_subparsers(dest='action', title='actions', description='Choose from valid actions') - - create_parser = action_parser.add_parser('create', help='Create a cluster', parents=[meta_parser]) - create_parser.add_argument('-m', '--masters', default=1, type=int, help='number of masters to create in cluster') - create_parser.add_argument('-n', '--nodes', default=2, type=int, help='number of nodes to create in cluster') + meta_parser.add_argument('-t', '--deployment-type', + choices=['origin', 'online', 'enterprise'], + help='Deployment type. (default: origin)') + + action_parser = parser.add_subparsers(dest='action', title='actions', + description='Choose from valid actions') + + create_parser = action_parser.add_parser('create', help='Create a cluster', + parents=[meta_parser]) + create_parser.add_argument('-m', '--masters', default=1, type=int, + help='number of masters to create in cluster') + create_parser.add_argument('-n', '--nodes', default=2, type=int, + help='number of nodes to create in cluster') create_parser.set_defaults(func=cluster.create) - terminate_parser = action_parser.add_parser('terminate', help='Destroy a cluster', parents=[meta_parser]) - terminate_parser.add_argument('-f', '--force', action='store_true', help='Destroy cluster without confirmation') + config_parser = action_parser.add_parser('config', + help='Configure or reconfigure a cluster', + parents=[meta_parser]) + config_parser.set_defaults(func=cluster.config) + + terminate_parser = action_parser.add_parser('terminate', + help='Destroy a cluster', + parents=[meta_parser]) + terminate_parser.add_argument('-f', '--force', action='store_true', + help='Destroy cluster without confirmation') terminate_parser.set_defaults(func=cluster.terminate) - update_parser = action_parser.add_parser('update', help='Update OpenShift across cluster', parents=[meta_parser]) - update_parser.add_argument('-f', '--force', action='store_true', help='Update cluster without confirmation') + update_parser = action_parser.add_parser('update', + help='Update OpenShift across cluster', + parents=[meta_parser]) + update_parser.add_argument('-f', '--force', action='store_true', + help='Update cluster without confirmation') update_parser.set_defaults(func=cluster.update) - list_parser = action_parser.add_parser('list', help='List VMs in cluster', parents=[meta_parser]) + list_parser = action_parser.add_parser('list', help='List VMs in cluster', + parents=[meta_parser]) list_parser.set_defaults(func=cluster.list) args = parser.parse_args() diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py index 1cf02218c..cf30cde9a 100644 --- a/filter_plugins/oo_filters.py +++ b/filter_plugins/oo_filters.py @@ -5,6 +5,7 @@ from ansible import errors, runner import json import pdb +import re def 
oo_pdb(arg): ''' This pops you into a pdb instance where arg is the data passed in from the filter. @@ -101,6 +102,18 @@ def oo_prepend_strings_in_list(data, prepend): retval = [prepend + s for s in data] return retval +def oo_get_deployment_type_from_groups(data): + ''' This takes a list of groups and returns the associated + deployment-type + ''' + if not issubclass(type(data), list): + raise errors.AnsibleFilterError("|failed expects first param is a list") + regexp = re.compile('^tag_deployment-type[-_]') + matches = filter(regexp.match, data) + if len(matches) > 0: + return regexp.sub('', matches[0]) + return "Unknown" + class FilterModule (object): def filters(self): return { @@ -109,5 +122,6 @@ class FilterModule (object): "oo_flatten": oo_flatten, "oo_len": oo_len, "oo_pdb": oo_pdb, - "oo_prepend_strings_in_list": oo_prepend_strings_in_list + "oo_prepend_strings_in_list": oo_prepend_strings_in_list, + "oo_get_deployment_type_from_groups": oo_get_deployment_type_from_groups } diff --git a/inventory/aws/ec2.ini b/inventory/aws/ec2.ini deleted file mode 100644 index eaab0a410..000000000 --- a/inventory/aws/ec2.ini +++ /dev/null @@ -1,62 +0,0 @@ -# Ansible EC2 external inventory script settings -# - -[ec2] - -# to talk to a private eucalyptus instance uncomment these lines -# and edit edit eucalyptus_host to be the host name of your cloud controller -#eucalyptus = True -#eucalyptus_host = clc.cloud.domain.org - -# AWS regions to make calls to. Set this to 'all' to make request to all regions -# in AWS and merge the results together. Alternatively, set this to a comma -# separated list of regions. E.g. 'us-east-1,us-west-1,us-west-2' -regions = all -regions_exclude = us-gov-west-1,cn-north-1 - -# When generating inventory, Ansible needs to know how to address a server. -# Each EC2 instance has a lot of variables associated with it. Here is the list: -# http://docs.pythonboto.org/en/latest/ref/ec2.html#module-boto.ec2.instance -# Below are 2 variables that are used as the address of a server: -# - destination_variable -# - vpc_destination_variable - -# This is the normal destination variable to use. If you are running Ansible -# from outside EC2, then 'public_dns_name' makes the most sense. If you are -# running Ansible from within EC2, then perhaps you want to use the internal -# address, and should set this to 'private_dns_name'. -destination_variable = public_dns_name - -# For server inside a VPC, using DNS names may not make sense. When an instance -# has 'subnet_id' set, this variable is used. If the subnet is public, setting -# this to 'ip_address' will return the public IP address. For instances in a -# private subnet, this should be set to 'private_ip_address', and Ansible must -# be run from with EC2. -vpc_destination_variable = ip_address - -# To tag instances on EC2 with the resource records that point to them from -# Route53, uncomment and set 'route53' to True. -route53 = False - -# Additionally, you can specify the list of zones to exclude looking up in -# 'route53_excluded_zones' as a comma-separated list. -# route53_excluded_zones = samplezone1.com, samplezone2.com - -# API calls to EC2 are slow. For this reason, we cache the results of an API -# call. Set this to the path you want cache files to be written to. Two files -# will be written to this directory: -# - ansible-ec2.cache -# - ansible-ec2.index -cache_path = ~/.ansible/tmp - -# The number of seconds a cache file is considered valid. 
After this many -# seconds, a new API call will be made, and the cache file will be updated. -# To disable the cache, set this value to 0 -cache_max_age = 300 - -# These two settings allow flexible ansible host naming based on a format -# string and a comma-separated list of ec2 tags. The tags used must be -# present for all instances, or the code will fail. This overrides both -# destination_variable and vpc_destination_variable. -# destination_format = {0}.{1}.rhcloud.com -# destination_format_tags = Name,environment diff --git a/inventory/aws/ec2.py b/inventory/aws/ec2.py deleted file mode 100755 index f231ff4c2..000000000 --- a/inventory/aws/ec2.py +++ /dev/null @@ -1,798 +0,0 @@ -#!/usr/bin/env python2 - -''' -EC2 external inventory script -================================= - -Generates inventory that Ansible can understand by making API request to -AWS EC2 using the Boto library. - -NOTE: This script assumes Ansible is being executed where the environment -variables needed for Boto have already been set: - export AWS_ACCESS_KEY_ID='AK123' - export AWS_SECRET_ACCESS_KEY='abc123' - -This script also assumes there is an ec2.ini file alongside it. To specify a -different path to ec2.ini, define the EC2_INI_PATH environment variable: - - export EC2_INI_PATH=/path/to/my_ec2.ini - -If you're using eucalyptus you need to set the above variables and -you need to define: - - export EC2_URL=http://hostname_of_your_cc:port/services/Eucalyptus - -For more details, see: http://docs.pythonboto.org/en/latest/boto_config_tut.html - -When run against a specific host, this script returns the following variables: - - ec2_ami_launch_index - - ec2_architecture - - ec2_association - - ec2_attachTime - - ec2_attachment - - ec2_attachmentId - - ec2_client_token - - ec2_deleteOnTermination - - ec2_description - - ec2_deviceIndex - - ec2_dns_name - - ec2_eventsSet - - ec2_group_name - - ec2_hypervisor - - ec2_id - - ec2_image_id - - ec2_instanceState - - ec2_instance_type - - ec2_ipOwnerId - - ec2_ip_address - - ec2_item - - ec2_kernel - - ec2_key_name - - ec2_launch_time - - ec2_monitored - - ec2_monitoring - - ec2_networkInterfaceId - - ec2_ownerId - - ec2_persistent - - ec2_placement - - ec2_platform - - ec2_previous_state - - ec2_private_dns_name - - ec2_private_ip_address - - ec2_publicIp - - ec2_public_dns_name - - ec2_ramdisk - - ec2_reason - - ec2_region - - ec2_requester_id - - ec2_root_device_name - - ec2_root_device_type - - ec2_security_group_ids - - ec2_security_group_names - - ec2_shutdown_state - - ec2_sourceDestCheck - - ec2_spot_instance_request_id - - ec2_state - - ec2_state_code - - ec2_state_reason - - ec2_status - - ec2_subnet_id - - ec2_tenancy - - ec2_virtualization_type - - ec2_vpc_id - -These variables are pulled out of a boto.ec2.instance object. There is a lack of -consistency with variable spellings (camelCase and underscores) since this -just loops through all variables the object exposes. It is preferred to use the -ones with underscores when multiple exist. - -In addition, if an instance has AWS Tags associated with it, each tag is a new -variable named: - - ec2_tag_[Key] = [Value] - -Security groups are comma-separated in 'ec2_security_group_ids' and -'ec2_security_group_names'. 
-''' - -# (c) 2012, Peter Sankauskas -# -# This file is part of Ansible, -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -###################################################################### - -import sys -import os -import argparse -import re -from time import time -import boto -from boto import ec2 -from boto import rds -from boto import route53 -import ConfigParser -from collections import defaultdict - -try: - import json -except ImportError: - import simplejson as json - - -class Ec2Inventory(object): - def _empty_inventory(self): - return {"_meta" : {"hostvars" : {}}} - - def __init__(self): - ''' Main execution path ''' - - # Inventory grouped by instance IDs, tags, security groups, regions, - # and availability zones - self.inventory = self._empty_inventory() - - # Index of hostname (address) to instance ID - self.index = {} - - # Read settings and parse CLI arguments - self.read_settings() - self.parse_cli_args() - - # Cache - if self.args.refresh_cache: - self.do_api_calls_update_cache() - elif not self.is_cache_valid(): - self.do_api_calls_update_cache() - - # Data to print - if self.args.host: - data_to_print = self.get_host_info() - - elif self.args.list: - # Display list of instances for inventory - if self.inventory == self._empty_inventory(): - data_to_print = self.get_inventory_from_cache() - else: - data_to_print = self.json_format_dict(self.inventory, True) - - print data_to_print - - - def is_cache_valid(self): - ''' Determines if the cache files have expired, or if it is still valid ''' - - if os.path.isfile(self.cache_path_cache): - mod_time = os.path.getmtime(self.cache_path_cache) - current_time = time() - if (mod_time + self.cache_max_age) > current_time: - if os.path.isfile(self.cache_path_index): - return True - - return False - - - def read_settings(self): - ''' Reads the settings from the ec2.ini file ''' - - config = ConfigParser.SafeConfigParser() - ec2_default_ini_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'ec2.ini') - ec2_ini_path = os.environ.get('EC2_INI_PATH', ec2_default_ini_path) - config.read(ec2_ini_path) - - # is eucalyptus? 
- self.eucalyptus_host = None - self.eucalyptus = False - if config.has_option('ec2', 'eucalyptus'): - self.eucalyptus = config.getboolean('ec2', 'eucalyptus') - if self.eucalyptus and config.has_option('ec2', 'eucalyptus_host'): - self.eucalyptus_host = config.get('ec2', 'eucalyptus_host') - - # Regions - self.regions = [] - configRegions = config.get('ec2', 'regions') - configRegions_exclude = config.get('ec2', 'regions_exclude') - if (configRegions == 'all'): - if self.eucalyptus_host: - self.regions.append(boto.connect_euca(host=self.eucalyptus_host).region.name) - else: - for regionInfo in ec2.regions(): - if regionInfo.name not in configRegions_exclude: - self.regions.append(regionInfo.name) - else: - self.regions = configRegions.split(",") - - # Destination addresses - self.destination_variable = config.get('ec2', 'destination_variable') - self.vpc_destination_variable = config.get('ec2', 'vpc_destination_variable') - - if config.has_option('ec2', 'destination_format') and \ - config.has_option('ec2', 'destination_format_tags'): - self.destination_format = config.get('ec2', 'destination_format') - self.destination_format_tags = config.get('ec2', 'destination_format_tags').split(',') - else: - self.destination_format = None - self.destination_format_tags = None - - # Route53 - self.route53_enabled = config.getboolean('ec2', 'route53') - self.route53_excluded_zones = [] - if config.has_option('ec2', 'route53_excluded_zones'): - self.route53_excluded_zones.extend( - config.get('ec2', 'route53_excluded_zones', '').split(',')) - - # Include RDS instances? - self.rds_enabled = True - if config.has_option('ec2', 'rds'): - self.rds_enabled = config.getboolean('ec2', 'rds') - - # Return all EC2 and RDS instances (if RDS is enabled) - if config.has_option('ec2', 'all_instances'): - self.all_instances = config.getboolean('ec2', 'all_instances') - else: - self.all_instances = False - if config.has_option('ec2', 'all_rds_instances') and self.rds_enabled: - self.all_rds_instances = config.getboolean('ec2', 'all_rds_instances') - else: - self.all_rds_instances = False - - # Cache related - cache_dir = os.path.expanduser(config.get('ec2', 'cache_path')) - if not os.path.exists(cache_dir): - os.makedirs(cache_dir) - - self.cache_path_cache = cache_dir + "/ansible-ec2.cache" - self.cache_path_index = cache_dir + "/ansible-ec2.index" - self.cache_max_age = config.getint('ec2', 'cache_max_age') - - # Configure nested groups instead of flat namespace. - if config.has_option('ec2', 'nested_groups'): - self.nested_groups = config.getboolean('ec2', 'nested_groups') - else: - self.nested_groups = False - - # Configure which groups should be created. - group_by_options = [ - 'group_by_instance_id', - 'group_by_region', - 'group_by_availability_zone', - 'group_by_ami_id', - 'group_by_instance_type', - 'group_by_key_pair', - 'group_by_vpc_id', - 'group_by_security_group', - 'group_by_tag_keys', - 'group_by_tag_none', - 'group_by_route53_names', - 'group_by_rds_engine', - 'group_by_rds_parameter_group', - ] - for option in group_by_options: - if config.has_option('ec2', option): - setattr(self, option, config.getboolean('ec2', option)) - else: - setattr(self, option, True) - - # Do we need to just include hosts that match a pattern? 
- try: - pattern_include = config.get('ec2', 'pattern_include') - if pattern_include and len(pattern_include) > 0: - self.pattern_include = re.compile(pattern_include) - else: - self.pattern_include = None - except ConfigParser.NoOptionError, e: - self.pattern_include = None - - # Do we need to exclude hosts that match a pattern? - try: - pattern_exclude = config.get('ec2', 'pattern_exclude'); - if pattern_exclude and len(pattern_exclude) > 0: - self.pattern_exclude = re.compile(pattern_exclude) - else: - self.pattern_exclude = None - except ConfigParser.NoOptionError, e: - self.pattern_exclude = None - - # Instance filters (see boto and EC2 API docs). Ignore invalid filters. - self.ec2_instance_filters = defaultdict(list) - if config.has_option('ec2', 'instance_filters'): - for instance_filter in config.get('ec2', 'instance_filters', '').split(','): - instance_filter = instance_filter.strip() - if not instance_filter or '=' not in instance_filter: - continue - filter_key, filter_value = [x.strip() for x in instance_filter.split('=', 1)] - if not filter_key: - continue - self.ec2_instance_filters[filter_key].append(filter_value) - - def parse_cli_args(self): - ''' Command line argument processing ''' - - parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on EC2') - parser.add_argument('--list', action='store_true', default=True, - help='List instances (default: True)') - parser.add_argument('--host', action='store', - help='Get all the variables about a specific instance') - parser.add_argument('--refresh-cache', action='store_true', default=False, - help='Force refresh of cache by making API requests to EC2 (default: False - use cache files)') - self.args = parser.parse_args() - - - def do_api_calls_update_cache(self): - ''' Do API calls to each region, and save data in cache files ''' - - if self.route53_enabled: - self.get_route53_records() - - for region in self.regions: - self.get_instances_by_region(region) - if self.rds_enabled: - self.get_rds_instances_by_region(region) - - self.write_to_cache(self.inventory, self.cache_path_cache) - self.write_to_cache(self.index, self.cache_path_index) - - - def get_instances_by_region(self, region): - ''' Makes an AWS EC2 API call to the list of instances in a particular - region ''' - - try: - if self.eucalyptus: - conn = boto.connect_euca(host=self.eucalyptus_host) - conn.APIVersion = '2010-08-31' - else: - conn = ec2.connect_to_region(region) - - # connect_to_region will fail "silently" by returning None if the region name is wrong or not supported - if conn is None: - print("region name: %s likely not supported, or AWS is down. connection to region failed." 
% region) - sys.exit(1) - - reservations = [] - if self.ec2_instance_filters: - for filter_key, filter_values in self.ec2_instance_filters.iteritems(): - reservations.extend(conn.get_all_instances(filters = { filter_key : filter_values })) - else: - reservations = conn.get_all_instances() - - for reservation in reservations: - for instance in reservation.instances: - self.add_instance(instance, region) - - except boto.exception.BotoServerError, e: - if not self.eucalyptus: - print "Looks like AWS is down again:" - print e - sys.exit(1) - - def get_rds_instances_by_region(self, region): - ''' Makes an AWS API call to the list of RDS instances in a particular - region ''' - - try: - conn = rds.connect_to_region(region) - if conn: - instances = conn.get_all_dbinstances() - for instance in instances: - self.add_rds_instance(instance, region) - except boto.exception.BotoServerError, e: - if not e.reason == "Forbidden": - print "Looks like AWS RDS is down: " - print e - sys.exit(1) - - def get_instance(self, region, instance_id): - ''' Gets details about a specific instance ''' - if self.eucalyptus: - conn = boto.connect_euca(self.eucalyptus_host) - conn.APIVersion = '2010-08-31' - else: - conn = ec2.connect_to_region(region) - - # connect_to_region will fail "silently" by returning None if the region name is wrong or not supported - if conn is None: - print("region name: %s likely not supported, or AWS is down. connection to region failed." % region) - sys.exit(1) - - reservations = conn.get_all_instances([instance_id]) - for reservation in reservations: - for instance in reservation.instances: - return instance - - def add_instance(self, instance, region): - ''' Adds an instance to the inventory and index, as long as it is - addressable ''' - - # Only want running instances unless all_instances is True - if not self.all_instances and instance.state != 'running': - return - - # Select the best destination address - if self.destination_format and self.destination_format_tags: - dest = self.destination_format.format(*[ getattr(instance, 'tags').get(tag, 'nil') for tag in self.destination_format_tags ]) - elif instance.subnet_id: - dest = getattr(instance, self.vpc_destination_variable, None) - if dest is None: - dest = getattr(instance, 'tags').get(self.vpc_destination_variable, None) - else: - dest = getattr(instance, self.destination_variable, None) - if dest is None: - dest = getattr(instance, 'tags').get(self.destination_variable, None) - - if not dest: - # Skip instances we cannot address (e.g. 
private VPC subnet) - return - - # if we only want to include hosts that match a pattern, skip those that don't - if self.pattern_include and not self.pattern_include.match(dest): - return - - # if we need to exclude hosts that match a pattern, skip those - if self.pattern_exclude and self.pattern_exclude.match(dest): - return - - # Add to index - self.index[dest] = [region, instance.id] - - # Inventory: Group by instance ID (always a group of 1) - if self.group_by_instance_id: - self.inventory[instance.id] = [dest] - if self.nested_groups: - self.push_group(self.inventory, 'instances', instance.id) - - # Inventory: Group by region - if self.group_by_region: - self.push(self.inventory, region, dest) - if self.nested_groups: - self.push_group(self.inventory, 'regions', region) - - # Inventory: Group by availability zone - if self.group_by_availability_zone: - self.push(self.inventory, instance.placement, dest) - if self.nested_groups: - if self.group_by_region: - self.push_group(self.inventory, region, instance.placement) - self.push_group(self.inventory, 'zones', instance.placement) - - # Inventory: Group by Amazon Machine Image (AMI) ID - if self.group_by_ami_id: - ami_id = self.to_safe(instance.image_id) - self.push(self.inventory, ami_id, dest) - if self.nested_groups: - self.push_group(self.inventory, 'images', ami_id) - - # Inventory: Group by instance type - if self.group_by_instance_type: - type_name = self.to_safe('type_' + instance.instance_type) - self.push(self.inventory, type_name, dest) - if self.nested_groups: - self.push_group(self.inventory, 'types', type_name) - - # Inventory: Group by key pair - if self.group_by_key_pair and instance.key_name: - key_name = self.to_safe('key_' + instance.key_name) - self.push(self.inventory, key_name, dest) - if self.nested_groups: - self.push_group(self.inventory, 'keys', key_name) - - # Inventory: Group by VPC - if self.group_by_vpc_id and instance.vpc_id: - vpc_id_name = self.to_safe('vpc_id_' + instance.vpc_id) - self.push(self.inventory, vpc_id_name, dest) - if self.nested_groups: - self.push_group(self.inventory, 'vpcs', vpc_id_name) - - # Inventory: Group by security group - if self.group_by_security_group: - try: - for group in instance.groups: - key = self.to_safe("security_group_" + group.name) - self.push(self.inventory, key, dest) - if self.nested_groups: - self.push_group(self.inventory, 'security_groups', key) - except AttributeError: - print 'Package boto seems a bit older.' - print 'Please upgrade boto >= 2.3.0.' 
- sys.exit(1) - - # Inventory: Group by tag keys - if self.group_by_tag_keys: - for k, v in instance.tags.iteritems(): - key = self.to_safe("tag_" + k + "=" + v) - self.push(self.inventory, key, dest) - if self.nested_groups: - self.push_group(self.inventory, 'tags', self.to_safe("tag_" + k)) - self.push_group(self.inventory, self.to_safe("tag_" + k), key) - - # Inventory: Group by Route53 domain names if enabled - if self.route53_enabled and self.group_by_route53_names: - route53_names = self.get_instance_route53_names(instance) - for name in route53_names: - self.push(self.inventory, name, dest) - if self.nested_groups: - self.push_group(self.inventory, 'route53', name) - - # Global Tag: instances without tags - if self.group_by_tag_none and len(instance.tags) == 0: - self.push(self.inventory, 'tag_none', dest) - if self.nested_groups: - self.push_group(self.inventory, 'tags', 'tag_none') - - # Global Tag: tag all EC2 instances - self.push(self.inventory, 'ec2', dest) - - self.inventory["_meta"]["hostvars"][dest] = self.get_host_info_dict_from_instance(instance) - - - def add_rds_instance(self, instance, region): - ''' Adds an RDS instance to the inventory and index, as long as it is - addressable ''' - - # Only want available instances unless all_rds_instances is True - if not self.all_rds_instances and instance.status != 'available': - return - - # Select the best destination address - dest = instance.endpoint[0] - - if not dest: - # Skip instances we cannot address (e.g. private VPC subnet) - return - - # Add to index - self.index[dest] = [region, instance.id] - - # Inventory: Group by instance ID (always a group of 1) - if self.group_by_instance_id: - self.inventory[instance.id] = [dest] - if self.nested_groups: - self.push_group(self.inventory, 'instances', instance.id) - - # Inventory: Group by region - if self.group_by_region: - self.push(self.inventory, region, dest) - if self.nested_groups: - self.push_group(self.inventory, 'regions', region) - - # Inventory: Group by availability zone - if self.group_by_availability_zone: - self.push(self.inventory, instance.availability_zone, dest) - if self.nested_groups: - if self.group_by_region: - self.push_group(self.inventory, region, instance.availability_zone) - self.push_group(self.inventory, 'zones', instance.availability_zone) - - # Inventory: Group by instance type - if self.group_by_instance_type: - type_name = self.to_safe('type_' + instance.instance_class) - self.push(self.inventory, type_name, dest) - if self.nested_groups: - self.push_group(self.inventory, 'types', type_name) - - # Inventory: Group by VPC - if self.group_by_vpc_id and instance.subnet_group and instance.subnet_group.vpc_id: - vpc_id_name = self.to_safe('vpc_id_' + instance.subnet_group.vpc_id) - self.push(self.inventory, vpc_id_name, dest) - if self.nested_groups: - self.push_group(self.inventory, 'vpcs', vpc_id_name) - - # Inventory: Group by security group - if self.group_by_security_group: - try: - if instance.security_group: - key = self.to_safe("security_group_" + instance.security_group.name) - self.push(self.inventory, key, dest) - if self.nested_groups: - self.push_group(self.inventory, 'security_groups', key) - - except AttributeError: - print 'Package boto seems a bit older.' - print 'Please upgrade boto >= 2.3.0.' 
- sys.exit(1) - - # Inventory: Group by engine - if self.group_by_rds_engine: - self.push(self.inventory, self.to_safe("rds_" + instance.engine), dest) - if self.nested_groups: - self.push_group(self.inventory, 'rds_engines', self.to_safe("rds_" + instance.engine)) - - # Inventory: Group by parameter group - if self.group_by_rds_parameter_group: - self.push(self.inventory, self.to_safe("rds_parameter_group_" + instance.parameter_group.name), dest) - if self.nested_groups: - self.push_group(self.inventory, 'rds_parameter_groups', self.to_safe("rds_parameter_group_" + instance.parameter_group.name)) - - # Global Tag: all RDS instances - self.push(self.inventory, 'rds', dest) - - self.inventory["_meta"]["hostvars"][dest] = self.get_host_info_dict_from_instance(instance) - - - def get_route53_records(self): - ''' Get and store the map of resource records to domain names that - point to them. ''' - - r53_conn = route53.Route53Connection() - all_zones = r53_conn.get_zones() - - route53_zones = [ zone for zone in all_zones if zone.name[:-1] - not in self.route53_excluded_zones ] - - self.route53_records = {} - - for zone in route53_zones: - rrsets = r53_conn.get_all_rrsets(zone.id) - - for record_set in rrsets: - record_name = record_set.name - - if record_name.endswith('.'): - record_name = record_name[:-1] - - for resource in record_set.resource_records: - self.route53_records.setdefault(resource, set()) - self.route53_records[resource].add(record_name) - - - def get_instance_route53_names(self, instance): - ''' Check if an instance is referenced in the records we have from - Route53. If it is, return the list of domain names pointing to said - instance. If nothing points to it, return an empty list. ''' - - instance_attributes = [ 'public_dns_name', 'private_dns_name', - 'ip_address', 'private_ip_address' ] - - name_list = set() - - for attrib in instance_attributes: - try: - value = getattr(instance, attrib) - except AttributeError: - continue - - if value in self.route53_records: - name_list.update(self.route53_records[value]) - - return list(name_list) - - - def get_host_info_dict_from_instance(self, instance): - instance_vars = {} - for key in vars(instance): - value = getattr(instance, key) - key = self.to_safe('ec2_' + key) - - # Handle complex types - # state/previous_state changed to properties in boto in https://github.com/boto/boto/commit/a23c379837f698212252720d2af8dec0325c9518 - if key == 'ec2__state': - instance_vars['ec2_state'] = instance.state or '' - instance_vars['ec2_state_code'] = instance.state_code - elif key == 'ec2__previous_state': - instance_vars['ec2_previous_state'] = instance.previous_state or '' - instance_vars['ec2_previous_state_code'] = instance.previous_state_code - elif type(value) in [int, bool]: - instance_vars[key] = value - elif type(value) in [str, unicode]: - instance_vars[key] = value.strip() - elif type(value) == type(None): - instance_vars[key] = '' - elif key == 'ec2_region': - instance_vars[key] = value.name - elif key == 'ec2__placement': - instance_vars['ec2_placement'] = value.zone - elif key == 'ec2_tags': - for k, v in value.iteritems(): - key = self.to_safe('ec2_tag_' + k) - instance_vars[key] = v - elif key == 'ec2_groups': - group_ids = [] - group_names = [] - for group in value: - group_ids.append(group.id) - group_names.append(group.name) - instance_vars["ec2_security_group_ids"] = ','.join([str(i) for i in group_ids]) - instance_vars["ec2_security_group_names"] = ','.join([str(i) for i in group_names]) - else: - pass - # TODO Product 
codes if someone finds them useful
-                #print key
-                #print type(value)
-                #print value
-
-        return instance_vars
-
-    def get_host_info(self):
-        ''' Get variables about a specific host '''
-
-        if len(self.index) == 0:
-            # Need to load index from cache
-            self.load_index_from_cache()
-
-        if not self.args.host in self.index:
-            # try updating the cache
-            self.do_api_calls_update_cache()
-            if not self.args.host in self.index:
-                # host might not exist anymore
-                return self.json_format_dict({}, True)
-
-        (region, instance_id) = self.index[self.args.host]
-
-        instance = self.get_instance(region, instance_id)
-        return self.json_format_dict(self.get_host_info_dict_from_instance(instance), True)
-
-    def push(self, my_dict, key, element):
-        ''' Push an element onto an array that may not have been defined in
-        the dict '''
-        group_info = my_dict.setdefault(key, [])
-        if isinstance(group_info, dict):
-            host_list = group_info.setdefault('hosts', [])
-            host_list.append(element)
-        else:
-            group_info.append(element)
-
-    def push_group(self, my_dict, key, element):
-        ''' Push a group as a child of another group. '''
-        parent_group = my_dict.setdefault(key, {})
-        if not isinstance(parent_group, dict):
-            parent_group = my_dict[key] = {'hosts': parent_group}
-        child_groups = parent_group.setdefault('children', [])
-        if element not in child_groups:
-            child_groups.append(element)
-
-    def get_inventory_from_cache(self):
-        ''' Reads the inventory from the cache file and returns it as a JSON
-        object '''
-
-        cache = open(self.cache_path_cache, 'r')
-        json_inventory = cache.read()
-        return json_inventory
-
-
-    def load_index_from_cache(self):
-        ''' Reads the index from the cache file sets self.index '''
-
-        cache = open(self.cache_path_index, 'r')
-        json_index = cache.read()
-        self.index = json.loads(json_index)
-
-
-    def write_to_cache(self, data, filename):
-        ''' Writes data in JSON format to a file '''
-
-        json_data = self.json_format_dict(data, True)
-        cache = open(filename, 'w')
-        cache.write(json_data)
-        cache.close()
-
-
-    def to_safe(self, word):
-        ''' Converts 'bad' characters in a string to underscores so they can be
-        used as Ansible groups '''
-
-        return re.sub("[^A-Za-z0-9\-]", "_", word)
-
-
-    def json_format_dict(self, data, pretty=False):
-        ''' Converts a dict to a JSON object and dumps it as a formatted
-        string '''
-
-        if pretty:
-            return json.dumps(data, sort_keys=True, indent=2)
-        else:
-            return json.dumps(data)
-
-
-# Run the script
-Ec2Inventory()
-
diff --git a/inventory/aws/group_vars/all b/inventory/aws/group_vars/all
deleted file mode 100644
index b22da00de..000000000
--- a/inventory/aws/group_vars/all
+++ /dev/null
@@ -1,2 +0,0 @@
----
-ansible_ssh_user: root
diff --git a/inventory/aws/hosts/ec2.ini b/inventory/aws/hosts/ec2.ini
new file mode 100644
index 000000000..eaab0a410
--- /dev/null
+++ b/inventory/aws/hosts/ec2.ini
@@ -0,0 +1,62 @@
+# Ansible EC2 external inventory script settings
+#
+
+[ec2]
+
+# to talk to a private eucalyptus instance uncomment these lines
+# and edit eucalyptus_host to be the host name of your cloud controller
+#eucalyptus = True
+#eucalyptus_host = clc.cloud.domain.org
+
+# AWS regions to make calls to. Set this to 'all' to make requests to all regions
+# in AWS and merge the results together. Alternatively, set this to a comma
+# separated list of regions. E.g. 'us-east-1,us-west-1,us-west-2'
+regions = all
+regions_exclude = us-gov-west-1,cn-north-1
+
+# When generating inventory, Ansible needs to know how to address a server.
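(Editor's aside.) The `regions = all` / `regions_exclude` pair above is expanded by ec2.py into a concrete region list; a minimal Python sketch of that logic, assuming boto is installed (boto.ec2.regions() is the real boto call, expand_regions is a made-up helper name):

import boto.ec2

def expand_regions(regions_setting, regions_exclude_setting):
    # Mirror the read_settings() loop: 'all' means every region boto knows
    # about, minus the excluded ones; otherwise take the literal list.
    excluded = regions_exclude_setting.split(',')
    if regions_setting == 'all':
        return [r.name for r in boto.ec2.regions() if r.name not in excluded]
    return regions_setting.split(',')

# expand_regions('all', 'us-gov-west-1,cn-north-1') -> every public AWS region
# expand_regions('us-east-1,us-west-2', '')         -> ['us-east-1', 'us-west-2']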
+# Each EC2 instance has a lot of variables associated with it. Here is the list:
+# http://docs.pythonboto.org/en/latest/ref/ec2.html#module-boto.ec2.instance
+# Below are 2 variables that are used as the address of a server:
+# - destination_variable
+# - vpc_destination_variable
+
+# This is the normal destination variable to use. If you are running Ansible
+# from outside EC2, then 'public_dns_name' makes the most sense. If you are
+# running Ansible from within EC2, then perhaps you want to use the internal
+# address, and should set this to 'private_dns_name'.
+destination_variable = public_dns_name
+
+# For servers inside a VPC, using DNS names may not make sense. When an instance
+# has 'subnet_id' set, this variable is used. If the subnet is public, setting
+# this to 'ip_address' will return the public IP address. For instances in a
+# private subnet, this should be set to 'private_ip_address', and Ansible must
+# be run from within EC2.
+vpc_destination_variable = ip_address
+
+# To tag instances on EC2 with the resource records that point to them from
+# Route53, uncomment and set 'route53' to True.
+route53 = False
+
+# Additionally, you can specify the list of zones to exclude from lookups in
+# 'route53_excluded_zones' as a comma-separated list.
+# route53_excluded_zones = samplezone1.com, samplezone2.com
+
+# API calls to EC2 are slow. For this reason, we cache the results of an API
+# call. Set this to the path you want cache files to be written to. Two files
+# will be written to this directory:
+# - ansible-ec2.cache
+# - ansible-ec2.index
+cache_path = ~/.ansible/tmp
+
+# The number of seconds a cache file is considered valid. After this many
+# seconds, a new API call will be made, and the cache file will be updated.
+# To disable the cache, set this value to 0
+cache_max_age = 300
+
+# These two settings allow flexible ansible host naming based on a format
+# string and a comma-separated list of ec2 tags. The tags used must be
+# present for all instances, or the code will fail. This overrides both
+# destination_variable and vpc_destination_variable.
+# destination_format = {0}.{1}.rhcloud.com
+# destination_format_tags = Name,environment
diff --git a/inventory/aws/hosts/ec2.py b/inventory/aws/hosts/ec2.py
new file mode 100755
index 000000000..f231ff4c2
--- /dev/null
+++ b/inventory/aws/hosts/ec2.py
@@ -0,0 +1,798 @@
+#!/usr/bin/env python2
+
+'''
+EC2 external inventory script
+=================================
+
+Generates inventory that Ansible can understand by making API requests to
+AWS EC2 using the Boto library.
+
+NOTE: This script assumes Ansible is being executed where the environment
+variables needed for Boto have already been set:
+    export AWS_ACCESS_KEY_ID='AK123'
+    export AWS_SECRET_ACCESS_KEY='abc123'
+
+This script also assumes there is an ec2.ini file alongside it.
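(Editor's aside.) The destination_variable / vpc_destination_variable settings documented in ec2.ini above drive the script's address-selection step; a minimal sketch of that behavior, where choose_destination is a made-up name and `instance` stands in for a boto.ec2.instance.Instance:

def choose_destination(instance, destination_variable, vpc_destination_variable):
    # Instances with subnet_id set live in a VPC and use the VPC setting.
    var = vpc_destination_variable if instance.subnet_id else destination_variable
    # Fall back to an instance tag of the same name if the attribute is absent.
    dest = getattr(instance, var, None)
    if dest is None:
        dest = instance.tags.get(var)
    return dest  # the script skips the instance entirely when this is empty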
+To specify a different path to ec2.ini, define the EC2_INI_PATH environment variable:
+
+    export EC2_INI_PATH=/path/to/my_ec2.ini
+
+If you're using eucalyptus you need to set the above variables and
+you need to define:
+
+    export EC2_URL=http://hostname_of_your_cc:port/services/Eucalyptus
+
+For more details, see: http://docs.pythonboto.org/en/latest/boto_config_tut.html
+
+When run against a specific host, this script returns the following variables:
+ - ec2_ami_launch_index
+ - ec2_architecture
+ - ec2_association
+ - ec2_attachTime
+ - ec2_attachment
+ - ec2_attachmentId
+ - ec2_client_token
+ - ec2_deleteOnTermination
+ - ec2_description
+ - ec2_deviceIndex
+ - ec2_dns_name
+ - ec2_eventsSet
+ - ec2_group_name
+ - ec2_hypervisor
+ - ec2_id
+ - ec2_image_id
+ - ec2_instanceState
+ - ec2_instance_type
+ - ec2_ipOwnerId
+ - ec2_ip_address
+ - ec2_item
+ - ec2_kernel
+ - ec2_key_name
+ - ec2_launch_time
+ - ec2_monitored
+ - ec2_monitoring
+ - ec2_networkInterfaceId
+ - ec2_ownerId
+ - ec2_persistent
+ - ec2_placement
+ - ec2_platform
+ - ec2_previous_state
+ - ec2_private_dns_name
+ - ec2_private_ip_address
+ - ec2_publicIp
+ - ec2_public_dns_name
+ - ec2_ramdisk
+ - ec2_reason
+ - ec2_region
+ - ec2_requester_id
+ - ec2_root_device_name
+ - ec2_root_device_type
+ - ec2_security_group_ids
+ - ec2_security_group_names
+ - ec2_shutdown_state
+ - ec2_sourceDestCheck
+ - ec2_spot_instance_request_id
+ - ec2_state
+ - ec2_state_code
+ - ec2_state_reason
+ - ec2_status
+ - ec2_subnet_id
+ - ec2_tenancy
+ - ec2_virtualization_type
+ - ec2_vpc_id
+
+These variables are pulled out of a boto.ec2.instance object. There is a lack of
+consistency with variable spellings (camelCase and underscores) since this
+just loops through all variables the object exposes. It is preferred to use the
+ones with underscores when multiple exist.
+
+In addition, if an instance has AWS Tags associated with it, each tag is a new
+variable named:
+ - ec2_tag_[Key] = [Value]
+
+Security groups are comma-separated in 'ec2_security_group_ids' and
+'ec2_security_group_names'.
+'''
+
+# (c) 2012, Peter Sankauskas
+#
+# This file is part of Ansible,
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
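(Editor's aside.) For orientation, this is roughly the JSON shape the script prints for --list, which Ansible consumes as a dynamic inventory; the group names and hostname below are invented for illustration:

import json

example_inventory = {
    "us-east-1": ["ec2-54-0-0-1.compute-1.amazonaws.com"],
    "type_m1_small": ["ec2-54-0-0-1.compute-1.amazonaws.com"],
    "tag_Name_web": ["ec2-54-0-0-1.compute-1.amazonaws.com"],
    "_meta": {
        "hostvars": {
            "ec2-54-0-0-1.compute-1.amazonaws.com": {
                "ec2_state": "running",
                "ec2_tag_Name": "web",
            },
        },
    },
}
print(json.dumps(example_inventory, sort_keys=True, indent=2))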
+ +###################################################################### + +import sys +import os +import argparse +import re +from time import time +import boto +from boto import ec2 +from boto import rds +from boto import route53 +import ConfigParser +from collections import defaultdict + +try: + import json +except ImportError: + import simplejson as json + + +class Ec2Inventory(object): + def _empty_inventory(self): + return {"_meta" : {"hostvars" : {}}} + + def __init__(self): + ''' Main execution path ''' + + # Inventory grouped by instance IDs, tags, security groups, regions, + # and availability zones + self.inventory = self._empty_inventory() + + # Index of hostname (address) to instance ID + self.index = {} + + # Read settings and parse CLI arguments + self.read_settings() + self.parse_cli_args() + + # Cache + if self.args.refresh_cache: + self.do_api_calls_update_cache() + elif not self.is_cache_valid(): + self.do_api_calls_update_cache() + + # Data to print + if self.args.host: + data_to_print = self.get_host_info() + + elif self.args.list: + # Display list of instances for inventory + if self.inventory == self._empty_inventory(): + data_to_print = self.get_inventory_from_cache() + else: + data_to_print = self.json_format_dict(self.inventory, True) + + print data_to_print + + + def is_cache_valid(self): + ''' Determines if the cache files have expired, or if it is still valid ''' + + if os.path.isfile(self.cache_path_cache): + mod_time = os.path.getmtime(self.cache_path_cache) + current_time = time() + if (mod_time + self.cache_max_age) > current_time: + if os.path.isfile(self.cache_path_index): + return True + + return False + + + def read_settings(self): + ''' Reads the settings from the ec2.ini file ''' + + config = ConfigParser.SafeConfigParser() + ec2_default_ini_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'ec2.ini') + ec2_ini_path = os.environ.get('EC2_INI_PATH', ec2_default_ini_path) + config.read(ec2_ini_path) + + # is eucalyptus? 
+ self.eucalyptus_host = None + self.eucalyptus = False + if config.has_option('ec2', 'eucalyptus'): + self.eucalyptus = config.getboolean('ec2', 'eucalyptus') + if self.eucalyptus and config.has_option('ec2', 'eucalyptus_host'): + self.eucalyptus_host = config.get('ec2', 'eucalyptus_host') + + # Regions + self.regions = [] + configRegions = config.get('ec2', 'regions') + configRegions_exclude = config.get('ec2', 'regions_exclude') + if (configRegions == 'all'): + if self.eucalyptus_host: + self.regions.append(boto.connect_euca(host=self.eucalyptus_host).region.name) + else: + for regionInfo in ec2.regions(): + if regionInfo.name not in configRegions_exclude: + self.regions.append(regionInfo.name) + else: + self.regions = configRegions.split(",") + + # Destination addresses + self.destination_variable = config.get('ec2', 'destination_variable') + self.vpc_destination_variable = config.get('ec2', 'vpc_destination_variable') + + if config.has_option('ec2', 'destination_format') and \ + config.has_option('ec2', 'destination_format_tags'): + self.destination_format = config.get('ec2', 'destination_format') + self.destination_format_tags = config.get('ec2', 'destination_format_tags').split(',') + else: + self.destination_format = None + self.destination_format_tags = None + + # Route53 + self.route53_enabled = config.getboolean('ec2', 'route53') + self.route53_excluded_zones = [] + if config.has_option('ec2', 'route53_excluded_zones'): + self.route53_excluded_zones.extend( + config.get('ec2', 'route53_excluded_zones', '').split(',')) + + # Include RDS instances? + self.rds_enabled = True + if config.has_option('ec2', 'rds'): + self.rds_enabled = config.getboolean('ec2', 'rds') + + # Return all EC2 and RDS instances (if RDS is enabled) + if config.has_option('ec2', 'all_instances'): + self.all_instances = config.getboolean('ec2', 'all_instances') + else: + self.all_instances = False + if config.has_option('ec2', 'all_rds_instances') and self.rds_enabled: + self.all_rds_instances = config.getboolean('ec2', 'all_rds_instances') + else: + self.all_rds_instances = False + + # Cache related + cache_dir = os.path.expanduser(config.get('ec2', 'cache_path')) + if not os.path.exists(cache_dir): + os.makedirs(cache_dir) + + self.cache_path_cache = cache_dir + "/ansible-ec2.cache" + self.cache_path_index = cache_dir + "/ansible-ec2.index" + self.cache_max_age = config.getint('ec2', 'cache_max_age') + + # Configure nested groups instead of flat namespace. + if config.has_option('ec2', 'nested_groups'): + self.nested_groups = config.getboolean('ec2', 'nested_groups') + else: + self.nested_groups = False + + # Configure which groups should be created. + group_by_options = [ + 'group_by_instance_id', + 'group_by_region', + 'group_by_availability_zone', + 'group_by_ami_id', + 'group_by_instance_type', + 'group_by_key_pair', + 'group_by_vpc_id', + 'group_by_security_group', + 'group_by_tag_keys', + 'group_by_tag_none', + 'group_by_route53_names', + 'group_by_rds_engine', + 'group_by_rds_parameter_group', + ] + for option in group_by_options: + if config.has_option('ec2', option): + setattr(self, option, config.getboolean('ec2', option)) + else: + setattr(self, option, True) + + # Do we need to just include hosts that match a pattern? 
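(Editor's aside.) A sketch of what the pattern options asked about here do, with made-up patterns; the script compiles pattern_include / pattern_exclude from ec2.ini with re.compile and tests each candidate address:

import re

pattern_include = re.compile(r'^web-.*')   # keep only hosts matching this
pattern_exclude = re.compile(r'.*-test$')  # drop hosts matching this

def is_wanted(dest):
    if pattern_include and not pattern_include.match(dest):
        return False
    if pattern_exclude and pattern_exclude.match(dest):
        return False
    return True

# is_wanted('web-01.example.com')  -> True
# is_wanted('db-01.example.com')   -> False (not included)
# is_wanted('web-01-test')         -> False (excluded)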
+ try: + pattern_include = config.get('ec2', 'pattern_include') + if pattern_include and len(pattern_include) > 0: + self.pattern_include = re.compile(pattern_include) + else: + self.pattern_include = None + except ConfigParser.NoOptionError, e: + self.pattern_include = None + + # Do we need to exclude hosts that match a pattern? + try: + pattern_exclude = config.get('ec2', 'pattern_exclude'); + if pattern_exclude and len(pattern_exclude) > 0: + self.pattern_exclude = re.compile(pattern_exclude) + else: + self.pattern_exclude = None + except ConfigParser.NoOptionError, e: + self.pattern_exclude = None + + # Instance filters (see boto and EC2 API docs). Ignore invalid filters. + self.ec2_instance_filters = defaultdict(list) + if config.has_option('ec2', 'instance_filters'): + for instance_filter in config.get('ec2', 'instance_filters', '').split(','): + instance_filter = instance_filter.strip() + if not instance_filter or '=' not in instance_filter: + continue + filter_key, filter_value = [x.strip() for x in instance_filter.split('=', 1)] + if not filter_key: + continue + self.ec2_instance_filters[filter_key].append(filter_value) + + def parse_cli_args(self): + ''' Command line argument processing ''' + + parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on EC2') + parser.add_argument('--list', action='store_true', default=True, + help='List instances (default: True)') + parser.add_argument('--host', action='store', + help='Get all the variables about a specific instance') + parser.add_argument('--refresh-cache', action='store_true', default=False, + help='Force refresh of cache by making API requests to EC2 (default: False - use cache files)') + self.args = parser.parse_args() + + + def do_api_calls_update_cache(self): + ''' Do API calls to each region, and save data in cache files ''' + + if self.route53_enabled: + self.get_route53_records() + + for region in self.regions: + self.get_instances_by_region(region) + if self.rds_enabled: + self.get_rds_instances_by_region(region) + + self.write_to_cache(self.inventory, self.cache_path_cache) + self.write_to_cache(self.index, self.cache_path_index) + + + def get_instances_by_region(self, region): + ''' Makes an AWS EC2 API call to the list of instances in a particular + region ''' + + try: + if self.eucalyptus: + conn = boto.connect_euca(host=self.eucalyptus_host) + conn.APIVersion = '2010-08-31' + else: + conn = ec2.connect_to_region(region) + + # connect_to_region will fail "silently" by returning None if the region name is wrong or not supported + if conn is None: + print("region name: %s likely not supported, or AWS is down. connection to region failed." 
% region) + sys.exit(1) + + reservations = [] + if self.ec2_instance_filters: + for filter_key, filter_values in self.ec2_instance_filters.iteritems(): + reservations.extend(conn.get_all_instances(filters = { filter_key : filter_values })) + else: + reservations = conn.get_all_instances() + + for reservation in reservations: + for instance in reservation.instances: + self.add_instance(instance, region) + + except boto.exception.BotoServerError, e: + if not self.eucalyptus: + print "Looks like AWS is down again:" + print e + sys.exit(1) + + def get_rds_instances_by_region(self, region): + ''' Makes an AWS API call to the list of RDS instances in a particular + region ''' + + try: + conn = rds.connect_to_region(region) + if conn: + instances = conn.get_all_dbinstances() + for instance in instances: + self.add_rds_instance(instance, region) + except boto.exception.BotoServerError, e: + if not e.reason == "Forbidden": + print "Looks like AWS RDS is down: " + print e + sys.exit(1) + + def get_instance(self, region, instance_id): + ''' Gets details about a specific instance ''' + if self.eucalyptus: + conn = boto.connect_euca(self.eucalyptus_host) + conn.APIVersion = '2010-08-31' + else: + conn = ec2.connect_to_region(region) + + # connect_to_region will fail "silently" by returning None if the region name is wrong or not supported + if conn is None: + print("region name: %s likely not supported, or AWS is down. connection to region failed." % region) + sys.exit(1) + + reservations = conn.get_all_instances([instance_id]) + for reservation in reservations: + for instance in reservation.instances: + return instance + + def add_instance(self, instance, region): + ''' Adds an instance to the inventory and index, as long as it is + addressable ''' + + # Only want running instances unless all_instances is True + if not self.all_instances and instance.state != 'running': + return + + # Select the best destination address + if self.destination_format and self.destination_format_tags: + dest = self.destination_format.format(*[ getattr(instance, 'tags').get(tag, 'nil') for tag in self.destination_format_tags ]) + elif instance.subnet_id: + dest = getattr(instance, self.vpc_destination_variable, None) + if dest is None: + dest = getattr(instance, 'tags').get(self.vpc_destination_variable, None) + else: + dest = getattr(instance, self.destination_variable, None) + if dest is None: + dest = getattr(instance, 'tags').get(self.destination_variable, None) + + if not dest: + # Skip instances we cannot address (e.g. 
private VPC subnet) + return + + # if we only want to include hosts that match a pattern, skip those that don't + if self.pattern_include and not self.pattern_include.match(dest): + return + + # if we need to exclude hosts that match a pattern, skip those + if self.pattern_exclude and self.pattern_exclude.match(dest): + return + + # Add to index + self.index[dest] = [region, instance.id] + + # Inventory: Group by instance ID (always a group of 1) + if self.group_by_instance_id: + self.inventory[instance.id] = [dest] + if self.nested_groups: + self.push_group(self.inventory, 'instances', instance.id) + + # Inventory: Group by region + if self.group_by_region: + self.push(self.inventory, region, dest) + if self.nested_groups: + self.push_group(self.inventory, 'regions', region) + + # Inventory: Group by availability zone + if self.group_by_availability_zone: + self.push(self.inventory, instance.placement, dest) + if self.nested_groups: + if self.group_by_region: + self.push_group(self.inventory, region, instance.placement) + self.push_group(self.inventory, 'zones', instance.placement) + + # Inventory: Group by Amazon Machine Image (AMI) ID + if self.group_by_ami_id: + ami_id = self.to_safe(instance.image_id) + self.push(self.inventory, ami_id, dest) + if self.nested_groups: + self.push_group(self.inventory, 'images', ami_id) + + # Inventory: Group by instance type + if self.group_by_instance_type: + type_name = self.to_safe('type_' + instance.instance_type) + self.push(self.inventory, type_name, dest) + if self.nested_groups: + self.push_group(self.inventory, 'types', type_name) + + # Inventory: Group by key pair + if self.group_by_key_pair and instance.key_name: + key_name = self.to_safe('key_' + instance.key_name) + self.push(self.inventory, key_name, dest) + if self.nested_groups: + self.push_group(self.inventory, 'keys', key_name) + + # Inventory: Group by VPC + if self.group_by_vpc_id and instance.vpc_id: + vpc_id_name = self.to_safe('vpc_id_' + instance.vpc_id) + self.push(self.inventory, vpc_id_name, dest) + if self.nested_groups: + self.push_group(self.inventory, 'vpcs', vpc_id_name) + + # Inventory: Group by security group + if self.group_by_security_group: + try: + for group in instance.groups: + key = self.to_safe("security_group_" + group.name) + self.push(self.inventory, key, dest) + if self.nested_groups: + self.push_group(self.inventory, 'security_groups', key) + except AttributeError: + print 'Package boto seems a bit older.' + print 'Please upgrade boto >= 2.3.0.' 
+ sys.exit(1) + + # Inventory: Group by tag keys + if self.group_by_tag_keys: + for k, v in instance.tags.iteritems(): + key = self.to_safe("tag_" + k + "=" + v) + self.push(self.inventory, key, dest) + if self.nested_groups: + self.push_group(self.inventory, 'tags', self.to_safe("tag_" + k)) + self.push_group(self.inventory, self.to_safe("tag_" + k), key) + + # Inventory: Group by Route53 domain names if enabled + if self.route53_enabled and self.group_by_route53_names: + route53_names = self.get_instance_route53_names(instance) + for name in route53_names: + self.push(self.inventory, name, dest) + if self.nested_groups: + self.push_group(self.inventory, 'route53', name) + + # Global Tag: instances without tags + if self.group_by_tag_none and len(instance.tags) == 0: + self.push(self.inventory, 'tag_none', dest) + if self.nested_groups: + self.push_group(self.inventory, 'tags', 'tag_none') + + # Global Tag: tag all EC2 instances + self.push(self.inventory, 'ec2', dest) + + self.inventory["_meta"]["hostvars"][dest] = self.get_host_info_dict_from_instance(instance) + + + def add_rds_instance(self, instance, region): + ''' Adds an RDS instance to the inventory and index, as long as it is + addressable ''' + + # Only want available instances unless all_rds_instances is True + if not self.all_rds_instances and instance.status != 'available': + return + + # Select the best destination address + dest = instance.endpoint[0] + + if not dest: + # Skip instances we cannot address (e.g. private VPC subnet) + return + + # Add to index + self.index[dest] = [region, instance.id] + + # Inventory: Group by instance ID (always a group of 1) + if self.group_by_instance_id: + self.inventory[instance.id] = [dest] + if self.nested_groups: + self.push_group(self.inventory, 'instances', instance.id) + + # Inventory: Group by region + if self.group_by_region: + self.push(self.inventory, region, dest) + if self.nested_groups: + self.push_group(self.inventory, 'regions', region) + + # Inventory: Group by availability zone + if self.group_by_availability_zone: + self.push(self.inventory, instance.availability_zone, dest) + if self.nested_groups: + if self.group_by_region: + self.push_group(self.inventory, region, instance.availability_zone) + self.push_group(self.inventory, 'zones', instance.availability_zone) + + # Inventory: Group by instance type + if self.group_by_instance_type: + type_name = self.to_safe('type_' + instance.instance_class) + self.push(self.inventory, type_name, dest) + if self.nested_groups: + self.push_group(self.inventory, 'types', type_name) + + # Inventory: Group by VPC + if self.group_by_vpc_id and instance.subnet_group and instance.subnet_group.vpc_id: + vpc_id_name = self.to_safe('vpc_id_' + instance.subnet_group.vpc_id) + self.push(self.inventory, vpc_id_name, dest) + if self.nested_groups: + self.push_group(self.inventory, 'vpcs', vpc_id_name) + + # Inventory: Group by security group + if self.group_by_security_group: + try: + if instance.security_group: + key = self.to_safe("security_group_" + instance.security_group.name) + self.push(self.inventory, key, dest) + if self.nested_groups: + self.push_group(self.inventory, 'security_groups', key) + + except AttributeError: + print 'Package boto seems a bit older.' + print 'Please upgrade boto >= 2.3.0.' 
+ sys.exit(1) + + # Inventory: Group by engine + if self.group_by_rds_engine: + self.push(self.inventory, self.to_safe("rds_" + instance.engine), dest) + if self.nested_groups: + self.push_group(self.inventory, 'rds_engines', self.to_safe("rds_" + instance.engine)) + + # Inventory: Group by parameter group + if self.group_by_rds_parameter_group: + self.push(self.inventory, self.to_safe("rds_parameter_group_" + instance.parameter_group.name), dest) + if self.nested_groups: + self.push_group(self.inventory, 'rds_parameter_groups', self.to_safe("rds_parameter_group_" + instance.parameter_group.name)) + + # Global Tag: all RDS instances + self.push(self.inventory, 'rds', dest) + + self.inventory["_meta"]["hostvars"][dest] = self.get_host_info_dict_from_instance(instance) + + + def get_route53_records(self): + ''' Get and store the map of resource records to domain names that + point to them. ''' + + r53_conn = route53.Route53Connection() + all_zones = r53_conn.get_zones() + + route53_zones = [ zone for zone in all_zones if zone.name[:-1] + not in self.route53_excluded_zones ] + + self.route53_records = {} + + for zone in route53_zones: + rrsets = r53_conn.get_all_rrsets(zone.id) + + for record_set in rrsets: + record_name = record_set.name + + if record_name.endswith('.'): + record_name = record_name[:-1] + + for resource in record_set.resource_records: + self.route53_records.setdefault(resource, set()) + self.route53_records[resource].add(record_name) + + + def get_instance_route53_names(self, instance): + ''' Check if an instance is referenced in the records we have from + Route53. If it is, return the list of domain names pointing to said + instance. If nothing points to it, return an empty list. ''' + + instance_attributes = [ 'public_dns_name', 'private_dns_name', + 'ip_address', 'private_ip_address' ] + + name_list = set() + + for attrib in instance_attributes: + try: + value = getattr(instance, attrib) + except AttributeError: + continue + + if value in self.route53_records: + name_list.update(self.route53_records[value]) + + return list(name_list) + + + def get_host_info_dict_from_instance(self, instance): + instance_vars = {} + for key in vars(instance): + value = getattr(instance, key) + key = self.to_safe('ec2_' + key) + + # Handle complex types + # state/previous_state changed to properties in boto in https://github.com/boto/boto/commit/a23c379837f698212252720d2af8dec0325c9518 + if key == 'ec2__state': + instance_vars['ec2_state'] = instance.state or '' + instance_vars['ec2_state_code'] = instance.state_code + elif key == 'ec2__previous_state': + instance_vars['ec2_previous_state'] = instance.previous_state or '' + instance_vars['ec2_previous_state_code'] = instance.previous_state_code + elif type(value) in [int, bool]: + instance_vars[key] = value + elif type(value) in [str, unicode]: + instance_vars[key] = value.strip() + elif type(value) == type(None): + instance_vars[key] = '' + elif key == 'ec2_region': + instance_vars[key] = value.name + elif key == 'ec2__placement': + instance_vars['ec2_placement'] = value.zone + elif key == 'ec2_tags': + for k, v in value.iteritems(): + key = self.to_safe('ec2_tag_' + k) + instance_vars[key] = v + elif key == 'ec2_groups': + group_ids = [] + group_names = [] + for group in value: + group_ids.append(group.id) + group_names.append(group.name) + instance_vars["ec2_security_group_ids"] = ','.join([str(i) for i in group_ids]) + instance_vars["ec2_security_group_names"] = ','.join([str(i) for i in group_names]) + else: + pass + # TODO Product 
codes if someone finds them useful + #print key + #print type(value) + #print value + + return instance_vars + + def get_host_info(self): + ''' Get variables about a specific host ''' + + if len(self.index) == 0: + # Need to load index from cache + self.load_index_from_cache() + + if not self.args.host in self.index: + # try updating the cache + self.do_api_calls_update_cache() + if not self.args.host in self.index: + # host might not exist anymore + return self.json_format_dict({}, True) + + (region, instance_id) = self.index[self.args.host] + + instance = self.get_instance(region, instance_id) + return self.json_format_dict(self.get_host_info_dict_from_instance(instance), True) + + def push(self, my_dict, key, element): + ''' Push an element onto an array that may not have been defined in + the dict ''' + group_info = my_dict.setdefault(key, []) + if isinstance(group_info, dict): + host_list = group_info.setdefault('hosts', []) + host_list.append(element) + else: + group_info.append(element) + + def push_group(self, my_dict, key, element): + ''' Push a group as a child of another group. ''' + parent_group = my_dict.setdefault(key, {}) + if not isinstance(parent_group, dict): + parent_group = my_dict[key] = {'hosts': parent_group} + child_groups = parent_group.setdefault('children', []) + if element not in child_groups: + child_groups.append(element) + + def get_inventory_from_cache(self): + ''' Reads the inventory from the cache file and returns it as a JSON + object ''' + + cache = open(self.cache_path_cache, 'r') + json_inventory = cache.read() + return json_inventory + + + def load_index_from_cache(self): + ''' Reads the index from the cache file sets self.index ''' + + cache = open(self.cache_path_index, 'r') + json_index = cache.read() + self.index = json.loads(json_index) + + + def write_to_cache(self, data, filename): + ''' Writes data in JSON format to a file ''' + + json_data = self.json_format_dict(data, True) + cache = open(filename, 'w') + cache.write(json_data) + cache.close() + + + def to_safe(self, word): + ''' Converts 'bad' characters in a string to underscores so they can be + used as Ansible groups ''' + + return re.sub("[^A-Za-z0-9\-]", "_", word) + + + def json_format_dict(self, data, pretty=False): + ''' Converts a dict to a JSON object and dumps it as a formatted + string ''' + + if pretty: + return json.dumps(data, sort_keys=True, indent=2) + else: + return json.dumps(data) + + +# Run the script +Ec2Inventory() + diff --git a/inventory/aws/hosts/hosts b/inventory/aws/hosts/hosts new file mode 100644 index 000000000..6c590ac93 --- /dev/null +++ b/inventory/aws/hosts/hosts @@ -0,0 +1 @@ +localhost ansible_sudo=no ansible_python_interpreter=/usr/bin/python2 diff --git a/inventory/byo/group_vars/all b/inventory/byo/group_vars/all deleted file mode 100644 index d63e96668..000000000 --- a/inventory/byo/group_vars/all +++ /dev/null @@ -1,28 +0,0 @@ ---- -# lets assume that we want to use the root as the ssh user for all hosts -ansible_ssh_user: root - -# default debug level for all OpenShift hosts -openshift_debug_level: 4 - -# set the OpenShift deployment type for all hosts -openshift_deployment_type: enterprise - -# Override the default registry for development -openshift_registry_url: docker-buildvm-rhose.usersys.redhat.com:5000/openshift3_beta/ose-${component}:${version} - -# Use latest Errata puddle as an additional repo: -#openshift_additional_repos: -#- id: ose-devel -# name: ose-devel -# baseurl: 
http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterpriseErrata/3.0/latest/RH7-RHOSE-3.0/$basearch/os
-#  enabled: 1
-#  gpgcheck: 0
-
-# Use latest Whitelist puddle as an additional repo:
-openshift_additional_repos:
-- id: ose-devel
-  name: ose-devel
-  baseurl: http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterprise/3.0/latest/RH7-RHOSE-3.0/$basearch/os
-  enabled: 1
-  gpgcheck: 0
diff --git a/inventory/byo/hosts b/inventory/byo/hosts
index 2dd854778..e9af5e571 100644
--- a/inventory/byo/hosts
+++ b/inventory/byo/hosts
@@ -1,5 +1,30 @@
 # This is an example of a bring your own (byo) host inventory
+# Create an OSEv3 group that contains the masters and nodes groups
+[OSEv3:children]
+masters
+nodes
+
+# Set variables common for all OSEv3 hosts
+[OSEv3:vars]
+# SSH user, this user should allow ssh based auth without requiring a password
+ansible_ssh_user=root
+
+# If ansible_ssh_user is not root, ansible_sudo must be set to true
+#ansible_sudo=true
+
+# To deploy origin, change deployment_type to origin
+deployment_type=enterprise
+
+# Pre-release registry URL
+openshift_registry_url=docker-buildvm-rhose.usersys.redhat.com:5000/openshift3_beta/ose-${component}:${version}
+
+# Pre-release additional repo
+openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'baseurl': 'http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterprise/3.0/latest/RH7-RHOSE-3.0/$basearch/os', 'enabled': 1, 'gpgcheck': 0}]
+
+# Origin copr repo
+#openshift_additional_repos=[{'id': 'openshift-origin-copr', 'name': 'OpenShift Origin COPR', 'baseurl': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/', 'enabled': 1, 'gpgcheck': 1, 'gpgkey': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg'}]
+
 # host group for masters
 [masters]
 ose3-master-ansible.test.example.com
@@ -7,4 +32,3 @@ ose3-master-ansible.test.example.com
 # host group for nodes
 [nodes]
 ose3-node[1:2]-ansible.test.example.com
-
diff --git a/inventory/gce/gce.py b/inventory/gce/gce.py
deleted file mode 100755
index 3403f735e..000000000
--- a/inventory/gce/gce.py
+++ /dev/null
@@ -1,287 +0,0 @@
-#!/usr/bin/env python2
-# Copyright 2013 Google Inc.
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-'''
-GCE external inventory script
-=================================
-
-Generates inventory that Ansible can understand by making API requests to
-Google Compute Engine via the libcloud library. Full install/configuration
-instructions for the gce* modules can be found in the comments of
-ansible/test/gce_tests.py.
- -When run against a specific host, this script returns the following variables -based on the data obtained from the libcloud Node object: - - gce_uuid - - gce_id - - gce_image - - gce_machine_type - - gce_private_ip - - gce_public_ip - - gce_name - - gce_description - - gce_status - - gce_zone - - gce_tags - - gce_metadata - - gce_network - -When run in --list mode, instances are grouped by the following categories: - - zone: - zone group name examples are us-central1-b, europe-west1-a, etc. - - instance tags: - An entry is created for each tag. For example, if you have two instances - with a common tag called 'foo', they will both be grouped together under - the 'tag_foo' name. - - network name: - the name of the network is appended to 'network_' (e.g. the 'default' - network will result in a group named 'network_default') - - machine type - types follow a pattern like n1-standard-4, g1-small, etc. - - running status: - group name prefixed with 'status_' (e.g. status_running, status_stopped,..) - - image: - when using an ephemeral/scratch disk, this will be set to the image name - used when creating the instance (e.g. debian-7-wheezy-v20130816). when - your instance was created with a root persistent disk it will be set to - 'persistent_disk' since there is no current way to determine the image. - -Examples: - Execute uname on all instances in the us-central1-a zone - $ ansible -i gce.py us-central1-a -m shell -a "/bin/uname -a" - - Use the GCE inventory script to print out instance specific information - $ plugins/inventory/gce.py --host my_instance - -Author: Eric Johnson -Version: 0.0.1 -''' - -USER_AGENT_PRODUCT="Ansible-gce_inventory_plugin" -USER_AGENT_VERSION="v1" - -import sys -import os -import argparse -import ConfigParser - -try: - import json -except ImportError: - import simplejson as json - -try: - from libcloud.compute.types import Provider - from libcloud.compute.providers import get_driver - _ = Provider.GCE -except: - print("GCE inventory script requires libcloud >= 0.13") - sys.exit(1) - - -class GceInventory(object): - def __init__(self): - # Read settings and parse CLI arguments - self.parse_cli_args() - self.driver = self.get_gce_driver() - - # Just display data for specific host - if self.args.host: - print self.json_format_dict(self.node_to_dict( - self.get_instance(self.args.host)), - pretty=self.args.pretty) - sys.exit(0) - - # Otherwise, assume user wants all instances grouped - print(self.json_format_dict(self.group_instances(), - pretty=self.args.pretty)) - sys.exit(0) - - def get_gce_driver(self): - """Determine the GCE authorization settings and return a - libcloud driver. - """ - gce_ini_default_path = os.path.join( - os.path.dirname(os.path.realpath(__file__)), "gce.ini") - gce_ini_path = os.environ.get('GCE_INI_PATH', gce_ini_default_path) - - # Create a ConfigParser. - # This provides empty defaults to each key, so that environment - # variable configuration (as opposed to INI configuration) is able - # to work. - config = ConfigParser.SafeConfigParser(defaults={ - 'gce_service_account_email_address': '', - 'gce_service_account_pem_file_path': '', - 'gce_project_id': '', - 'libcloud_secrets': '', - }) - if 'gce' not in config.sections(): - config.add_section('gce') - config.read(gce_ini_path) - - # Attempt to get GCE params from a configuration file, if one - # exists. 
- secrets_path = config.get('gce', 'libcloud_secrets') - secrets_found = False - try: - import secrets - args = list(getattr(secrets, 'GCE_PARAMS', [])) - kwargs = getattr(secrets, 'GCE_KEYWORD_PARAMS', {}) - secrets_found = True - except: - pass - - if not secrets_found and secrets_path: - if not secrets_path.endswith('secrets.py'): - err = "Must specify libcloud secrets file as " - err += "/absolute/path/to/secrets.py" - print(err) - sys.exit(1) - sys.path.append(os.path.dirname(secrets_path)) - try: - import secrets - args = list(getattr(secrets, 'GCE_PARAMS', [])) - kwargs = getattr(secrets, 'GCE_KEYWORD_PARAMS', {}) - secrets_found = True - except: - pass - if not secrets_found: - args = [ - config.get('gce','gce_service_account_email_address'), - config.get('gce','gce_service_account_pem_file_path') - ] - kwargs = {'project': config.get('gce', 'gce_project_id')} - - # If the appropriate environment variables are set, they override - # other configuration; process those into our args and kwargs. - args[0] = os.environ.get('GCE_EMAIL', args[0]) - args[1] = os.environ.get('GCE_PEM_FILE_PATH', args[1]) - kwargs['project'] = os.environ.get('GCE_PROJECT', kwargs['project']) - - # Retrieve and return the GCE driver. - gce = get_driver(Provider.GCE)(*args, **kwargs) - gce.connection.user_agent_append( - '%s/%s' % (USER_AGENT_PRODUCT, USER_AGENT_VERSION), - ) - return gce - - def parse_cli_args(self): - ''' Command line argument processing ''' - - parser = argparse.ArgumentParser( - description='Produce an Ansible Inventory file based on GCE') - parser.add_argument('--list', action='store_true', default=True, - help='List instances (default: True)') - parser.add_argument('--host', action='store', - help='Get all information about an instance') - parser.add_argument('--pretty', action='store_true', default=False, - help='Pretty format (default: False)') - self.args = parser.parse_args() - - - def node_to_dict(self, inst): - md = {} - - if inst is None: - return {} - - if inst.extra['metadata'].has_key('items'): - for entry in inst.extra['metadata']['items']: - md[entry['key']] = entry['value'] - - net = inst.extra['networkInterfaces'][0]['network'].split('/')[-1] - return { - 'gce_uuid': inst.uuid, - 'gce_id': inst.id, - 'gce_image': inst.image, - 'gce_machine_type': inst.size, - 'gce_private_ip': inst.private_ips[0], - 'gce_public_ip': inst.public_ips[0], - 'gce_name': inst.name, - 'gce_description': inst.extra['description'], - 'gce_status': inst.extra['status'], - 'gce_zone': inst.extra['zone'].name, - 'gce_tags': inst.extra['tags'], - 'gce_metadata': md, - 'gce_network': net, - # Hosts don't have a public name, so we add an IP - 'ansible_ssh_host': inst.public_ips[0] - } - - def get_instance(self, instance_name): - '''Gets details about a specific instance ''' - try: - return self.driver.ex_get_node(instance_name) - except Exception, e: - return None - - def group_instances(self): - '''Group all instances''' - groups = {} - meta = {} - meta["hostvars"] = {} - - for node in self.driver.list_nodes(): - name = node.name - - meta["hostvars"][name] = self.node_to_dict(node) - - zone = node.extra['zone'].name - if groups.has_key(zone): groups[zone].append(name) - else: groups[zone] = [name] - - tags = node.extra['tags'] - for t in tags: - tag = 'tag_%s' % t - if groups.has_key(tag): groups[tag].append(name) - else: groups[tag] = [name] - - net = node.extra['networkInterfaces'][0]['network'].split('/')[-1] - net = 'network_%s' % net - if groups.has_key(net): groups[net].append(name) - else: 
groups[net] = [name]
-
-            machine_type = node.size
-            if groups.has_key(machine_type): groups[machine_type].append(name)
-            else: groups[machine_type] = [name]
-
-            image = node.image and node.image or 'persistent_disk'
-            if groups.has_key(image): groups[image].append(name)
-            else: groups[image] = [name]
-
-            status = node.extra['status']
-            stat = 'status_%s' % status.lower()
-            if groups.has_key(stat): groups[stat].append(name)
-            else: groups[stat] = [name]
-
-        groups["_meta"] = meta
-
-        return groups
-
-    def json_format_dict(self, data, pretty=False):
-        ''' Converts a dict to a JSON object and dumps it as a formatted
-        string '''
-
-        if pretty:
-            return json.dumps(data, sort_keys=True, indent=2)
-        else:
-            return json.dumps(data)
-
-
-# Run the script
-GceInventory()
diff --git a/inventory/gce/group_vars/all b/inventory/gce/group_vars/all
deleted file mode 100644
index b22da00de..000000000
--- a/inventory/gce/group_vars/all
+++ /dev/null
@@ -1,2 +0,0 @@
----
-ansible_ssh_user: root
diff --git a/inventory/gce/hosts/gce.py b/inventory/gce/hosts/gce.py
new file mode 100755
index 000000000..3403f735e
--- /dev/null
+++ b/inventory/gce/hosts/gce.py
@@ -0,0 +1,287 @@
+#!/usr/bin/env python2
+# Copyright 2013 Google Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+'''
+GCE external inventory script
+=================================
+
+Generates inventory that Ansible can understand by making API requests to
+Google Compute Engine via the libcloud library. Full install/configuration
+instructions for the gce* modules can be found in the comments of
+ansible/test/gce_tests.py.
+
+When run against a specific host, this script returns the following variables
+based on the data obtained from the libcloud Node object:
+ - gce_uuid
+ - gce_id
+ - gce_image
+ - gce_machine_type
+ - gce_private_ip
+ - gce_public_ip
+ - gce_name
+ - gce_description
+ - gce_status
+ - gce_zone
+ - gce_tags
+ - gce_metadata
+ - gce_network
+
+When run in --list mode, instances are grouped by the following categories:
+ - zone:
+   zone group name examples are us-central1-b, europe-west1-a, etc.
+ - instance tags:
+   An entry is created for each tag. For example, if you have two instances
+   with a common tag called 'foo', they will both be grouped together under
+   the 'tag_foo' name.
+ - network name:
+   the name of the network is appended to 'network_' (e.g. the 'default'
+   network will result in a group named 'network_default')
+ - machine type
+   types follow a pattern like n1-standard-4, g1-small, etc.
+ - running status:
+   group name prefixed with 'status_' (e.g. status_running, status_stopped,..)
+ - image:
+   when using an ephemeral/scratch disk, this will be set to the image name
+   used when creating the instance (e.g. debian-7-wheezy-v20130816). when
+   your instance was created with a root persistent disk it will be set to
+   'persistent_disk' since there is no current way to determine the image.
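(Editor's aside.) Concretely, the categories listed above would place a single made-up instance into groups like these, alongside the _meta hostvars block; every name here is illustrative:

example_groups = {
    "us-central1-a": ["my-instance"],              # zone
    "tag_foo": ["my-instance"],                    # instance tag
    "network_default": ["my-instance"],            # network name
    "n1-standard-4": ["my-instance"],              # machine type
    "status_running": ["my-instance"],             # running status
    "debian-7-wheezy-v20130816": ["my-instance"],  # image
    "_meta": {"hostvars": {"my-instance": {"gce_status": "RUNNING"}}},
}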
+ +Examples: + Execute uname on all instances in the us-central1-a zone + $ ansible -i gce.py us-central1-a -m shell -a "/bin/uname -a" + + Use the GCE inventory script to print out instance specific information + $ plugins/inventory/gce.py --host my_instance + +Author: Eric Johnson +Version: 0.0.1 +''' + +USER_AGENT_PRODUCT="Ansible-gce_inventory_plugin" +USER_AGENT_VERSION="v1" + +import sys +import os +import argparse +import ConfigParser + +try: + import json +except ImportError: + import simplejson as json + +try: + from libcloud.compute.types import Provider + from libcloud.compute.providers import get_driver + _ = Provider.GCE +except: + print("GCE inventory script requires libcloud >= 0.13") + sys.exit(1) + + +class GceInventory(object): + def __init__(self): + # Read settings and parse CLI arguments + self.parse_cli_args() + self.driver = self.get_gce_driver() + + # Just display data for specific host + if self.args.host: + print self.json_format_dict(self.node_to_dict( + self.get_instance(self.args.host)), + pretty=self.args.pretty) + sys.exit(0) + + # Otherwise, assume user wants all instances grouped + print(self.json_format_dict(self.group_instances(), + pretty=self.args.pretty)) + sys.exit(0) + + def get_gce_driver(self): + """Determine the GCE authorization settings and return a + libcloud driver. + """ + gce_ini_default_path = os.path.join( + os.path.dirname(os.path.realpath(__file__)), "gce.ini") + gce_ini_path = os.environ.get('GCE_INI_PATH', gce_ini_default_path) + + # Create a ConfigParser. + # This provides empty defaults to each key, so that environment + # variable configuration (as opposed to INI configuration) is able + # to work. + config = ConfigParser.SafeConfigParser(defaults={ + 'gce_service_account_email_address': '', + 'gce_service_account_pem_file_path': '', + 'gce_project_id': '', + 'libcloud_secrets': '', + }) + if 'gce' not in config.sections(): + config.add_section('gce') + config.read(gce_ini_path) + + # Attempt to get GCE params from a configuration file, if one + # exists. + secrets_path = config.get('gce', 'libcloud_secrets') + secrets_found = False + try: + import secrets + args = list(getattr(secrets, 'GCE_PARAMS', [])) + kwargs = getattr(secrets, 'GCE_KEYWORD_PARAMS', {}) + secrets_found = True + except: + pass + + if not secrets_found and secrets_path: + if not secrets_path.endswith('secrets.py'): + err = "Must specify libcloud secrets file as " + err += "/absolute/path/to/secrets.py" + print(err) + sys.exit(1) + sys.path.append(os.path.dirname(secrets_path)) + try: + import secrets + args = list(getattr(secrets, 'GCE_PARAMS', [])) + kwargs = getattr(secrets, 'GCE_KEYWORD_PARAMS', {}) + secrets_found = True + except: + pass + if not secrets_found: + args = [ + config.get('gce','gce_service_account_email_address'), + config.get('gce','gce_service_account_pem_file_path') + ] + kwargs = {'project': config.get('gce', 'gce_project_id')} + + # If the appropriate environment variables are set, they override + # other configuration; process those into our args and kwargs. + args[0] = os.environ.get('GCE_EMAIL', args[0]) + args[1] = os.environ.get('GCE_PEM_FILE_PATH', args[1]) + kwargs['project'] = os.environ.get('GCE_PROJECT', kwargs['project']) + + # Retrieve and return the GCE driver. 
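+ # get_driver(Provider.GCE) returns the driver class, not an instance; the call below instantiates it with the credentials assembled above. + # user_agent_append() merely tags outgoing requests for identification.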
+ gce = get_driver(Provider.GCE)(*args, **kwargs) + gce.connection.user_agent_append( + '%s/%s' % (USER_AGENT_PRODUCT, USER_AGENT_VERSION), + ) + return gce + + def parse_cli_args(self): + ''' Command line argument processing ''' + + parser = argparse.ArgumentParser( + description='Produce an Ansible Inventory file based on GCE') + parser.add_argument('--list', action='store_true', default=True, + help='List instances (default: True)') + parser.add_argument('--host', action='store', + help='Get all information about an instance') + parser.add_argument('--pretty', action='store_true', default=False, + help='Pretty format (default: False)') + self.args = parser.parse_args() + + + def node_to_dict(self, inst): + md = {} + + if inst is None: + return {} + + if inst.extra['metadata'].has_key('items'): + for entry in inst.extra['metadata']['items']: + md[entry['key']] = entry['value'] + + net = inst.extra['networkInterfaces'][0]['network'].split('/')[-1] + return { + 'gce_uuid': inst.uuid, + 'gce_id': inst.id, + 'gce_image': inst.image, + 'gce_machine_type': inst.size, + 'gce_private_ip': inst.private_ips[0], + 'gce_public_ip': inst.public_ips[0], + 'gce_name': inst.name, + 'gce_description': inst.extra['description'], + 'gce_status': inst.extra['status'], + 'gce_zone': inst.extra['zone'].name, + 'gce_tags': inst.extra['tags'], + 'gce_metadata': md, + 'gce_network': net, + # Hosts don't have a public name, so we add an IP + 'ansible_ssh_host': inst.public_ips[0] + } + + def get_instance(self, instance_name): + '''Gets details about a specific instance ''' + try: + return self.driver.ex_get_node(instance_name) + except Exception, e: + return None + + def group_instances(self): + '''Group all instances''' + groups = {} + meta = {} + meta["hostvars"] = {} + + for node in self.driver.list_nodes(): + name = node.name + + meta["hostvars"][name] = self.node_to_dict(node) + + zone = node.extra['zone'].name + if groups.has_key(zone): groups[zone].append(name) + else: groups[zone] = [name] + + tags = node.extra['tags'] + for t in tags: + tag = 'tag_%s' % t + if groups.has_key(tag): groups[tag].append(name) + else: groups[tag] = [name] + + net = node.extra['networkInterfaces'][0]['network'].split('/')[-1] + net = 'network_%s' % net + if groups.has_key(net): groups[net].append(name) + else: groups[net] = [name] + + machine_type = node.size + if groups.has_key(machine_type): groups[machine_type].append(name) + else: groups[machine_type] = [name] + + image = node.image and node.image or 'persistent_disk' + if groups.has_key(image): groups[image].append(name) + else: groups[image] = [name] + + status = node.extra['status'] + stat = 'status_%s' % status.lower() + if groups.has_key(stat): groups[stat].append(name) + else: groups[stat] = [name] + + groups["_meta"] = meta + + return groups + + def json_format_dict(self, data, pretty=False): + ''' Converts a dict to a JSON object and dumps it as a formatted + string ''' + + if pretty: + return json.dumps(data, sort_keys=True, indent=2) + else: + return json.dumps(data) + + +# Run the script +GceInventory() diff --git a/inventory/gce/hosts/hosts b/inventory/gce/hosts/hosts new file mode 100644 index 000000000..6c590ac93 --- /dev/null +++ b/inventory/gce/hosts/hosts @@ -0,0 +1 @@ +localhost ansible_sudo=no ansible_python_interpreter=/usr/bin/python2 diff --git a/inventory/libvirt/group_vars/all b/inventory/libvirt/group_vars/all deleted file mode 100644 index b22da00de..000000000 --- a/inventory/libvirt/group_vars/all +++ /dev/null @@ -1,2 +0,0 @@ ---- 
-ansible_ssh_user: root diff --git a/inventory/libvirt/hosts b/inventory/libvirt/hosts deleted file mode 100644 index 6a818f268..000000000 --- a/inventory/libvirt/hosts +++ /dev/null @@ -1,2 +0,0 @@ -# Eventually we'll add the GCE, AWS, etc dynamic inventories, but for now... -localhost ansible_python_interpreter=/usr/bin/python2 diff --git a/inventory/libvirt/hosts/hosts b/inventory/libvirt/hosts/hosts new file mode 100644 index 000000000..9cdc31449 --- /dev/null +++ b/inventory/libvirt/hosts/hosts @@ -0,0 +1 @@ +localhost ansible_sudo=no ansible_python_interpreter=/usr/bin/python2 connection=local diff --git a/inventory/libvirt/hosts/libvirt.ini b/inventory/libvirt/hosts/libvirt.ini new file mode 100644 index 000000000..62ff204dd --- /dev/null +++ b/inventory/libvirt/hosts/libvirt.ini @@ -0,0 +1,20 @@ +# Ansible libvirt external inventory script settings +# + +[libvirt] + +uri = qemu:///system + +# API calls to libvirt can be slow. For this reason, we cache the results of an API +# call. Set this to the path you want cache files to be written to. Two files +# will be written to this directory: +# - ansible-libvirt.cache +# - ansible-libvirt.index +cache_path = /tmp + +# The number of seconds a cache file is considered valid. After this many +# seconds, a new API call will be made, and the cache file will be updated. +cache_max_age = 900 + + + diff --git a/inventory/libvirt/hosts/libvirt_generic.py b/inventory/libvirt/hosts/libvirt_generic.py new file mode 100755 index 000000000..0a98e2af3 --- /dev/null +++ b/inventory/libvirt/hosts/libvirt_generic.py @@ -0,0 +1,179 @@ +#!/usr/bin/env python + +""" +libvirt external inventory script +================================= + +Ansible has a feature where instead of reading from /etc/ansible/hosts +as a text file, it can query external programs to obtain the list +of hosts, groups the hosts are in, and even variables to assign to each host. + +To use this, copy this file over /etc/ansible/hosts and chmod +x the file. +This, more or less, allows you to keep one central database containing +info about all of your managed instances. + +""" + +# (c) 2015, Jason DeTiberus +# +# This file is part of Ansible, +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
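The docstring above describes Ansible's external-inventory contract: called with --list, the script must print a JSON object mapping group names to host lists (with optional per-host variables under _meta.hostvars); called with --host <name>, it must print that host's variables as a JSON object. A minimal conforming script, sketched here with hypothetical static data in place of the live libvirt queries this file performs:

    #!/usr/bin/env python
    # Minimal external-inventory sketch; the static INVENTORY dict stands in
    # for the data libvirt_generic.py builds from running libvirt domains.
    import argparse
    import json

    INVENTORY = {
        'vms': ['guest1'],  # group name -> list of hosts
        '_meta': {'hostvars': {'guest1': {'ansible_ssh_host': '192.168.122.10'}}},
    }

    parser = argparse.ArgumentParser()
    parser.add_argument('--list', action='store_true')
    parser.add_argument('--host')
    args = parser.parse_args()

    if args.host:
        # Unknown hosts get an empty variable map, which Ansible accepts.
        print(json.dumps(INVENTORY['_meta']['hostvars'].get(args.host, {})))
    else:
        print(json.dumps(INVENTORY))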
+ +###################################################################### + +import argparse +import ConfigParser +import os +import re +import sys +from time import time +import libvirt +import xml.etree.ElementTree as ET + +try: + import json +except ImportError: + import simplejson as json + + +class LibvirtInventory(object): + + def __init__(self): + self.inventory = dict() # Groups and the hosts in each group + self.cache = dict() # Details about hosts in the inventory + + # Read settings and parse CLI arguments + self.read_settings() + self.parse_cli_args() + + if self.args.host: + print self.json_format_dict(self.get_host_info(), self.args.pretty) + elif self.args.list: + print self.json_format_dict(self.get_inventory(), self.args.pretty) + else: # default action with no options + print self.json_format_dict(self.get_inventory(), self.args.pretty) + + def read_settings(self): + config = ConfigParser.SafeConfigParser() + config.read( + os.path.dirname(os.path.realpath(__file__)) + '/libvirt.ini' + ) + self.libvirt_uri = config.get('libvirt', 'uri') + + def parse_cli_args(self): + parser = argparse.ArgumentParser( + description='Produce an Ansible Inventory file based on libvirt' + ) + parser.add_argument( + '--list', + action='store_true', + default=True, + help='List instances (default: True)' + ) + parser.add_argument( + '--host', + action='store', + help='Get all the variables about a specific instance' + ) + parser.add_argument( + '--pretty', + action='store_true', + default=False, + help='Pretty format (default: False)' + ) + self.args = parser.parse_args() + + def get_host_info(self): + inventory = self.get_inventory() + if self.args.host in inventory['_meta']['hostvars']: + return inventory['_meta']['hostvars'][self.args.host] + + def get_inventory(self): + inventory = dict(_meta=dict(hostvars=dict())) + + conn = libvirt.openReadOnly(self.libvirt_uri) + if conn is None: + print "Failed to open connection to %s" % self.libvirt_uri + sys.exit(1) + + domains = conn.listAllDomains() + if domains is None: + print "Failed to list domains for connection %s" % self.libvirt_uri + sys.exit(1) + + arp_entries = self.parse_arp_entries() + + for domain in domains: + hostvars = dict(libvirt_name=domain.name(), + libvirt_id=domain.ID(), + libvirt_uuid=domain.UUIDString()) + domain_name = domain.name() + + # TODO: add support for guests that are not in a running state + state, _ = domain.state() + # 1 (VIR_DOMAIN_RUNNING) is the state for a running guest + if state != 1: + continue + + hostvars['libvirt_status'] = 'running' + + root = ET.fromstring(domain.XMLDesc()) + ns = {'ansible': 'https://github.com/ansible/ansible'} + for tag_elem in root.findall('./metadata/ansible:tag', ns): + tag = tag_elem.text + self.push(inventory, "tag_%s" % tag, domain_name) + self.push(hostvars, 'libvirt_tags', tag) + + # TODO: support more than one network interface, also support + # interface types other than 'network' + interface = root.find("./devices/interface[@type='network']") + if interface is not None: + mac_elem = interface.find('mac') + if mac_elem is not None: + mac = mac_elem.get('address') + if mac in arp_entries: + ip_address = arp_entries[mac]['ip_address'] + hostvars['ansible_ssh_host'] = ip_address + hostvars['libvirt_ip_address'] = ip_address + + inventory['_meta']['hostvars'][domain_name] = hostvars + + return inventory + + def parse_arp_entries(self): + arp_entries = dict() + with open('/proc/net/arp', 'r') as f: + # throw away the header + f.readline() + + for line in f: + ip_address, _, _, mac, _, device = 
line.strip().split() + arp_entries[mac] = dict(ip_address=ip_address, device=device) + + return arp_entries + + def push(self, my_dict, key, element): + if key in my_dict: + my_dict[key].append(element) + else: + my_dict[key] = [element] + + def json_format_dict(self, data, pretty=False): + if pretty: + return json.dumps(data, sort_keys=True, indent=2) + else: + return json.dumps(data) + +LibvirtInventory() diff --git a/playbooks/aws/openshift-cluster/config.yml b/playbooks/aws/openshift-cluster/config.yml new file mode 100644 index 000000000..b8961704e --- /dev/null +++ b/playbooks/aws/openshift-cluster/config.yml @@ -0,0 +1,36 @@ +--- +- name: Populate oo_masters_to_config host group + hosts: localhost + gather_facts: no + vars_files: + - vars.yml + tasks: + - name: Evaluate oo_masters_to_config + add_host: + name: "{{ item }}" + groups: oo_masters_to_config + ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" + ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" + with_items: groups["tag_env-host-type_{{ cluster_id }}-openshift-master"] | default([]) + - name: Evaluate oo_nodes_to_config + add_host: + name: "{{ item }}" + groups: oo_nodes_to_config + ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" + ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" + with_items: groups["tag_env-host-type_{{ cluster_id }}-openshift-node"] | default([]) + - name: Evaluate oo_first_master + add_host: + name: "{{ groups['tag_env-host-type_' ~ cluster_id ~ '-openshift-master'][0] }}" + groups: oo_first_master + ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" + ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" + when: "'tag_env-host-type_{{ cluster_id }}-openshift-master' in groups" + +- include: ../../common/openshift-cluster/config.yml + vars: + openshift_cluster_id: "{{ cluster_id }}" + openshift_debug_level: 4 + openshift_deployment_type: "{{ deployment_type }}" + openshift_hostname: "{{ ec2_private_ip_address }}" + openshift_public_hostname: "{{ ec2_ip_address }}" diff --git a/playbooks/aws/openshift-cluster/launch.yml b/playbooks/aws/openshift-cluster/launch.yml index 3561c1803..e7125ea0c 100644 --- a/playbooks/aws/openshift-cluster/launch.yml +++ b/playbooks/aws/openshift-cluster/launch.yml @@ -4,59 +4,26 @@ connection: local gather_facts: no vars_files: - - vars.yml + - vars.yml tasks: - - set_fact: k8s_type="master" - - - name: Generate master instance names(s) - set_fact: scratch={{ cluster_id }}-{{ k8s_type }}-{{ '%05x' |format( 1048576 |random) }} - register: master_names_output - with_sequence: start=1 end={{ num_masters }} - - # These set_fact's cannot be combined - - set_fact: - master_names_string: "{% for item in master_names_output.results %}{{ item.ansible_facts.scratch }} {% endfor %}" - - - set_fact: - master_names: "{{ master_names_string.strip().split(' ') }}" - - - include: launch_instances.yml - vars: - instances: "{{ master_names }}" - cluster: "{{ cluster_id }}" - type: "{{ k8s_type }}" - - - set_fact: k8s_type="node" - - - name: Generate node instance names(s) - set_fact: scratch={{ cluster_id }}-{{ k8s_type }}-{{ '%05x' |format( 1048576 |random) }} - register: node_names_output - with_sequence: start=1 end={{ num_nodes }} - - # These set_fact's cannot be combined - - set_fact: - node_names_string: "{% for item in node_names_output.results %}{{ item.ansible_facts.scratch }} {% endfor %}" - - - set_fact: - node_names: "{{ node_names_string.strip().split(' ') }}" - - - include: launch_instances.yml - 
vars: - instances: "{{ node_names }}" - cluster: "{{ cluster_id }}" - type: "{{ k8s_type }}" - -- hosts: "tag_env_{{ cluster_id }}" - roles: - - openshift_repos - - os_update_latest - -- include: ../openshift-master/config.yml - vars: - oo_host_group_exp: "groups[\"tag_env-host-type_{{ cluster_id }}-openshift-master\"]" - -- include: ../openshift-node/config.yml - vars: - oo_host_group_exp: "groups[\"tag_env-host-type_{{ cluster_id }}-openshift-node\"]" + - fail: + msg: Deployment type not supported for aws provider yet + when: deployment_type == 'enterprise' + + - include: ../../common/openshift-cluster/set_master_launch_facts_tasks.yml + - include: tasks/launch_instances.yml + vars: + instances: "{{ master_names }}" + cluster: "{{ cluster_id }}" + type: "{{ k8s_type }}" + + - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml + - include: tasks/launch_instances.yml + vars: + instances: "{{ node_names }}" + cluster: "{{ cluster_id }}" + type: "{{ k8s_type }}" + +- include: update.yml - include: list.yml diff --git a/playbooks/aws/openshift-cluster/launch_instances.yml b/playbooks/aws/openshift-cluster/launch_instances.yml deleted file mode 100644 index 9d645fbe5..000000000 --- a/playbooks/aws/openshift-cluster/launch_instances.yml +++ /dev/null @@ -1,63 +0,0 @@ ---- -- set_fact: - machine_type: "{{ lookup('env', 'ec2_instance_type')|default('m3.large', true) }}" - machine_image: "{{ lookup('env', 'ec2_ami')|default('ami-307b3658', true) }}" - machine_region: "{{ lookup('env', 'ec2_region')|default('us-east-1', true) }}" - machine_keypair: "{{ lookup('env', 'ec2_keypair')|default('libra', true) }}" - created_by: "{{ lookup('env', 'LOGNAME')|default(cluster, true) }}" - security_group: "{{ lookup('env', 'ec2_security_group')|default('public', true) }}" - env: "{{ cluster }}" - host_type: "{{ type }}" - env_host_type: "{{ cluster }}-openshift-{{ type }}" - -- name: Launch instance(s) - ec2: - state: present - region: "{{ machine_region }}" - keypair: "{{ machine_keypair }}" - group: "{{ security_group }}" - instance_type: "{{ machine_type }}" - image: "{{ machine_image }}" - count: "{{ instances | oo_len }}" - wait: yes - instance_tags: - created-by: "{{ created_by }}" - env: "{{ env }}" - host-type: "{{ host_type }}" - env-host-type: "{{ env_host_type }}" - register: ec2 - -- name: Add Name tag to instances - ec2_tag: resource={{ item.1.id }} region={{ machine_region }} state=present - with_together: - - instances - - ec2.instances - args: - tags: - Name: "{{ item.0 }}" - -- set_fact: - instance_groups: tag_created-by_{{ created_by }}, tag_env_{{ env }}, tag_host-type_{{ host_type }}, tag_env-host-type_{{ env_host_type }} - -- name: Add new instances groups and variables - add_host: - hostname: "{{ item.0 }}" - ansible_ssh_host: "{{ item.1.dns_name }}" - groups: "{{ instance_groups }}" - ec2_private_ip_address: "{{ item.1.private_ip }}" - ec2_ip_address: "{{ item.1.public_ip }}" - with_together: - - instances - - ec2.instances - -- name: Wait for ssh - wait_for: "port=22 host={{ item.dns_name }}" - with_items: ec2.instances - -- name: Wait for root user setup - command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null root@{{ item.dns_name }} echo root user is setup" - register: result - until: result.rc == 0 - retries: 20 - delay: 10 - with_items: ec2.instances diff --git a/playbooks/aws/openshift-cluster/list.yml b/playbooks/aws/openshift-cluster/list.yml index 08e9e2df4..5c04bc320 100644 --- 
a/playbooks/aws/openshift-cluster/list.yml +++ b/playbooks/aws/openshift-cluster/list.yml @@ -2,16 +2,23 @@ - name: Generate oo_list_hosts group hosts: localhost gather_facts: no + vars_files: + - vars.yml tasks: - set_fact: scratch_group=tag_env_{{ cluster_id }} when: cluster_id != '' - set_fact: scratch_group=all - when: scratch_group is not defined - - add_host: name={{ item }} groups=oo_list_hosts - with_items: groups[scratch_group] | difference(['localhost']) + when: cluster_id == '' + - add_host: + name: "{{ item }}" + groups: oo_list_hosts + ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" + ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" + with_items: groups[scratch_group] | default([]) | difference(['localhost']) - name: List Hosts hosts: oo_list_hosts gather_facts: no tasks: - - debug: msg="public:{{hostvars[inventory_hostname].ec2_ip_address}} private:{{hostvars[inventory_hostname].ec2_private_ip_address}}" + - debug: + msg: "public ip:{{ hostvars[inventory_hostname].ec2_ip_address }} private ip:{{ hostvars[inventory_hostname].ec2_private_ip_address }} deployment-type: {{ hostvars[inventory_hostname].group_names | oo_get_deployment_type_from_groups }}" diff --git a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml new file mode 100644 index 000000000..58b4082df --- /dev/null +++ b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml @@ -0,0 +1,69 @@ +--- +# TODO: modify machine_image based on deployment_type +- set_fact: + machine_type: "{{ lookup('env', 'ec2_instance_type') | default('m3.large', true) }}" + machine_image: "{{ lookup('env', 'ec2_ami') | default(deployment_vars[deployment_type].image, true) }}" + machine_region: "{{ lookup('env', 'ec2_region') | default(deployment_vars[deployment_type].region, true) }}" + machine_keypair: "{{ lookup('env', 'ec2_keypair')|default('libra', true) }}" + created_by: "{{ lookup('env', 'LOGNAME')|default(cluster, true) }}" + security_group: "{{ lookup('env', 'ec2_security_group')|default('public', true) }}" + env: "{{ cluster }}" + host_type: "{{ type }}" + env_host_type: "{{ cluster }}-openshift-{{ type }}" + +- name: Launch instance(s) + ec2: + state: present + region: "{{ machine_region }}" + keypair: "{{ machine_keypair }}" + group: "{{ security_group }}" + instance_type: "{{ machine_type }}" + image: "{{ machine_image }}" + count: "{{ instances | oo_len }}" + wait: yes + instance_tags: + created-by: "{{ created_by }}" + env: "{{ env }}" + host-type: "{{ host_type }}" + env-host-type: "{{ env_host_type }}" + deployment-type: "{{ deployment_type }}" + register: ec2 + +- name: Add Name tag to instances + ec2_tag: resource={{ item.1.id }} region={{ machine_region }} state=present + with_together: + - instances + - ec2.instances + args: + tags: + Name: "{{ item.0 }}" + +- set_fact: + instance_groups: tag_created-by_{{ created_by }}, tag_env_{{ env }}, tag_host-type_{{ host_type }}, tag_env-host-type_{{ env_host_type }}, tag_deployment-type_{{ deployment_type }} + +- name: Add new instances groups and variables + add_host: + hostname: "{{ item.0 }}" + ansible_ssh_host: "{{ item.1.dns_name }}" + ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" + ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" + groups: "{{ instance_groups }}" + ec2_private_ip_address: "{{ item.1.private_ip }}" + ec2_ip_address: "{{ item.1.public_ip }}" + with_together: + - instances + - ec2.instances + +- name: Wait for ssh + wait_for: 
"port=22 host={{ item.dns_name }}" + with_items: ec2.instances + +- name: Wait for user setup + command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null {{ hostvars[item.0].ansible_ssh_user }}@{{ item.1.dns_name }} echo {{ hostvars[item.0].ansible_ssh_user }} user is setup" + register: result + until: result.rc == 0 + retries: 20 + delay: 10 + with_together: + - instances + - ec2.instances diff --git a/playbooks/aws/openshift-cluster/terminate.yml b/playbooks/aws/openshift-cluster/terminate.yml index 39607633a..1d2b60594 100644 --- a/playbooks/aws/openshift-cluster/terminate.yml +++ b/playbooks/aws/openshift-cluster/terminate.yml @@ -1,14 +1,26 @@ --- - name: Terminate instance(s) hosts: localhost - + gather_facts: no vars_files: - - vars.yml + - vars.yml + tasks: + - set_fact: scratch_group=tag_env-host-type_{{ cluster_id }}-openshift-node + - add_host: + name: "{{ item }}" + groups: oo_nodes_to_terminate + ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" + ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" + with_items: groups[scratch_group] | default([]) | difference(['localhost']) + + - set_fact: scratch_group=tag_env-host-type_{{ cluster_id }}-openshift-master + - add_host: + name: "{{ item }}" + groups: oo_masters_to_terminate + ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" + ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" + with_items: groups[scratch_group] | default([]) | difference(['localhost']) - include: ../openshift-node/terminate.yml - vars: - oo_host_group_exp: 'groups["tag_env-host-type_{{ cluster_id }}-openshift-node"]' - include: ../openshift-master/terminate.yml - vars: - oo_host_group_exp: 'groups["tag_env-host-type_{{ cluster_id }}-openshift-master"]' diff --git a/playbooks/aws/openshift-cluster/update.yml b/playbooks/aws/openshift-cluster/update.yml index 90ecdc6ab..5e7ab4e58 100644 --- a/playbooks/aws/openshift-cluster/update.yml +++ b/playbooks/aws/openshift-cluster/update.yml @@ -1,13 +1,18 @@ --- -- hosts: "tag_env_{{ cluster_id }}" - roles: - - openshift_repos - - os_update_latest +- name: Populate oo_hosts_to_update group + hosts: localhost + gather_facts: no + vars_files: + - vars.yml + tasks: + - name: Evaluate oo_hosts_to_update + add_host: + name: "{{ item }}" + groups: oo_hosts_to_update + ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" + ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" + with_items: groups["tag_env-host-type_{{ cluster_id }}-openshift-master"] | union(groups["tag_env-host-type_{{ cluster_id }}-openshift-node"]) | default([]) -- include: ../openshift-master/config.yml - vars: - oo_host_group_exp: "groups[\"tag_env-host-type_{{ cluster_id }}-openshift-master\"]" +- include: ../../common/openshift-cluster/update_repos_and_packages.yml -- include: ../openshift-node/config.yml - vars: - oo_host_group_exp: "groups[\"tag_env-host-type_{{ cluster_id }}-openshift-node\"]" +- include: config.yml diff --git a/playbooks/aws/openshift-cluster/vars.yml b/playbooks/aws/openshift-cluster/vars.yml index ed97d539c..f0df3d6f5 100644 --- a/playbooks/aws/openshift-cluster/vars.yml +++ b/playbooks/aws/openshift-cluster/vars.yml @@ -1 +1,20 @@ --- +deployment_vars: + origin: + # fedora, since centos requires marketplace + image: ami-acd999c4 + region: us-east-1 + ssh_user: fedora + sudo: yes + online: + # private ami + image: ami-307b3658 + region: us-east-1 + ssh_user: root + sudo: no + enterprise: + # 
rhel-7.1, requires cloud access subscription + image: ami-10663b78 + region: us-east-1 + ssh_user: ec2-user + sudo: yes diff --git a/playbooks/aws/openshift-master/config.yml b/playbooks/aws/openshift-master/config.yml index 1c4060eee..37ab4fbe6 100644 --- a/playbooks/aws/openshift-master/config.yml +++ b/playbooks/aws/openshift-master/config.yml @@ -1,24 +1,19 @@ --- -- name: Populate oo_masters_to_config host group if needed +- name: Populate oo_masters_to_config host group hosts: localhost gather_facts: no tasks: - - name: "Evaluate oo_host_group_exp if it's set" - add_host: "name={{ item }} groups=oo_masters_to_config" - with_items: "{{ oo_host_group_exp | default('') }}" - when: oo_host_group_exp is defined + - name: Evaluate oo_masters_to_config + add_host: + name: "{{ item }}" + groups: oo_masters_to_config + ansible_ssh_user: root + with_items: oo_host_group_exp | default([]) -- name: Configure instances - hosts: oo_masters_to_config +- include: ../../common/openshift-master/config.yml vars: + openshift_cluster_id: "{{ cluster_id }}" + openshift_debug_level: 4 + openshift_deployment_type: "{{ deployment_type }}" openshift_hostname: "{{ ec2_private_ip_address }}" openshift_public_hostname: "{{ ec2_ip_address }}" - # TODO: this should be removed once openshift-sdn packages are available - openshift_use_openshift_sdn: False - vars_files: - - vars.yml - roles: - - openshift_master - #- openshift_sdn_master - - pods - - os_env_extras diff --git a/playbooks/aws/openshift-master/launch.yml b/playbooks/aws/openshift-master/launch.yml index 3d87879a0..6b3751682 100644 --- a/playbooks/aws/openshift-master/launch.yml +++ b/playbooks/aws/openshift-master/launch.yml @@ -4,14 +4,12 @@ connection: local gather_facts: no +# TODO: modify atomic_ami based on deployment_type vars: inst_region: us-east-1 atomic_ami: ami-86781fee user_data_file: user_data.txt - vars_files: - - vars.yml - tasks: - name: Launch instances ec2: @@ -40,7 +38,7 @@ Name: "{{ item.0 }}" - name: Add other tags to instances - ec2_tag: "resource={{ item.id }} region={{ inst_region }} state=present" + ec2_tag: resource={{ item.id }} region={{ inst_region }} state=present with_items: ec2.instances args: tags: "{{ oo_new_inst_tags }}" @@ -57,7 +55,7 @@ - ec2.instances - name: Wait for ssh - wait_for: "port=22 host={{ item.dns_name }}" + wait_for: port=22 host={{ item.dns_name }} with_items: ec2.instances - name: Wait for root user setup diff --git a/playbooks/aws/openshift-master/terminate.yml b/playbooks/aws/openshift-master/terminate.yml index fd15cf00f..a790336b1 100644 --- a/playbooks/aws/openshift-master/terminate.yml +++ b/playbooks/aws/openshift-master/terminate.yml @@ -1,15 +1,15 @@ --- -- name: Populate oo_masters_to_terminate host group if needed +- name: Populate oo_masters_to_terminate host group hosts: localhost gather_facts: no tasks: - - name: Evaluate oo_host_group_exp if it's set - add_host: "name={{ item }} groups=oo_masters_to_terminate" - with_items: "{{ oo_host_group_exp | default('') }}" - when: oo_host_group_exp is defined + - name: Evaluate oo_masters_to_terminate + add_host: name={{ item }} groups=oo_masters_to_terminate + with_items: oo_host_group_exp | default([]) -- name: Gather facts for instances to terminate +- name: Gather dynamic inventory variables for hosts to terminate hosts: oo_masters_to_terminate + gather_facts: no - name: Terminate instances hosts: localhost @@ -27,11 +27,12 @@ ignore_errors: yes register: ec2_term with_items: host_vars + when: "'oo_masters_to_terminate' in groups" # Fail if 
any of the instances failed to terminate with an error other # than 403 Forbidden - fail: msg=Terminating instance {{ item.item.ec2_id }} failed with message {{ item.msg }} - when: "item.failed and not item.msg | search(\"error: EC2ResponseError: 403 Forbidden\")" + when: "'oo_masters_to_terminate' in groups and item.failed and not item.msg | search(\"error: EC2ResponseError: 403 Forbidden\")" with_items: ec2_term.results - name: Stop instance if termination failed @@ -42,6 +43,7 @@ register: ec2_stop when: item.failed with_items: ec2_term.results + when: "'oo_masters_to_terminate' in groups" - name: Rename stopped instances ec2_tag: resource={{ item.item.item.ec2_id }} region={{ item.item.item.ec2_region }} state=present @@ -49,4 +51,5 @@ tags: Name: "{{ item.item.item.ec2_tag_Name }}-terminate" with_items: ec2_stop.results + when: "'oo_masters_to_terminate' in groups" diff --git a/playbooks/aws/openshift-master/vars.yml b/playbooks/aws/openshift-master/vars.yml deleted file mode 100644 index c196b2fca..000000000 --- a/playbooks/aws/openshift-master/vars.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -openshift_debug_level: 4 -openshift_cluster_id: "{{ cluster_id }}" diff --git a/playbooks/aws/openshift-node/config.yml b/playbooks/aws/openshift-node/config.yml index b08ed7571..fc9b397b4 100644 --- a/playbooks/aws/openshift-node/config.yml +++ b/playbooks/aws/openshift-node/config.yml @@ -1,107 +1,25 @@ --- -- name: Populate oo_nodes_to_config host group if needed +- name: Populate oo_nodes_to_config and oo_first_master host groups hosts: localhost gather_facts: no tasks: - - name: Evaluate oo_host_group_exp - add_host: "name={{ item }} groups=oo_nodes_to_config" - with_items: "{{ oo_host_group_exp | default('') }}" - when: oo_host_group_exp is defined - - add_host: + - name: Evaluate oo_nodes_to_config + add_host: + name: "{{ item }}" + groups: oo_nodes_to_config + ansible_ssh_user: root + with_items: oo_host_group_exp | default([]) + - name: Evaluate oo_first_master + add_host: name: "{{ groups['tag_env-host-type_' ~ cluster_id ~ '-openshift-master'][0] }}" groups: oo_first_master - when: oo_host_group_exp is defined + ansible_ssh_user: root -- name: Gather and set facts for hosts to configure - hosts: oo_nodes_to_config - roles: - - openshift_facts - tasks: - # Since the master is registering the nodes before they are configured, we - # need to make sure to set the node properties beforehand if we do not want - # the defaults - - openshift_facts: - role: "{{ item.role }}" - local_facts: "{{ item.local_facts }}" - with_items: - - role: common - local_facts: - hostname: "{{ ec2_private_ip_address }}" - public_hostname: "{{ ec2_ip_address }}" - # TODO: this should be removed once openshift-sdn packages are available - use_openshift_sdn: False - - role: node - local_facts: - external_id: "{{ openshift_node_external_id | default(None) }}" - resources_cpu: "{{ openshfit_node_resources_cpu | default(None) }}" - resources_memory: "{{ openshfit_node_resources_memory | default(None) }}" - pod_cidr: "{{ openshfit_node_pod_cidr | default(None) }}" - labels: "{{ openshfit_node_labels | default(None) }}" - annotations: "{{ openshfit_node_annotations | default(None) }}" - - -- name: Register nodes - hosts: oo_first_master - vars: - openshift_nodes: "{{ hostvars - | oo_select_keys(groups['oo_nodes_to_config']) }}" - roles: - - openshift_register_nodes - tasks: - - name: Create local temp directory for syncing certs - local_action: command /usr/bin/mktemp -d /tmp/openshift-ansible-XXXXXXX - register: mktemp - - 
- name: Sync master certs to localhost - synchronize: - mode: pull - checksum: yes - src: /var/lib/openshift/openshift.local.certificates - dest: "{{ mktemp.stdout }}" - - -- name: Configure instances - hosts: oo_nodes_to_config - vars_files: - - vars.yml +- include: ../../common/openshift-node/config.yml vars: + openshift_cluster_id: "{{ cluster_id }}" + openshift_debug_level: 4 + openshift_deployment_type: "{{ deployment_type }}" openshift_hostname: "{{ ec2_private_ip_address }}" openshift_public_hostname: "{{ ec2_ip_address }}" - sync_tmpdir: "{{ hostvars[groups['oo_first_master'][0]].mktemp.stdout }}" - cert_parent_rel_path: openshift.local.certificates - cert_rel_path: "{{ cert_parent_rel_path }}/node-{{ openshift.common.hostname }}" - cert_base_path: /var/lib/openshift - cert_parent_path: "{{ cert_base_path }}/{{ cert_parent_rel_path }}" - cert_path: "{{ cert_base_path }}/{{ cert_rel_path }}" - pre_tasks: - - name: Ensure certificate directories exists - file: - path: "{{ item }}" - state: directory - with_items: - - "{{ cert_path }}" - - "{{ cert_parent_path }}/ca" - - # TODO: notify restart openshift-node and/or restart openshift-sdn-node, - # possibly test service started time against certificate/config file - # timestamps in openshift-node or openshift-sdn-node to trigger notify - - name: Sync certs to nodes - synchronize: - checksum: yes - src: "{{ item.src }}" - dest: "{{ item.dest }}" - owner: no - group: no - with_items: - - src: "{{ sync_tmpdir }}/{{ cert_rel_path }}" - dest: "{{ cert_parent_path }}" - - src: "{{ sync_tmpdir }}/{{ cert_parent_rel_path }}/ca/cert.crt" - dest: "{{ cert_parent_path }}/ca/cert.crt" - - local_action: file name={{ sync_tmpdir }} state=absent - run_once: true - roles: - - openshift_node - #- openshift_sdn_node - - os_env_extras - - os_env_extras_node diff --git a/playbooks/aws/openshift-node/launch.yml b/playbooks/aws/openshift-node/launch.yml index b7ef593e7..36aee14ff 100644 --- a/playbooks/aws/openshift-node/launch.yml +++ b/playbooks/aws/openshift-node/launch.yml @@ -4,14 +4,12 @@ connection: local gather_facts: no +# TODO: modify atomic_ami based on deployment_type vars: inst_region: us-east-1 atomic_ami: ami-86781fee user_data_file: user_data.txt - vars_files: - - vars.yml - tasks: - name: Launch instances ec2: @@ -33,7 +31,7 @@ with_items: ec2.instances - name: Add Name and environment tags to instances - ec2_tag: "resource={{ item.1.id }} region={{ inst_region }} state=present" + ec2_tag: resource={{ item.1.id }} region={{ inst_region }} state=present with_together: - oo_new_inst_names - ec2.instances @@ -42,7 +40,7 @@ Name: "{{ item.0 }}" - name: Add other tags to instances - ec2_tag: "resource={{ item.id }} region={{ inst_region }} state=present" + ec2_tag: resource={{ item.id }} region={{ inst_region }} state=present with_items: ec2.instances args: tags: "{{ oo_new_inst_tags }}" @@ -59,7 +57,7 @@ - ec2.instances - name: Wait for ssh - wait_for: "port=22 host={{ item.dns_name }}" + wait_for: port=22 host={{ item.dns_name }} with_items: ec2.instances - name: Wait for root user setup diff --git a/playbooks/aws/openshift-node/terminate.yml b/playbooks/aws/openshift-node/terminate.yml index 1c0c77eb7..40ae56f99 100644 --- a/playbooks/aws/openshift-node/terminate.yml +++ b/playbooks/aws/openshift-node/terminate.yml @@ -1,15 +1,15 @@ --- -- name: Populate oo_nodes_to_terminate host group if needed +- name: Populate oo_nodes_to_terminate host group hosts: localhost gather_facts: no tasks: - - name: Evaluate oo_host_group_exp if it's set - 
add_host: "name={{ item }} groups=oo_nodes_to_terminate" - with_items: "{{ oo_host_group_exp | default('') }}" - when: oo_host_group_exp is defined + - name: Evaluate oo_nodes_to_terminate + add_host: name={{ item }} groups=oo_nodes_to_terminate + with_items: oo_host_group_exp | default([]) -- name: Gather facts for instances to terminate +- name: Gather dynamic inventory variables for hosts to terminate hosts: oo_nodes_to_terminate + gather_facts: no - name: Terminate instances hosts: localhost @@ -27,11 +27,12 @@ ignore_errors: yes register: ec2_term with_items: host_vars + when: "'oo_nodes_to_terminate' in groups" # Fail if any of the instances failed to terminate with an error other # than 403 Forbidden - fail: msg=Terminating instance {{ item.item.ec2_id }} failed with message {{ item.msg }} - when: "item.failed and not item.msg | search(\"error: EC2ResponseError: 403 Forbidden\")" + when: "'oo_nodes_to_terminate' in groups and item.failed and not item.msg | search(\"error: EC2ResponseError: 403 Forbidden\")" with_items: ec2_term.results - name: Stop instance if termination failed @@ -42,6 +43,7 @@ register: ec2_stop when: item.failed with_items: ec2_term.results + when: "'oo_nodes_to_terminate' in groups" - name: Rename stopped instances ec2_tag: resource={{ item.item.item.ec2_id }} region={{ item.item.item.ec2_region }} state=present @@ -49,4 +51,5 @@ tags: Name: "{{ item.item.item.ec2_tag_Name }}-terminate" with_items: ec2_stop.results + when: "'oo_nodes_to_terminate' in groups" diff --git a/playbooks/aws/openshift-node/vars.yml b/playbooks/aws/openshift-node/vars.yml deleted file mode 100644 index c196b2fca..000000000 --- a/playbooks/aws/openshift-node/vars.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -openshift_debug_level: 4 -openshift_cluster_id: "{{ cluster_id }}" diff --git a/playbooks/byo/openshift-master/config.yml b/playbooks/byo/openshift-master/config.yml index 706f9285c..f61d277c6 100644 --- a/playbooks/byo/openshift-master/config.yml +++ b/playbooks/byo/openshift-master/config.yml @@ -1,9 +1,15 @@ --- -- name: Gather facts for node hosts - hosts: nodes +- name: Populate oo_masters_to_config host group + hosts: localhost + gather_facts: no + tasks: + - add_host: + name: "{{ item }}" + groups: oo_masters_to_config + with_items: groups['masters'] -- name: Configure master instances - hosts: masters - roles: - - openshift_master - - openshift_sdn_master +- include: ../../common/openshift-master/config.yml + vars: + openshift_cluster_id: "{{ cluster_id | default('default') }}" + openshift_debug_level: 4 + openshift_deployment_type: "{{ deployment_type }}" diff --git a/playbooks/byo/openshift-node/config.yml b/playbooks/byo/openshift-node/config.yml index 69ad7a840..d569827b4 100644 --- a/playbooks/byo/openshift-node/config.yml +++ b/playbooks/byo/openshift-node/config.yml @@ -1,79 +1,21 @@ --- -- name: Gather facts for node hosts - hosts: nodes - roles: - - openshift_facts +- name: Populate oo_nodes_to_config and oo_first_master host groups + hosts: localhost + gather_facts: no tasks: - # Since the master is registering the nodes before they are configured, we - # need to make sure to set the node properties beforehand if we do not want - # the defaults - - openshift_facts: - role: 'node' - local_facts: - hostname: "{{ openshift_hostname | default(None) }}" - external_id: "{{ openshift_node_external_id | default(None) }}" - resources_cpu: "{{ openshfit_node_resources_cpu | default(None) }}" - resources_memory: "{{ openshfit_node_resources_memory | default(None) }}" - pod_cidr: 
"{{ openshfit_node_pod_cidr | default(None) }}" - labels: "{{ openshfit_node_labels | default(None) }}" - annotations: "{{ openshfit_node_annotations | default(None) }}" + - name: Evaluate oo_nodes_to_config + add_host: + name: "{{ item }}" + groups: oo_nodes_to_config + with_items: groups.nodes + - name: Evaluate oo_first_master + add_host: + name: "{{ groups.masters[0] }}" + groups: oo_first_master -- name: Register nodes - hosts: masters[0] +- include: ../../common/openshift-node/config.yml vars: - openshift_nodes: "{{ hostvars | oo_select_keys(groups['nodes']) }}" - roles: - - openshift_register_nodes - tasks: - - name: Create local temp directory for syncing certs - local_action: command /usr/bin/mktemp -d /tmp/openshift-ansible-XXXXXXX - register: mktemp - - - name: Sync master certs to localhost - synchronize: - mode: pull - checksum: yes - src: /var/lib/openshift/openshift.local.certificates - dest: "{{ mktemp.stdout }}" - - -- name: Configure node instances - hosts: nodes - vars: - sync_tmpdir: "{{ hostvars[groups['masters'][0]].mktemp.stdout }}" - cert_parent_rel_path: openshift.local.certificates - cert_rel_path: "{{ cert_parent_rel_path }}/node-{{ openshift.common.hostname }}" - cert_base_path: /var/lib/openshift - cert_parent_path: "{{ cert_base_path }}/{{ cert_parent_rel_path }}" - cert_path: "{{ cert_base_path }}/{{ cert_rel_path }}" - openshift_sdn_master_url: http://{{ hostvars[groups['masters'][0]].openshift.common.hostname }}:4001 - pre_tasks: - - name: Ensure certificate directories exists - file: - path: "{{ item }}" - state: directory - with_items: - - "{{ cert_path }}" - - "{{ cert_parent_path }}/ca" - - # TODO: notify restart openshift-node and/or restart openshift-sdn-node, - # possibly test service started time against certificate/config file - # timestamps in openshift-node or openshift-sdn-node to trigger notify - - name: Sync certs to nodes - synchronize: - checksum: yes - src: "{{ item.src }}" - dest: "{{ item.dest }}" - owner: no - group: no - with_items: - - src: "{{ sync_tmpdir }}/{{ cert_rel_path }}" - dest: "{{ cert_parent_path }}" - - src: "{{ sync_tmpdir }}/{{ cert_parent_rel_path }}/ca/cert.crt" - dest: "{{ cert_parent_path }}/ca/cert.crt" - - local_action: file name={{ sync_tmpdir }} state=absent - run_once: true - roles: - - openshift_node - - openshift_sdn_node + openshift_cluster_id: "{{ cluster_id | default('default') }}" + openshift_debug_level: 4 + openshift_deployment_type: "{{ deployment_type }}" diff --git a/playbooks/byo/openshift_facts.yml b/playbooks/byo/openshift_facts.yml new file mode 100644 index 000000000..cd282270f --- /dev/null +++ b/playbooks/byo/openshift_facts.yml @@ -0,0 +1,10 @@ +--- +- name: Gather OpenShift facts + hosts: all + gather_facts: no + roles: + - openshift_facts + tasks: + - openshift_facts: + register: result + - debug: var=result diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml new file mode 100644 index 000000000..14ffa928f --- /dev/null +++ b/playbooks/common/openshift-cluster/config.yml @@ -0,0 +1,4 @@ +--- +- include: ../openshift-master/config.yml + +- include: ../openshift-node/config.yml diff --git a/playbooks/common/openshift-cluster/filter_plugins b/playbooks/common/openshift-cluster/filter_plugins new file mode 120000 index 000000000..99a95e4ca --- /dev/null +++ b/playbooks/common/openshift-cluster/filter_plugins @@ -0,0 +1 @@ +../../../filter_plugins \ No newline at end of file diff --git a/playbooks/common/openshift-cluster/roles 
b/playbooks/common/openshift-cluster/roles new file mode 120000 index 000000000..20c4c58cf --- /dev/null +++ b/playbooks/common/openshift-cluster/roles @@ -0,0 +1 @@ +../../../roles \ No newline at end of file diff --git a/playbooks/common/openshift-cluster/set_master_launch_facts_tasks.yml b/playbooks/common/openshift-cluster/set_master_launch_facts_tasks.yml new file mode 100644 index 000000000..118727273 --- /dev/null +++ b/playbooks/common/openshift-cluster/set_master_launch_facts_tasks.yml @@ -0,0 +1,11 @@ +--- +- set_fact: k8s_type="master" + +- name: Generate master instance name(s) + set_fact: + scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ '%05x' | format(1048576 | random) }}" + register: master_names_output + with_sequence: start=1 end={{ num_masters }} + +- set_fact: + master_names: "{{ master_names_output.results | oo_collect('ansible_facts') | oo_collect('scratch_name') }}" diff --git a/playbooks/common/openshift-cluster/set_node_launch_facts_tasks.yml b/playbooks/common/openshift-cluster/set_node_launch_facts_tasks.yml new file mode 100644 index 000000000..162315d46 --- /dev/null +++ b/playbooks/common/openshift-cluster/set_node_launch_facts_tasks.yml @@ -0,0 +1,11 @@ +--- +- set_fact: k8s_type="node" + +- name: Generate node instance name(s) + set_fact: + scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ '%05x' | format(1048576 | random) }}" + register: node_names_output + with_sequence: start=1 end={{ num_nodes }} + +- set_fact: + node_names: "{{ node_names_output.results | oo_collect('ansible_facts') | oo_collect('scratch_name') }}" diff --git a/playbooks/common/openshift-cluster/update_repos_and_packages.yml b/playbooks/common/openshift-cluster/update_repos_and_packages.yml new file mode 100644 index 000000000..e92c6f1ee --- /dev/null +++ b/playbooks/common/openshift-cluster/update_repos_and_packages.yml @@ -0,0 +1,7 @@ +--- +- hosts: oo_hosts_to_update + vars: + openshift_deployment_type: "{{ deployment_type }}" + roles: + - openshift_repos + - os_update_latest diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml new file mode 100644 index 000000000..05822d118 --- /dev/null +++ b/playbooks/common/openshift-master/config.yml @@ -0,0 +1,19 @@ +--- +- name: Configure master instances + hosts: oo_masters_to_config + vars: + openshift_sdn_master_url: https://{{ openshift.common.hostname }}:4001 + roles: + - openshift_master + - { role: openshift_sdn_master, when: openshift.common.use_openshift_sdn | bool } + tasks: + - name: Create group for deployment type + group_by: key=oo_masters_deployment_type_{{ openshift.common.deployment_type }} + changed_when: False + +# Additional instance config for online deployments +- name: Additional instance config + hosts: oo_masters_deployment_type_online + roles: + - pods + - os_env_extras diff --git a/playbooks/common/openshift-master/filter_plugins b/playbooks/common/openshift-master/filter_plugins new file mode 120000 index 000000000..99a95e4ca --- /dev/null +++ b/playbooks/common/openshift-master/filter_plugins @@ -0,0 +1 @@ +../../../filter_plugins \ No newline at end of file diff --git a/playbooks/common/openshift-master/roles b/playbooks/common/openshift-master/roles new file mode 120000 index 000000000..e2b799b9d --- /dev/null +++ b/playbooks/common/openshift-master/roles @@ -0,0 +1 @@ +../../../roles/ \ No newline at end of file diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml new file mode 100644 index 
000000000..c82d69c28 --- /dev/null +++ b/playbooks/common/openshift-node/config.yml @@ -0,0 +1,121 @@ +--- +- name: Gather and set facts for node hosts + hosts: oo_nodes_to_config + roles: + - openshift_facts + tasks: + # Since the master is registering the nodes before they are configured, we + # need to make sure to set the node properties beforehand if we do not want + # the defaults + - openshift_facts: + role: "{{ item.role }}" + local_facts: "{{ item.local_facts }}" + with_items: + - role: common + local_facts: + hostname: "{{ openshift_hostname | default(None) }}" + public_hostname: "{{ openshift_public_hostname | default(None) }}" + - role: node + local_facts: + external_id: "{{ openshift_node_external_id | default(None) }}" + resources_cpu: "{{ openshift_node_resources_cpu | default(None) }}" + resources_memory: "{{ openshift_node_resources_memory | default(None) }}" + pod_cidr: "{{ openshift_node_pod_cidr | default(None) }}" + labels: "{{ openshift_node_labels | default(None) }}" + annotations: "{{ openshift_node_annotations | default(None) }}" + deployment_type: "{{ openshift_deployment_type }}" + + +- name: Create temp directory for syncing certs + hosts: localhost + gather_facts: no + tasks: + - name: Create local temp directory for syncing certs + local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX + register: mktemp + changed_when: False + + +- name: Register nodes + hosts: oo_first_master + vars: + openshift_nodes: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']) }}" + sync_tmpdir: "{{ hostvars.localhost.mktemp.stdout }}" + roles: + - openshift_register_nodes + tasks: + - name: Create the temp directory on the master + file: + path: "{{ sync_tmpdir }}" + owner: "{{ ansible_ssh_user }}" + mode: 0700 + state: directory + changed_when: False + + - name: Create a tarball of the node config directories + command: tar -czvf {{ sync_tmpdir }}/{{ item.openshift.common.hostname }}.tgz ./ + args: + chdir: "{{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}" + with_items: openshift_nodes + changed_when: False + + - name: Retrieve the node config tarballs from the master + fetch: + src: "{{ sync_tmpdir }}/{{ item.openshift.common.hostname }}.tgz" + dest: "{{ sync_tmpdir }}/" + flat: yes + fail_on_missing: yes + validate_checksum: yes + with_items: openshift_nodes + changed_when: False + + - name: Remove the temp directory on the master + file: + path: "{{ sync_tmpdir }}" + state: absent + changed_when: False + + +- name: Configure node instances + hosts: oo_nodes_to_config + gather_facts: no + vars: + sync_tmpdir: "{{ hostvars.localhost.mktemp.stdout }}" + openshift_sdn_master_url: "https://{{ hostvars[groups['oo_first_master'][0]].openshift.common.hostname }}:4001" + pre_tasks: + - name: Ensure certificate directory exists + file: + path: "{{ openshift_node_cert_dir }}" + state: directory + + # TODO: notify restart openshift-node and/or restart openshift-sdn-node, + # possibly test service started time against certificate/config file + # timestamps in openshift-node or openshift-sdn-node to trigger notify + - name: Unarchive the tarball on the node + unarchive: + src: "{{ sync_tmpdir }}/{{ openshift.common.hostname }}.tgz" + dest: "{{ openshift_node_cert_dir }}" + roles: + - openshift_node + - { role: openshift_sdn_node, when: openshift.common.use_openshift_sdn | bool } + tasks: + - name: Create group for deployment type + group_by: key=oo_nodes_deployment_type_{{ openshift.common.deployment_type }} + changed_when: False + + +- name: Delete 
temporary directory + hosts: localhost + gather_facts: no + tasks: + - file: name={{ mktemp.stdout }} state=absent + changed_when: False + + +# Additional config for online type deployments +- name: Additional instance config + hosts: oo_nodes_deployment_type_online + gather_facts: no + roles: + - os_env_extras + - os_env_extras_node diff --git a/playbooks/common/openshift-node/filter_plugins b/playbooks/common/openshift-node/filter_plugins new file mode 120000 index 000000000..99a95e4ca --- /dev/null +++ b/playbooks/common/openshift-node/filter_plugins @@ -0,0 +1 @@ +../../../filter_plugins \ No newline at end of file diff --git a/playbooks/common/openshift-node/roles b/playbooks/common/openshift-node/roles new file mode 120000 index 000000000..e2b799b9d --- /dev/null +++ b/playbooks/common/openshift-node/roles @@ -0,0 +1 @@ +../../../roles/ \ No newline at end of file diff --git a/playbooks/gce/openshift-cluster/config.yml b/playbooks/gce/openshift-cluster/config.yml new file mode 100644 index 000000000..8b8490246 --- /dev/null +++ b/playbooks/gce/openshift-cluster/config.yml @@ -0,0 +1,37 @@ +--- +# TODO: fix firewall related bug with GCE and origin, since GCE is overriding +# /etc/sysconfig/iptables +- name: Populate oo_masters_to_config host group + hosts: localhost + gather_facts: no + vars_files: + - vars.yml + tasks: + - name: Evaluate oo_masters_to_config + add_host: + name: "{{ item }}" + groups: oo_masters_to_config + ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}" + ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" + with_items: groups["tag_env-host-type-{{ cluster_id }}-openshift-master"] | default([]) + - name: Evaluate oo_nodes_to_config + add_host: + name: "{{ item }}" + groups: oo_nodes_to_config + ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}" + ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" + with_items: groups["tag_env-host-type-{{ cluster_id }}-openshift-node"] | default([]) + - name: Evaluate oo_first_master + add_host: + name: "{{ groups['tag_env-host-type-' ~ cluster_id ~ '-openshift-master'][0] }}" + groups: oo_first_master + ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}" + ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" + when: "'tag_env-host-type-{{ cluster_id }}-openshift-master' in groups" + +- include: ../../common/openshift-cluster/config.yml + vars: + openshift_cluster_id: "{{ cluster_id }}" + openshift_debug_level: 4 + openshift_deployment_type: "{{ deployment_type }}" + openshift_hostname: "{{ gce_private_ip }}" diff --git a/playbooks/gce/openshift-cluster/launch.yml b/playbooks/gce/openshift-cluster/launch.yml index 14cdd2537..34a5a0b94 100644 --- a/playbooks/gce/openshift-cluster/launch.yml +++ b/playbooks/gce/openshift-cluster/launch.yml @@ -4,59 +4,25 @@ connection: local gather_facts: no vars_files: - - vars.yml + - vars.yml tasks: - - set_fact: k8s_type="master" - - - name: Generate master instance names(s) - set_fact: scratch={{ cluster_id }}-{{ k8s_type }}-{{ '%05x' |format( 1048576 |random) }} - register: master_names_output - with_sequence: start=1 end={{ num_masters }} - - # These set_fact's cannot be combined - - set_fact: - master_names_string: "{% for item in master_names_output.results %}{{ item.ansible_facts.scratch }} {% endfor %}" - - - set_fact: - master_names: "{{ master_names_string.strip().split(' ') }}" - - - include: launch_instances.yml 
- vars: - instances: "{{ master_names }}" - cluster: "{{ cluster_id }}" - type: "{{ k8s_type }}" - - - set_fact: k8s_type="node" - - - name: Generate node instance names(s) - set_fact: scratch={{ cluster_id }}-{{ k8s_type }}-{{ '%05x' |format( 1048576 |random) }} - register: node_names_output - with_sequence: start=1 end={{ num_nodes }} - - # These set_fact's cannot be combined - - set_fact: - node_names_string: "{% for item in node_names_output.results %}{{ item.ansible_facts.scratch }} {% endfor %}" - - - set_fact: - node_names: "{{ node_names_string.strip().split(' ') }}" - - - include: launch_instances.yml - vars: - instances: "{{ node_names }}" - cluster: "{{ cluster_id }}" - type: "{{ k8s_type }}" - -- hosts: "tag_env-{{ cluster_id }}" - roles: - - openshift_repos - - os_update_latest - -- include: ../openshift-master/config.yml - vars: - oo_host_group_exp: "groups[\"tag_env-host-type-{{ cluster_id }}-openshift-master\"]" - -- include: ../openshift-node/config.yml - vars: - oo_host_group_exp: "groups[\"tag_env-host-type-{{ cluster_id }}-openshift-node\"]" + - fail: msg="Deployment type not supported for gce provider yet" + when: deployment_type == 'enterprise' + + - include: ../../common/openshift-cluster/set_master_launch_facts_tasks.yml + - include: tasks/launch_instances.yml + vars: + instances: "{{ master_names }}" + cluster: "{{ cluster_id }}" + type: "{{ k8s_type }}" + + - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml + - include: tasks/launch_instances.yml + vars: + instances: "{{ node_names }}" + cluster: "{{ cluster_id }}" + type: "{{ k8s_type }}" + +- include: update.yml - include: list.yml diff --git a/playbooks/gce/openshift-cluster/launch_instances.yml b/playbooks/gce/openshift-cluster/launch_instances.yml deleted file mode 100644 index b4f33bd87..000000000 --- a/playbooks/gce/openshift-cluster/launch_instances.yml +++ /dev/null @@ -1,44 +0,0 @@ ---- -# TODO: when we are ready to go to ansible 1.9+ support only, we can update to -# the gce task to use the disk_auto_delete parameter to avoid having to delete -# the disk as a separate step on termination - -- set_fact: - machine_type: "{{ lookup('env', 'gce_machine_type') |default('n1-standard-1', true) }}" - machine_image: "{{ lookup('env', 'gce_machine_image') |default('libra-rhel7', true) }}" - -- name: Launch instance(s) - gce: - instance_names: "{{ instances }}" - machine_type: "{{ machine_type }}" - image: "{{ machine_image }}" - service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}" - pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}" - project_id: "{{ lookup('env', 'gce_project_id') }}" - tags: - - "created-by-{{ lookup('env', 'LOGNAME') |default(cluster, true) }}" - - "env-{{ cluster }}" - - "host-type-{{ type }}" - - "env-host-type-{{ cluster }}-openshift-{{ type }}" - register: gce - -- name: Add new instances to groups and set variables needed - add_host: - hostname: "{{ item.name }}" - ansible_ssh_host: "{{ item.public_ip }}" - groups: "{{ item.tags | oo_prepend_strings_in_list('tag_') | join(',') }}" - gce_public_ip: "{{ item.public_ip }}" - gce_private_ip: "{{ item.private_ip }}" - with_items: gce.instance_data - -- name: Wait for ssh - wait_for: "port=22 host={{ item.public_ip }}" - with_items: gce.instance_data - -- name: Wait for root user setup - command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null root@{{ item.public_ip }} echo root user is setup" - register: 
result - until: result.rc == 0 - retries: 20 - delay: 10 - with_items: gce.instance_data diff --git a/playbooks/gce/openshift-cluster/list.yml b/playbooks/gce/openshift-cluster/list.yml index 1124b0ea3..bab2fb9f8 100644 --- a/playbooks/gce/openshift-cluster/list.yml +++ b/playbooks/gce/openshift-cluster/list.yml @@ -2,16 +2,23 @@ - name: Generate oo_list_hosts group hosts: localhost gather_facts: no + vars_files: + - vars.yml tasks: - set_fact: scratch_group=tag_env-{{ cluster_id }} when: cluster_id != '' - set_fact: scratch_group=all - when: scratch_group is not defined - - add_host: name={{ item }} groups=oo_list_hosts - with_items: groups[scratch_group] | difference(['localhost']) | difference(groups.status_terminated) + when: cluster_id == '' + - add_host: + name: "{{ item }}" + groups: oo_list_hosts + ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}" + ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" + with_items: groups[scratch_group] | default([]) | difference(['localhost']) | difference(groups.status_terminated) - name: List Hosts hosts: oo_list_hosts gather_facts: no tasks: - - debug: msg="public:{{hostvars[inventory_hostname].gce_public_ip}} private:{{hostvars[inventory_hostname].gce_private_ip}}" + - debug: + msg: "public ip:{{ hostvars[inventory_hostname].gce_public_ip }} private ip:{{ hostvars[inventory_hostname].gce_private_ip }} deployment-type: {{ hostvars[inventory_hostname].group_names | oo_get_deployment_type_from_groups }}" diff --git a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml new file mode 100644 index 000000000..a68edefae --- /dev/null +++ b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml @@ -0,0 +1,42 @@ +--- +# TODO: when we are ready to go to ansible 1.9+ support only, we can update to +# the gce task to use the disk_auto_delete parameter to avoid having to delete +# the disk as a separate step on termination +- name: Launch instance(s) + gce: + instance_names: "{{ instances }}" + machine_type: "{{ lookup('env', 'gce_machine_type') | default('n1-standard-1', true) }}" + image: "{{ lookup('env', 'gce_machine_image') | default(deployment_vars[deployment_type].image, true) }}" + service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}" + pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}" + project_id: "{{ lookup('env', 'gce_project_id') }}" + tags: + - created-by-{{ lookup('env', 'LOGNAME') |default(cluster, true) }} + - env-{{ cluster }} + - host-type-{{ type }} + - env-host-type-{{ cluster }}-openshift-{{ type }} + - deployment-type-{{ deployment_type }} + register: gce + +- name: Add new instances to groups and set variables needed + add_host: + hostname: "{{ item.name }}" + ansible_ssh_host: "{{ item.public_ip }}" + ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}" + ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" + groups: "{{ item.tags | oo_prepend_strings_in_list('tag_') | join(',') }}" + gce_public_ip: "{{ item.public_ip }}" + gce_private_ip: "{{ item.private_ip }}" + with_items: gce.instance_data + +- name: Wait for ssh + wait_for: port=22 host={{ item.public_ip }} + with_items: gce.instance_data + +- name: Wait for user setup + command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null {{ hostvars[item.name].ansible_ssh_user }}@{{ 
item.public_ip }} echo {{ hostvars[item.name].ansible_ssh_user }} user is setup" + register: result + until: result.rc == 0 + retries: 20 + delay: 10 + with_items: gce.instance_data diff --git a/playbooks/gce/openshift-cluster/terminate.yml b/playbooks/gce/openshift-cluster/terminate.yml index 0281ae953..abe6a4c95 100644 --- a/playbooks/gce/openshift-cluster/terminate.yml +++ b/playbooks/gce/openshift-cluster/terminate.yml @@ -1,20 +1,34 @@ --- - name: Terminate instance(s) hosts: localhost - + gather_facts: no vars_files: - - vars.yml + - vars.yml + tasks: + - set_fact: scratch_group=tag_env-host-type-{{ cluster_id }}-openshift-node + - add_host: + name: "{{ item }}" + groups: oo_nodes_to_terminate + ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}" + ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" + with_items: groups[scratch_group] | default([]) | difference(['localhost']) | difference(groups.status_terminated) + + - set_fact: scratch_group=tag_env-host-type-{{ cluster_id }}-openshift-master + - add_host: + name: "{{ item }}" + groups: oo_masters_to_terminate + ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}" + ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" + with_items: groups[scratch_group] | default([]) | difference(['localhost']) | difference(groups.status_terminated) - include: ../openshift-node/terminate.yml vars: - oo_host_group_exp: 'groups["tag_env-host-type-{{ cluster_id }}-openshift-node"]' gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}" gce_pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}" gce_project_id: "{{ lookup('env', 'gce_project_id') }}" - include: ../openshift-master/terminate.yml vars: - oo_host_group_exp: 'groups["tag_env-host-type-{{ cluster_id }}-openshift-master"]' gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}" gce_pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}" gce_project_id: "{{ lookup('env', 'gce_project_id') }}" diff --git a/playbooks/gce/openshift-cluster/update.yml b/playbooks/gce/openshift-cluster/update.yml index 973e4c3ef..9ebf39a13 100644 --- a/playbooks/gce/openshift-cluster/update.yml +++ b/playbooks/gce/openshift-cluster/update.yml @@ -1,13 +1,18 @@ --- -- hosts: "tag_env-{{ cluster_id }}" - roles: - - openshift_repos - - os_update_latest +- name: Populate oo_hosts_to_update group + hosts: localhost + gather_facts: no + vars_files: + - vars.yml + tasks: + - name: Evaluate oo_hosts_to_update + add_host: + name: "{{ item }}" + groups: oo_hosts_to_update + ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}" + ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" + with_items: groups["tag_env-host-type-{{ cluster_id }}-openshift-master"] | union(groups["tag_env-host-type-{{ cluster_id }}-openshift-node"]) | default([]) -- include: ../openshift-master/config.yml - vars: - oo_host_group_exp: "groups[\"tag_env-host-type-{{ cluster_id }}-openshift-master\"]" +- include: ../../common/openshift-cluster/update_repos_and_packages.yml -- include: ../openshift-node/config.yml - vars: - oo_host_group_exp: "groups[\"tag_env-host-type-{{ cluster_id }}-openshift-node\"]" +- include: config.yml diff --git a/playbooks/gce/openshift-cluster/vars.yml b/playbooks/gce/openshift-cluster/vars.yml index ed97d539c..ae33083b9 100644 --- a/playbooks/gce/openshift-cluster/vars.yml 
+++ b/playbooks/gce/openshift-cluster/vars.yml @@ -1 +1,15 @@ --- +deployment_vars: + origin: + image: centos-7 + ssh_user: + sudo: yes + online: + image: libra-rhel7 + ssh_user: root + sudo: no + enterprise: + image: rhel-7 + ssh_user: + sudo: yes + diff --git a/playbooks/gce/openshift-master/config.yml b/playbooks/gce/openshift-master/config.yml index 857da0763..af6000bc8 100644 --- a/playbooks/gce/openshift-master/config.yml +++ b/playbooks/gce/openshift-master/config.yml @@ -1,20 +1,18 @@ --- -- name: master/config.yml, populate oo_masters_to_config host group if needed +- name: Populate oo_masters_to_config host group hosts: localhost gather_facts: no tasks: - - name: "Evaluate oo_host_group_exp if it's set" - add_host: "name={{ item }} groups=oo_masters_to_config" - with_items: "{{ oo_host_group_exp | default('') }}" - when: oo_host_group_exp is defined + - name: Evaluate oo_masters_to_config + add_host: + name: "{{ item }}" + groups: oo_masters_to_config + ansible_ssh_user: root + with_items: oo_host_group_exp | default([]) -- name: "Configure instances" - hosts: oo_masters_to_config +- include: ../../common/openshift-master/config.yml vars: + openshift_cluster_id: "{{ cluster_id }}" + openshift_debug_level: 4 + openshift_deployment_type: "{{ deployment_type }}" openshift_hostname: "{{ gce_private_ip }}" - vars_files: - - vars.yml - roles: - - openshift_master - - pods - - os_env_extras diff --git a/playbooks/gce/openshift-master/launch.yml b/playbooks/gce/openshift-master/launch.yml index 287596002..ef10b6cf0 100644 --- a/playbooks/gce/openshift-master/launch.yml +++ b/playbooks/gce/openshift-master/launch.yml @@ -8,14 +8,12 @@ connection: local gather_facts: no +# TODO: modify image based on deployment_type vars: inst_names: "{{ oo_new_inst_names }}" machine_type: n1-standard-1 image: libra-rhel7 - vars_files: - - vars.yml - tasks: - name: Launch instances gce: @@ -37,7 +35,7 @@ with_items: gce.instance_data - name: Wait for ssh - wait_for: "port=22 host={{ item.public_ip }}" + wait_for: port=22 host={{ item.public_ip }} with_items: gce.instance_data - name: Wait for root user setup diff --git a/playbooks/gce/openshift-master/terminate.yml b/playbooks/gce/openshift-master/terminate.yml index 8319774f8..452ac5199 100644 --- a/playbooks/gce/openshift-master/terminate.yml +++ b/playbooks/gce/openshift-master/terminate.yml @@ -3,10 +3,9 @@ hosts: localhost gather_facts: no tasks: - - name: Evaluate oo_host_group_exp if it's set - add_host: "name={{ item }} groups=oo_masters_to_terminate" - with_items: "{{ oo_host_group_exp | default('') }}" - when: oo_host_group_exp is defined + - name: Evaluate oo_masters_to_terminate + add_host: name={{ item }} groups=oo_masters_to_terminate + with_items: oo_host_group_exp | default([]) - name: Terminate master instances hosts: localhost @@ -22,6 +21,7 @@ instance_names: "{{ groups['oo_masters_to_terminate'] }}" disks: "{{ groups['oo_masters_to_terminate'] }}" register: gce + when: "'oo_masters_to_terminate' in groups" - name: Remove disks of instances gce_pd: @@ -32,5 +32,4 @@ zone: "{{ gce.zone }}" state: absent with_items: gce.instance_names - - + when: "'oo_masters_to_terminate' in groups" diff --git a/playbooks/gce/openshift-master/vars.yml b/playbooks/gce/openshift-master/vars.yml deleted file mode 100644 index c196b2fca..000000000 --- a/playbooks/gce/openshift-master/vars.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -openshift_debug_level: 4 -openshift_cluster_id: "{{ cluster_id }}" diff --git a/playbooks/gce/openshift-node/config.yml 
b/playbooks/gce/openshift-node/config.yml index 771cc3a94..5b1601176 100644 --- a/playbooks/gce/openshift-node/config.yml +++ b/playbooks/gce/openshift-node/config.yml @@ -1,100 +1,24 @@ --- -- name: node/config.yml, populate oo_nodes_to_config host group if needed +- name: Populate oo_nodes_to_config and oo_first_master host groups hosts: localhost gather_facts: no tasks: - - name: Evaluate oo_host_group_exp - add_host: "name={{ item }} groups=oo_nodes_to_config" - with_items: "{{ oo_host_group_exp | default('') }}" - when: oo_host_group_exp is defined - - add_host: + - name: Evaluate oo_nodes_to_config + add_host: + name: "{{ item }}" + groups: oo_nodes_to_config + ansible_ssh_user: root + with_items: oo_host_group_exp | default([]) + - name: Evaluate oo_first_master + add_host: name: "{{ groups['tag_env-host-type-' ~ cluster_id ~ '-openshift-master'][0] }}" groups: oo_first_master - when: oo_host_group_exp is defined + ansible_ssh_user: root -- name: Gather and set facts for hosts to configure - hosts: oo_nodes_to_config - roles: - - openshift_facts - tasks: - # Since the master is registering the nodes before they are configured, we - # need to make sure to set the node properties beforehand if we do not want - # the defaults - - openshift_facts: - role: "{{ item.role }}" - local_facts: "{{ item.local_facts }}" - with_items: - - role: common - local_facts: - hostname: "{{ gce_private_ip }}" - - role: node - local_facts: - external_id: "{{ openshift_node_external_id | default(None) }}" - resources_cpu: "{{ openshfit_node_resources_cpu | default(None) }}" - resources_memory: "{{ openshfit_node_resources_memory | default(None) }}" - pod_cidr: "{{ openshfit_node_pod_cidr | default(None) }}" - labels: "{{ openshfit_node_labels | default(None) }}" - annotations: "{{ openshfit_node_annotations | default(None) }}" - - -- name: Register nodes - hosts: oo_first_master - vars: - openshift_nodes: "{{ hostvars - | oo_select_keys(groups['oo_nodes_to_config']) }}" - roles: - - openshift_register_nodes - tasks: - - name: Create local temp directory for syncing certs - local_action: command /usr/bin/mktemp -d /tmp/openshift-ansible-XXXXXXX - register: mktemp - - - name: Sync master certs to localhost - synchronize: - mode: pull - checksum: yes - src: /var/lib/openshift/openshift.local.certificates - dest: "{{ mktemp.stdout }}" - -- name: Configure instances - hosts: oo_nodes_to_config - vars_files: - - vars.yml +- include: ../../common/openshift-node/config.yml vars: - sync_tmpdir: "{{ hostvars[groups['oo_first_master'][0]].mktemp.stdout }}" - cert_parent_rel_path: openshift.local.certificates - cert_rel_path: "{{ cert_parent_rel_path }}/node-{{ openshift.common.hostname }}" - cert_base_path: /var/lib/openshift - cert_parent_path: "{{ cert_base_path }}/{{ cert_parent_rel_path }}" - cert_path: "{{ cert_base_path }}/{{ cert_rel_path }}" - pre_tasks: - - name: Ensure certificate directories exists - file: - path: "{{ item }}" - state: directory - with_items: - - "{{ cert_path }}" - - "{{ cert_parent_path }}/ca" - - # TODO: notify restart openshift-node and/or restart openshift-sdn-node, - # possibly test service started time against certificate/config file - # timestamps in openshift-node or openshift-sdn-node to trigger notify - - name: Sync certs to nodes - synchronize: - checksum: yes - src: "{{ item.src }}" - dest: "{{ item.dest }}" - owner: no - group: no - with_items: - - src: "{{ sync_tmpdir }}/{{ cert_rel_path }}" - dest: "{{ cert_parent_path }}" - - src: "{{ sync_tmpdir }}/{{ cert_parent_rel_path 
}}/ca/cert.crt" - dest: "{{ cert_parent_path }}/ca/cert.crt" - - local_action: file name={{ sync_tmpdir }} state=absent - run_once: true - roles: - - openshift_node - - os_env_extras - - os_env_extras_node + openshift_cluster_id: "{{ cluster_id }}" + openshift_debug_level: 4 + openshift_deployment_type: "{{ deployment_type }}" + openshift_hostname: "{{ gce_private_ip }}" diff --git a/playbooks/gce/openshift-node/launch.yml b/playbooks/gce/openshift-node/launch.yml index 73d0478ab..086ba58bc 100644 --- a/playbooks/gce/openshift-node/launch.yml +++ b/playbooks/gce/openshift-node/launch.yml @@ -8,14 +8,12 @@ connection: local gather_facts: no +# TODO: modify image based on deployment_type vars: inst_names: "{{ oo_new_inst_names }}" machine_type: n1-standard-1 image: libra-rhel7 - vars_files: - - vars.yml - tasks: - name: Launch instances gce: @@ -37,7 +35,7 @@ with_items: gce.instance_data - name: Wait for ssh - wait_for: "port=22 host={{ item.public_ip }}" + wait_for: port=22 host={{ item.public_ip }} with_items: gce.instance_data - name: Wait for root user setup diff --git a/playbooks/gce/openshift-node/terminate.yml b/playbooks/gce/openshift-node/terminate.yml index 7d71dfcab..357e0c295 100644 --- a/playbooks/gce/openshift-node/terminate.yml +++ b/playbooks/gce/openshift-node/terminate.yml @@ -3,10 +3,9 @@ hosts: localhost gather_facts: no tasks: - - name: Evaluate oo_host_group_exp if it's set - add_host: "name={{ item }} groups=oo_nodes_to_terminate" - with_items: "{{ oo_host_group_exp | default('') }}" - when: oo_host_group_exp is defined + - name: Evaluate oo_nodes_to_terminate + add_host: name={{ item }} groups=oo_nodes_to_terminate + with_items: oo_host_group_exp | default([]) - name: Terminate node instances hosts: localhost @@ -22,6 +21,7 @@ instance_names: "{{ groups['oo_nodes_to_terminate'] }}" disks: "{{ groups['oo_nodes_to_terminate'] }}" register: gce + when: "'oo_nodes_to_terminate' in groups" - name: Remove disks of instances gce_pd: @@ -32,5 +32,4 @@ zone: "{{ gce.zone }}" state: absent with_items: gce.instance_names - - + when: "'oo_nodes_to_terminate' in groups" diff --git a/playbooks/gce/openshift-node/vars.yml b/playbooks/gce/openshift-node/vars.yml deleted file mode 100644 index c196b2fca..000000000 --- a/playbooks/gce/openshift-node/vars.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -openshift_debug_level: 4 -openshift_cluster_id: "{{ cluster_id }}" diff --git a/playbooks/libvirt/openshift-cluster/config.yml b/playbooks/libvirt/openshift-cluster/config.yml new file mode 100644 index 000000000..faf278b10 --- /dev/null +++ b/playbooks/libvirt/openshift-cluster/config.yml @@ -0,0 +1,38 @@ +--- +# TODO: need to figure out a plan for setting hostname, currently the default +# is localhost, so no hostname value (or public_hostname) value is getting +# assigned + +- name: Populate oo_masters_to_config host group + hosts: localhost + gather_facts: no + vars_files: + - vars.yml + tasks: + - name: Evaluate oo_masters_to_config + add_host: + name: "{{ item }}" + ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" + ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" + groups: oo_masters_to_config + with_items: groups["tag_env-host-type-{{ cluster_id }}-openshift-master"] | default([]) + - name: Evaluate oo_nodes_to_config + add_host: + name: "{{ item }}" + ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" + ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" + groups: oo_nodes_to_config + with_items: groups["tag_env-host-type-{{ 
cluster_id }}-openshift-node"] | default([]) + - name: Evaluate oo_first_master + add_host: + name: "{{ groups['tag_env-host-type-' ~ cluster_id ~ '-openshift-master'][0] }}" + ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" + ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" + groups: oo_first_master + when: "'tag_env-host-type-{{ cluster_id }}-openshift-master' in groups" + +- include: ../../common/openshift-cluster/config.yml + vars: + openshift_cluster_id: "{{ cluster_id }}" + openshift_debug_level: 4 + openshift_deployment_type: "{{ deployment_type }}" diff --git a/playbooks/libvirt/openshift-cluster/launch.yml b/playbooks/libvirt/openshift-cluster/launch.yml index 6f2df33af..a7ddc1e7e 100644 --- a/playbooks/libvirt/openshift-cluster/launch.yml +++ b/playbooks/libvirt/openshift-cluster/launch.yml @@ -1,65 +1,36 @@ +--- - name: Launch instance(s) hosts: localhost - connection: local gather_facts: no - - vars: - libvirt_storage_pool_path: "{{ lookup('env','HOME') }}/libvirt-storage-pool-openshift" - libvirt_storage_pool: 'openshift' - libvirt_uri: 'qemu:///system' - vars_files: - - vars.yml - + - vars.yml + vars: + os_libvirt_storage_pool: "{{ libvirt_storage_pool | default('images') }}" + os_libvirt_storage_pool_path: "{{ libvirt_storage_pool_path | default('/var/lib/libvirt/images') }}" + os_libvirt_network: "{{ libvirt_network | default('default') }}" + image_url: "{{ deployment_vars[deployment_type].image.url }}" + image_sha256: "{{ deployment_vars[deployment_type].image.sha256 }}" + image_name: "{{ deployment_vars[deployment_type].image.name }}" tasks: - - set_fact: - k8s_type: master - - - name: Generate master instance name(s) - set_fact: - scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ '%05x' | format( 1048576 | random ) }}" - register: master_names_output - with_sequence: start=1 end='{{ num_masters }}' + - fail: msg="Deployment type not supported for libvirt provider yet" + when: deployment_type in ['online', 'enterprise'] - - set_fact: - master_names: "{{ master_names_output.results | oo_collect('ansible_facts') | oo_collect('scratch_name') }}" + - include: tasks/configure_libvirt.yml - - include: launch_instances.yml - vars: - instances: '{{ master_names }}' - cluster: '{{ cluster_id }}' - type: '{{ k8s_type }}' - group_name: 'tag_env-host-type-{{ cluster_id }}-openshift-master' + - include: ../../common/openshift-cluster/set_master_launch_facts_tasks.yml + - include: tasks/launch_instances.yml + vars: + instances: "{{ master_names }}" + cluster: "{{ cluster_id }}" + type: "{{ k8s_type }}" - - set_fact: - k8s_type: node + - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml + - include: tasks/launch_instances.yml + vars: + instances: "{{ node_names }}" + cluster: "{{ cluster_id }}" + type: "{{ k8s_type }}" - - name: Generate node instance name(s) - set_fact: - scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ '%05x' | format( 1048576 | random ) }}" - register: node_names_output - with_sequence: start=1 end='{{ num_nodes }}' +- include: update.yml - - set_fact: - node_names: "{{ node_names_output.results | oo_collect('ansible_facts') | oo_collect('scratch_name') }}" - - - include: launch_instances.yml - vars: - instances: '{{ node_names }}' - cluster: '{{ cluster_id }}' - type: '{{ k8s_type }}' - -- hosts: 'tag_env-{{ cluster_id }}' - roles: - - openshift_repos - - os_update_latest - -- include: ../openshift-master/config.yml - vars: - oo_host_group_exp: 'groups["tag_env-host-type-{{ cluster_id }}-openshift-master"]' - 
oo_env: '{{ cluster_id }}' - -- include: ../openshift-node/config.yml - vars: - oo_host_group_exp: 'groups["tag_env-host-type-{{ cluster_id }}-openshift-node"]' - oo_env: '{{ cluster_id }}' +- include: list.yml diff --git a/playbooks/libvirt/openshift-cluster/launch_instances.yml b/playbooks/libvirt/openshift-cluster/launch_instances.yml deleted file mode 100644 index 3bbcae981..000000000 --- a/playbooks/libvirt/openshift-cluster/launch_instances.yml +++ /dev/null @@ -1,102 +0,0 @@ -- name: Create the libvirt storage directory for openshift - file: - dest: '{{ libvirt_storage_pool_path }}' - state: directory - -- name: Download Base Cloud image - get_url: - url: '{{ base_image_url }}' - sha256sum: '{{ base_image_sha256 }}' - dest: '{{ libvirt_storage_pool_path }}/{{ base_image_name }}' - -- name: Create the cloud-init config drive path - file: - dest: '{{ libvirt_storage_pool_path }}/{{ item }}_configdrive/openstack/latest' - state: directory - with_items: '{{ instances }}' - -- name: Create the cloud-init config drive files - template: - src: '{{ item[1] }}' - dest: '{{ libvirt_storage_pool_path }}/{{ item[0] }}_configdrive/openstack/latest/{{ item[1] }}' - with_nested: - - '{{ instances }}' - - [ user-data, meta-data ] - -- name: Create the cloud-init config drive - command: 'genisoimage -output {{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso -volid cidata -joliet -rock user-data meta-data' - args: - chdir: '{{ libvirt_storage_pool_path }}/{{ item }}_configdrive/openstack/latest' - creates: '{{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso' - with_items: '{{ instances }}' - -- name: Create the libvirt storage pool for openshift - command: 'virsh -c {{ libvirt_uri }} pool-create-as {{ libvirt_storage_pool }} dir --target {{ libvirt_storage_pool_path }}' - ignore_errors: yes - -- name: Refresh the libvirt storage pool for openshift - command: 'virsh -c {{ libvirt_uri }} pool-refresh {{ libvirt_storage_pool }}' - -- name: Create VMs drives - command: 'virsh -c {{ libvirt_uri }} vol-create-as {{ libvirt_storage_pool }} {{ item }}.qcow2 10G --format qcow2 --backing-vol {{ base_image_name }} --backing-vol-format qcow2' - with_items: '{{ instances }}' - -- name: Create VMs - virt: - name: '{{ item }}' - command: define - xml: "{{ lookup('template', '../templates/domain.xml') }}" - uri: '{{ libvirt_uri }}' - with_items: '{{ instances }}' - -- name: Start VMs - virt: - name: '{{ item }}' - state: running - uri: '{{ libvirt_uri }}' - with_items: '{{ instances }}' - -- name: Collect MAC addresses of the VMs - shell: 'virsh -c {{ libvirt_uri }} dumpxml {{ item }} | xmllint --xpath "string(//domain/devices/interface/mac/@address)" -' - register: scratch_mac - with_items: '{{ instances }}' - -- name: Wait for the VMs to get an IP - command: "egrep -c '{{ scratch_mac.results | oo_collect('stdout') | join('|') }}' /proc/net/arp" - ignore_errors: yes - register: nb_allocated_ips - until: nb_allocated_ips.stdout == '{{ instances | length }}' - retries: 30 - delay: 1 - -- name: Collect IP addresses of the VMs - shell: "awk '/{{ item.stdout }}/ {print $1}' /proc/net/arp" - register: scratch_ip - with_items: '{{ scratch_mac.results }}' - -- set_fact: - ips: "{{ scratch_ip.results | oo_collect('stdout') }}" - -- name: Add new instances - add_host: - hostname: '{{ item.0 }}' - ansible_ssh_host: '{{ item.1 }}' - ansible_ssh_user: root - groups: 'tag_env-{{ cluster }}, tag_host-type-{{ type }}, tag_env-host-type-{{ cluster }}-openshift-{{ type }}' - with_together: - - instances - - ips - -- 
name: Wait for ssh - wait_for: - host: '{{ item }}' - port: 22 - with_items: ips - -- name: Wait for root user setup - command: 'ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null root@{{ item }} echo root user is setup' - register: result - until: result.rc == 0 - retries: 30 - delay: 1 - with_items: ips diff --git a/playbooks/libvirt/openshift-cluster/list.yml b/playbooks/libvirt/openshift-cluster/list.yml index 6bf07e3c6..25a25f791 100644 --- a/playbooks/libvirt/openshift-cluster/list.yml +++ b/playbooks/libvirt/openshift-cluster/list.yml @@ -1,43 +1,23 @@ +--- - name: Generate oo_list_hosts group hosts: localhost - connection: local gather_facts: no - - vars: - libvirt_uri: 'qemu:///system' - + vars_files: + - vars.yml tasks: - - name: List VMs - virt: - command: list_vms - register: list_vms - - - name: Collect MAC addresses of the VMs - shell: 'virsh -c {{ libvirt_uri }} dumpxml {{ item }} | xmllint --xpath "string(//domain/devices/interface/mac/@address)" -' - register: scratch_mac - with_items: '{{ list_vms.list_vms }}' - when: item|truncate(cluster_id|length+1, True) == '{{ cluster_id }}-...' - - - name: Collect IP addresses of the VMs - shell: "awk '/{{ item.stdout }}/ {print $1}' /proc/net/arp" - register: scratch_ip - with_items: '{{ scratch_mac.results }}' - when: item.skipped is not defined - - - name: Add hosts - add_host: - hostname: '{{ item[0] }}' - ansible_ssh_host: '{{ item[1].stdout }}' - ansible_ssh_user: root - groups: oo_list_hosts - with_together: - - '{{ list_vms.list_vms }}' - - '{{ scratch_ip.results }}' - when: item[1].skipped is not defined + - set_fact: scratch_group=tag_env-{{ cluster_id }} + when: cluster_id != '' + - set_fact: scratch_group=all + when: cluster_id == '' + - add_host: + name: "{{ item }}" + groups: oo_list_hosts + ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" + ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" + with_items: groups[scratch_group] | default([]) | difference(['localhost']) - name: List Hosts hosts: oo_list_hosts - tasks: - - debug: - msg: 'public:{{ansible_default_ipv4.address}} private:{{ansible_default_ipv4.address}}' + - debug: + msg: 'public:{{ansible_default_ipv4.address}} private:{{ansible_default_ipv4.address}} deployment-type: {{ hostvars[inventory_hostname].group_names | oo_get_deployment_type_from_groups }}' diff --git a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt.yml b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt.yml new file mode 100644 index 000000000..f237c1a60 --- /dev/null +++ b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt.yml @@ -0,0 +1,6 @@ +--- +- include: configure_libvirt_storage_pool.yml + when: libvirt_storage_pool is defined and libvirt_storage_pool_path is defined + +- include: configure_libvirt_network.yml + when: libvirt_network is defined diff --git a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml new file mode 100644 index 000000000..1cd83f7be --- /dev/null +++ b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml @@ -0,0 +1,27 @@ +--- +- name: Test if libvirt network for openshift already exists + command: "virsh -c {{ libvirt_uri }} net-info {{ libvirt_network }}" + register: net_info_result + changed_when: False + failed_when: "net_info_result.rc != 0 and 'error: Network not found:' not in net_info_result.stderr" + +- name: Create a temp 
directory for the template xml file + command: "/usr/bin/mktemp -d /tmp/openshift-ansible-XXXXXXX" + register: mktemp + when: net_info_result.rc == 1 + +- name: Create network xml file + template: + src: templates/network.xml + dest: "{{ mktemp.stdout }}/network.xml" + when: net_info_result.rc == 1 + +- name: Create libvirt network for openshift + command: "virsh -c {{ libvirt_uri }} net-create {{ mktemp.stdout }}/network.xml" + when: net_info_result.rc == 1 + +- name: Remove the temp directory + file: + path: "{{ mktemp.stdout }}" + state: absent + when: net_info_result.rc == 1 diff --git a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml new file mode 100644 index 000000000..817acb250 --- /dev/null +++ b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml @@ -0,0 +1,27 @@ +--- +- name: Create libvirt storage directory for openshift + file: + dest: "{{ libvirt_storage_pool_path }}" + state: directory + +- acl: + default: yes + entity: kvm + etype: group + name: "{{ libvirt_storage_pool_path }}" + permissions: rwx + state: present + +- name: Test if libvirt storage pool for openshift already exists + command: "virsh -c {{ libvirt_uri }} pool-info {{ libvirt_storage_pool }}" + register: pool_info_result + changed_when: False + failed_when: "pool_info_result.rc != 0 and 'error: Storage pool not found:' not in pool_info_result.stderr" + +- name: Create the libvirt storage pool for openshift + command: 'virsh -c {{ libvirt_uri }} pool-create-as {{ libvirt_storage_pool }} dir --target {{ libvirt_storage_pool_path }}' + when: pool_info_result.rc == 1 + +- name: Refresh the libvirt storage pool for openshift + command: 'virsh -c {{ libvirt_uri }} pool-refresh {{ libvirt_storage_pool }}' + when: pool_info_result.rc == 1 diff --git a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml new file mode 100644 index 000000000..96d440096 --- /dev/null +++ b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml @@ -0,0 +1,104 @@ +--- +# TODO: Add support for choosing base image based on deployment_type and os +# wanted (os wanted needs support added in bin/cluster with sane defaults: +# fedora/centos for origin, rhel for online/enterprise) + +# TODO: create a role to encapsulate some of this complexity, possibly also +# create a module to manage the storage tasks, network tasks, and possibly +# even handle the libvirt tasks to set metadata in the domain xml and be able +# to create/query data about vms without having to use xml the python libvirt +# bindings look like a good candidate for this + +- name: Download Base Cloud image + get_url: + url: '{{ image_url }}' + sha256sum: '{{ image_sha256 }}' + dest: '{{ os_libvirt_storage_pool_path }}/{{ image_name }}' + +- name: Create the cloud-init config drive path + file: + dest: '{{ os_libvirt_storage_pool_path }}/{{ item }}_configdrive/' + state: directory + with_items: instances + +- name: Create the cloud-init config drive files + template: + src: '{{ item[1] }}' + dest: '{{ os_libvirt_storage_pool_path }}/{{ item[0] }}_configdrive/{{ item[1] }}' + with_nested: + - instances + - [ user-data, meta-data ] + +- name: Create the cloud-init config drive + command: 'genisoimage -output {{ os_libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso -volid cidata -joliet -rock user-data meta-data' + args: + chdir: '{{ os_libvirt_storage_pool_path }}/{{ 
item }}_configdrive/' + creates: '{{ os_libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso' + with_items: instances + +- name: Create VMs drives + command: 'virsh -c {{ libvirt_uri }} vol-create-as {{ os_libvirt_storage_pool }} {{ item }}.qcow2 10G --format qcow2 --backing-vol {{ image_name }} --backing-vol-format qcow2' + with_items: instances + +- name: Create VMs + virt: + name: '{{ item }}' + command: define + xml: "{{ lookup('template', '../templates/domain.xml') }}" + uri: '{{ libvirt_uri }}' + with_items: instances + +- name: Start VMs + virt: + name: '{{ item }}' + state: running + uri: '{{ libvirt_uri }}' + with_items: instances + +- name: Collect MAC addresses of the VMs + shell: 'virsh -c {{ libvirt_uri }} dumpxml {{ item }} | xmllint --xpath "string(//domain/devices/interface/mac/@address)" -' + register: scratch_mac + with_items: instances + +- name: Wait for the VMs to get an IP + command: "egrep -c '{{ scratch_mac.results | oo_collect('stdout') | join('|') }}' /proc/net/arp" + ignore_errors: yes + register: nb_allocated_ips + until: nb_allocated_ips.stdout == '{{ instances | length }}' + retries: 30 + delay: 1 + +- name: Collect IP addresses of the VMs + shell: "awk '/{{ item.stdout }}/ {print $1}' /proc/net/arp" + register: scratch_ip + with_items: scratch_mac.results + +- set_fact: + ips: "{{ scratch_ip.results | oo_collect('stdout') }}" + +- name: Add new instances + add_host: + hostname: '{{ item.0 }}' + ansible_ssh_host: '{{ item.1 }}' + ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" + ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" + groups: 'tag_env-{{ cluster }}, tag_host-type-{{ type }}, tag_env-host-type-{{ cluster }}-openshift-{{ type }}' + with_together: + - instances + - ips + +- name: Wait for ssh + wait_for: + host: '{{ item }}' + port: 22 + with_items: ips + +- name: Wait for openshift user setup + command: 'ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null openshift@{{ item.1 }} echo openshift user is setup' + register: result + until: result.rc == 0 + retries: 30 + delay: 1 + with_together: + - instances + - ips
diff --git a/playbooks/libvirt/openshift-cluster/templates/domain.xml b/playbooks/libvirt/openshift-cluster/templates/domain.xml new file mode 100644 index 000000000..8cb017367 --- /dev/null +++ b/playbooks/libvirt/openshift-cluster/templates/domain.xml @@ -0,0 +1,67 @@
[templates/domain.xml, 67 added lines: the XML markup of this libvirt domain template was lost in extraction. The surviving text shows a domain named {{ item }} carrying metadata tags deployment-type-{{ deployment_type }}, env-{{ cluster }}, env-host-type-{{ cluster }}-openshift-{{ type }} and host-type-{{ type }}, numeric memory/vcpu settings, an hvm OS type, destroy/restart/restart lifecycle actions, and the /usr/bin/qemu-system-x86_64 emulator.]
diff --git a/playbooks/libvirt/openshift-cluster/templates/meta-data b/playbooks/libvirt/openshift-cluster/templates/meta-data new file mode 100644 index 000000000..6b421770d --- /dev/null +++ b/playbooks/libvirt/openshift-cluster/templates/meta-data @@ -0,0 +1,3 @@ +instance-id: {{ item[0] }} +hostname: {{ item[0] }} +local-hostname: {{ item[0] }}.example.com
diff --git a/playbooks/libvirt/openshift-cluster/templates/network.xml b/playbooks/libvirt/openshift-cluster/templates/network.xml new file mode 100644 index 000000000..86dcd62bb --- /dev/null +++ b/playbooks/libvirt/openshift-cluster/templates/network.xml @@ -0,0 +1,23 @@
[templates/network.xml, 23 added lines: the XML markup of this libvirt network template was lost in extraction; only the network name, openshift-ansible, survives.]
diff --git a/playbooks/libvirt/openshift-cluster/templates/user-data
b/playbooks/libvirt/openshift-cluster/templates/user-data new file mode 100644 index 000000000..77b788109 --- /dev/null +++ b/playbooks/libvirt/openshift-cluster/templates/user-data @@ -0,0 +1,23 @@ +#cloud-config +disable_root: true + +hostname: {{ item[0] }} +fqdn: {{ item[0] }}.example.com +manage_etc_hosts: true + +users: + - default + - name: root + ssh_authorized_keys: + - {{ lookup('file', '~/.ssh/id_rsa.pub') }} + +system_info: + default_user: + name: openshift + sudo: ["ALL=(ALL) NOPASSWD: ALL"] + +ssh_authorized_keys: + - {{ lookup('file', '~/.ssh/id_rsa.pub') }} + +bootcmd: + - NETWORK_CONFIG=/etc/sysconfig/network-scripts/ifcfg-eth0; if ! grep DHCP_HOSTNAME ${NETWORK_CONFIG}; then echo 'DHCP_HOSTNAME="{{ item[0] }}.example.com"' >> ${NETWORK_CONFIG}; fi; pkill -9 dhclient; service network restart diff --git a/playbooks/libvirt/openshift-cluster/terminate.yml b/playbooks/libvirt/openshift-cluster/terminate.yml index c609169d3..b173a09dd 100644 --- a/playbooks/libvirt/openshift-cluster/terminate.yml +++ b/playbooks/libvirt/openshift-cluster/terminate.yml @@ -1,41 +1,44 @@ +--- +# TODO: does not handle a non-existant cluster gracefully + - name: Terminate instance(s) hosts: localhost - connection: local gather_facts: no + vars_files: + - vars.yml + tasks: + - set_fact: cluster_group=tag_env-{{ cluster_id }} + - add_host: + name: "{{ item }}" + groups: oo_hosts_to_terminate + ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" + ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" + with_items: groups[cluster_group] | default([]) - vars: - libvirt_storage_pool_path: "{{ lookup('env','HOME') }}/libvirt-storage-pool-openshift" - libvirt_storage_pool: 'openshift' - libvirt_uri: 'qemu:///system' + - name: Destroy VMs + virt: + name: '{{ item[0] }}' + command: '{{ item[1] }}' + uri: '{{ libvirt_uri }}' + with_nested: + - groups['oo_hosts_to_terminate'] + - [ destroy, undefine ] - tasks: - - name: List VMs - virt: - command: list_vms - register: list_vms + - name: Delete VMs drives + command: 'virsh -c {{ libvirt_uri }} vol-delete --pool {{ libvirt_storage_pool }} {{ item }}.qcow2' + args: + removes: '{{ libvirt_storage_pool_path }}/{{ item }}.qcow2' + with_items: groups['oo_hosts_to_terminate'] - - name: Destroy VMs - virt: - name: '{{ item[0] }}' - command: '{{ item[1] }}' - uri: '{{ libvirt_uri }}' - with_nested: - - '{{ list_vms.list_vms }}' - - [ destroy, undefine ] - when: item[0]|truncate(cluster_id|length+1, True) == '{{ cluster_id }}-...' + - name: Delete the VM cloud-init image + file: + path: '{{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso' + state: absent + with_items: groups['oo_hosts_to_terminate'] - - name: Delete VMs config drive - file: - path: '{{ libvirt_storage_pool_path }}/{{ item }}_configdrive/openstack' - state: absent - with_items: '{{ list_vms.list_vms }}' - when: item|truncate(cluster_id|length+1, True) == '{{ cluster_id }}-...' + - name: Remove the cloud-init config directory + file: + path: '{{ libvirt_storage_pool_path }}/{{ item }}_configdrive/' + state: absent + with_items: groups['oo_hosts_to_terminate'] - - name: Delete VMs drives - command: 'virsh -c {{ libvirt_uri }} vol-delete --pool {{ libvirt_storage_pool }} {{ item[0] }}{{ item[1] }}' - args: - removes: '{{ libvirt_storage_pool_path }}/{{ item[0] }}{{ item[1] }}' - with_nested: - - '{{ list_vms.list_vms }}' - - [ '_configdrive', '_cloud-init.iso', '.qcow2' ] - when: item[0]|truncate(cluster_id|length+1, True) == '{{ cluster_id }}-...' 
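
The libvirt config, terminate, and update playbooks in this series all repeat one pattern, which the GCE playbooks above adopt as well: a localhost play evaluates a tag-derived inventory group into an oo_* action group with add_host, attaching the ssh user and sudo flag from deployment_vars so that later plays connect correctly for any deployment type. A minimal sketch of the pattern, assuming deployment_type is supplied as an extra var; the tag_env-demo group name and the closing debug task are illustrative stand-ins for the real tag group and work:

    ---
    - name: Evaluate a tagged group into an action group
      hosts: localhost
      gather_facts: no
      vars_files:
        - vars.yml  # supplies deployment_vars keyed by deployment_type
      tasks:
        - add_host:
            name: "{{ item }}"
            groups: oo_hosts_to_terminate
            ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
            ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
          # default([]) keeps the play alive when the tag group does not exist
          with_items: groups['tag_env-demo'] | default([])

    - name: Act on the evaluated group
      hosts: oo_hosts_to_terminate
      gather_facts: no
      tasks:
        - debug: msg="{{ inventory_hostname }} would be terminated here"

Hosts added with add_host persist for the remainder of the ansible-playbook run, so the included common playbooks can target the oo_* groups directly instead of re-deriving cluster membership from tags.
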
diff --git a/playbooks/libvirt/openshift-cluster/update.yml b/playbooks/libvirt/openshift-cluster/update.yml new file mode 100644 index 000000000..57e36db9e --- /dev/null +++ b/playbooks/libvirt/openshift-cluster/update.yml @@ -0,0 +1,18 @@ +--- +- name: Populate oo_hosts_to_update group + hosts: localhost + gather_facts: no + vars_files: + - vars.yml + tasks: + - name: Evaluate oo_hosts_to_update + add_host: + name: "{{ item }}" + groups: oo_hosts_to_update + ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" + ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" + with_items: groups["tag_env-host-type-{{ cluster_id }}-openshift-master"] | union(groups["tag_env-host-type-{{ cluster_id }}-openshift-node"]) | default([]) + +- include: ../../common/openshift-cluster/update_repos_and_packages.yml + +- include: config.yml diff --git a/playbooks/libvirt/openshift-cluster/vars.yml b/playbooks/libvirt/openshift-cluster/vars.yml index 4e4eecd46..65d954fee 100644 --- a/playbooks/libvirt/openshift-cluster/vars.yml +++ b/playbooks/libvirt/openshift-cluster/vars.yml @@ -1,7 +1,33 @@ -# base_image_url: http://download.fedoraproject.org/pub/fedora/linux/releases/21/Cloud/Images/x86_64/Fedora-Cloud-Base-20141203-21.x86_64.qcow2 -# base_image_name: Fedora-Cloud-Base-20141203-21.x86_64.qcow2 -# base_image_sha256: 3a99bb89f33e3d4ee826c8160053cdb8a72c80cd23350b776ce73cd244467d86 +--- +libvirt_storage_pool_path: "{{ lookup('env','HOME') }}/libvirt-storage-pool-openshift-ansible" +libvirt_storage_pool: 'openshift-ansible' +libvirt_network: openshift-ansible +libvirt_uri: 'qemu:///system' -base_image_url: http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2 -base_image_name: CentOS-7-x86_64-GenericCloud.qcow2 -base_image_sha256: e324e3ab1d24a1bbf035ddb365e7f9058c0b454acf48d7aa15c5519fae5998ab +deployment_vars: + origin: + image: + url: "http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2" + name: CentOS-7-x86_64-GenericCloud.qcow2 + sha256: e324e3ab1d24a1bbf035ddb365e7f9058c0b454acf48d7aa15c5519fae5998ab + ssh_user: openshift + sudo: yes + online: + image: + url: + name: + sha256: + ssh_user: root + sudo: no + enterprise: + image: + url: + name: + sha256: + ssh_user: openshift + sudo: yes +# origin: +# fedora: +# url: "http://download.fedoraproject.org/pub/fedora/linux/releases/21/Cloud/Images/x86_64/Fedora-Cloud-Base-20141203-21.x86_64.qcow2" +# name: Fedora-Cloud-Base-20141203-21.x86_64.qcow2 +# sha256: 3a99bb89f33e3d4ee826c8160053cdb8a72c80cd23350b776ce73cd244467d86 diff --git a/playbooks/libvirt/openshift-master/config.yml b/playbooks/libvirt/openshift-master/config.yml deleted file mode 100644 index dd95fd57f..000000000 --- a/playbooks/libvirt/openshift-master/config.yml +++ /dev/null @@ -1,21 +0,0 @@ -- name: master/config.yml, populate oo_masters_to_config host group if needed - hosts: localhost - gather_facts: no - tasks: - - name: "Evaluate oo_host_group_exp if it's set" - add_host: - name: '{{ item }}' - groups: oo_masters_to_config - with_items: "{{ oo_host_group_exp | default('') }}" - when: oo_host_group_exp is defined - -- name: Configure instances - hosts: oo_masters_to_config - vars: - openshift_hostname: '{{ ansible_default_ipv4.address }}' - vars_files: - - vars.yml - roles: - - openshift_master - - pods - - os_env_extras diff --git a/playbooks/libvirt/openshift-master/filter_plugins b/playbooks/libvirt/openshift-master/filter_plugins deleted file mode 120000 index 99a95e4ca..000000000 --- 
a/playbooks/libvirt/openshift-master/filter_plugins +++ /dev/null @@ -1 +0,0 @@ -../../../filter_plugins \ No newline at end of file diff --git a/playbooks/libvirt/openshift-master/roles b/playbooks/libvirt/openshift-master/roles deleted file mode 120000 index 20c4c58cf..000000000 --- a/playbooks/libvirt/openshift-master/roles +++ /dev/null @@ -1 +0,0 @@ -../../../roles \ No newline at end of file diff --git a/playbooks/libvirt/openshift-master/vars.yml b/playbooks/libvirt/openshift-master/vars.yml deleted file mode 100644 index ad0c0fbe2..000000000 --- a/playbooks/libvirt/openshift-master/vars.yml +++ /dev/null @@ -1 +0,0 @@ -openshift_debug_level: 4 diff --git a/playbooks/libvirt/openshift-node/config.yml b/playbooks/libvirt/openshift-node/config.yml deleted file mode 100644 index 3244a8046..000000000 --- a/playbooks/libvirt/openshift-node/config.yml +++ /dev/null @@ -1,102 +0,0 @@ -- name: node/config.yml, populate oo_nodes_to_config host group if needed - hosts: localhost - gather_facts: no - tasks: - - name: "Evaluate oo_host_group_exp if it's set" - add_host: - name: '{{ item }}' - groups: oo_nodes_to_config - with_items: "{{ oo_host_group_exp | default('') }}" - when: oo_host_group_exp is defined - - - add_host: - name: "{{ groups['tag_env-host-type-' ~ cluster_id ~ '-openshift-master'][0] }}" - groups: oo_first_master - when: oo_host_group_exp is defined - - -- name: Gather and set facts for hosts to configure - hosts: oo_nodes_to_config - roles: - - openshift_facts - tasks: - # Since the master is registering the nodes before they are configured, we - # need to make sure to set the node properties beforehand if we do not want - # the defaults - - openshift_facts: - role: "{{ item.role }}" - local_facts: "{{ item.local_facts }}" - with_items: - - role: common - local_facts: - hostname: "{{ ansible_default_ipv4.address }}" - - role: node - local_facts: - external_id: "{{ openshift_node_external_id | default(None) }}" - resources_cpu: "{{ openshfit_node_resources_cpu | default(None) }}" - resources_memory: "{{ openshfit_node_resources_memory | default(None) }}" - pod_cidr: "{{ openshfit_node_pod_cidr | default(None) }}" - labels: "{{ openshfit_node_labels | default(None) }}" - annotations: "{{ openshfit_node_annotations | default(None) }}" - - -- name: Register nodes - hosts: oo_first_master - vars: - openshift_nodes: "{{ hostvars - | oo_select_keys(groups['oo_nodes_to_config']) }}" - roles: - - openshift_register_nodes - tasks: - - name: Create local temp directory for syncing certs - local_action: command /usr/bin/mktemp -d /tmp/openshift-ansible-XXXXXXX - register: mktemp - - - name: Sync master certs to localhost - synchronize: - mode: pull - checksum: yes - src: /var/lib/openshift/openshift.local.certificates - dest: "{{ mktemp.stdout }}" - -- name: Configure instances - hosts: oo_nodes_to_config - vars_files: - - vars.yml - vars: - sync_tmpdir: "{{ hostvars[groups['oo_first_master'][0]].mktemp.stdout }}" - cert_parent_rel_path: openshift.local.certificates - cert_rel_path: "{{ cert_parent_rel_path }}/node-{{ openshift.common.hostname }}" - cert_base_path: /var/lib/openshift - cert_parent_path: "{{ cert_base_path }}/{{ cert_parent_rel_path }}" - cert_path: "{{ cert_base_path }}/{{ cert_rel_path }}" - pre_tasks: - - name: Ensure certificate directories exists - file: - path: "{{ item }}" - state: directory - with_items: - - "{{ cert_path }}" - - "{{ cert_parent_path }}/ca" - - # TODO: notify restart openshift-node and/or restart openshift-sdn-node, - # possibly test service 
started time against certificate/config file - # timestamps in openshift-node or openshift-sdn-node to trigger notify - - name: Sync certs to nodes - synchronize: - checksum: yes - src: "{{ item.src }}" - dest: "{{ item.dest }}" - owner: no - group: no - with_items: - - src: "{{ sync_tmpdir }}/{{ cert_rel_path }}" - dest: "{{ cert_parent_path }}" - - src: "{{ sync_tmpdir }}/{{ cert_parent_rel_path }}/ca/cert.crt" - dest: "{{ cert_parent_path }}/ca/cert.crt" - - local_action: file name={{ sync_tmpdir }} state=absent - run_once: true - roles: - - openshift_node - - os_env_extras - - os_env_extras_node
diff --git a/playbooks/libvirt/openshift-node/filter_plugins b/playbooks/libvirt/openshift-node/filter_plugins deleted file mode 120000 index 99a95e4ca..000000000 --- a/playbooks/libvirt/openshift-node/filter_plugins +++ /dev/null @@ -1 +0,0 @@ -../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/libvirt/openshift-node/roles b/playbooks/libvirt/openshift-node/roles deleted file mode 120000 index 20c4c58cf..000000000 --- a/playbooks/libvirt/openshift-node/roles +++ /dev/null @@ -1 +0,0 @@ -../../../roles \ No newline at end of file
diff --git a/playbooks/libvirt/openshift-node/vars.yml b/playbooks/libvirt/openshift-node/vars.yml deleted file mode 100644 index ad0c0fbe2..000000000 --- a/playbooks/libvirt/openshift-node/vars.yml +++ /dev/null @@ -1 +0,0 @@ -openshift_debug_level: 4
diff --git a/playbooks/libvirt/templates/domain.xml b/playbooks/libvirt/templates/domain.xml deleted file mode 100644 index da037d138..000000000 --- a/playbooks/libvirt/templates/domain.xml +++ /dev/null @@ -1,62 +0,0 @@
[templates/domain.xml, 62 removed lines: the XML markup of this libvirt domain template was lost in extraction. The surviving text shows a domain named {{ item }} with numeric memory/vcpu settings, an hvm OS type, destroy/restart/restart lifecycle actions, and the /usr/bin/qemu-system-x86_64 emulator.]
diff --git a/playbooks/libvirt/templates/meta-data b/playbooks/libvirt/templates/meta-data deleted file mode 100644 index 5d779519f..000000000 --- a/playbooks/libvirt/templates/meta-data +++ /dev/null @@ -1,2 +0,0 @@ -instance-id: {{ item[0] }} -local-hostname: {{ item[0] }}
diff --git a/playbooks/libvirt/templates/user-data b/playbooks/libvirt/templates/user-data deleted file mode 100644 index 985badc8e..000000000 --- a/playbooks/libvirt/templates/user-data +++ /dev/null @@ -1,10 +0,0 @@ -#cloud-config - -disable_root: 0 - -system_info: - default_user: - name: root - -ssh_authorized_keys: - - {{ lookup('file', '~/.ssh/id_rsa.pub') }}
diff --git a/roles/openshift_common/tasks/main.yml b/roles/openshift_common/tasks/main.yml index 941190534..c55677c3f 100644 --- a/roles/openshift_common/tasks/main.yml +++ b/roles/openshift_common/tasks/main.yml @@ -1,7 +1,7 @@ --- - name: Set common OpenShift facts openshift_facts: - role: 'common' + role: common local_facts: cluster_id: "{{ openshift_cluster_id | default('default') }}" debug_level: "{{ openshift_debug_level | default(0) }}" @@ -10,7 +10,7 @@ public_hostname: "{{ openshift_public_hostname | default(None) }}" public_ip: "{{ openshift_public_ip | default(None) }}" use_openshift_sdn: "{{ openshift_use_openshift_sdn | default(None) }}" - + deployment_type: "{{ openshift_deployment_type }}" - name: Set hostname hostname: name={{ openshift.common.hostname }}
diff --git a/roles/openshift_common/vars/main.yml b/roles/openshift_common/vars/main.yml index 50816d319..9f657a2c7 100644 --- a/roles/openshift_common/vars/main.yml +++ b/roles/openshift_common/vars/main.yml @@ -5,3 +5,7 @@ # chains with the public zone (or the zone associated with the correct
# interfaces) os_firewall_use_firewalld: False + +openshift_cert_parent_dir: /var/lib/openshift +openshift_cert_relative_dir: openshift.local.certificates +openshift_cert_dir: "{{ openshift_cert_parent_dir }}/{{ openshift_cert_relative_dir }}" diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py index 0dd343443..1e0d5c605 100755 --- a/roles/openshift_facts/library/openshift_facts.py +++ b/roles/openshift_facts/library/openshift_facts.py @@ -21,8 +21,11 @@ class OpenShiftFactsUnsupportedRoleError(Exception): class OpenShiftFactsFileWriteError(Exception): pass +class OpenShiftFactsMetadataUnavailableError(Exception): + pass + class OpenShiftFacts(): - known_roles = ['common', 'master', 'node', 'master_sdn', 'node_sdn'] + known_roles = ['common', 'master', 'node', 'master_sdn', 'node_sdn', 'dns'] def __init__(self, role, filename, local_facts): self.changed = False @@ -169,20 +172,18 @@ class OpenShiftFacts(): return hostname def get_defaults(self, roles): - hardware_facts = self.get_hardware_facts() - net_facts = self.get_net_facts() - base_facts = self.get_base_facts() + ansible_facts = self.get_ansible_facts() defaults = dict() common = dict(use_openshift_sdn=True) - ip = net_facts['default_ipv4']['address'] + ip = ansible_facts['default_ipv4']['address'] common['ip'] = ip common['public_ip'] = ip rc, output, error = module.run_command(['hostname', '-f']) hostname_f = output.strip() if rc == 0 else '' - hostname_values = [hostname_f, base_facts['nodename'], base_facts['fqdn']] + hostname_values = [hostname_f, ansible_facts['nodename'], ansible_facts['fqdn']] hostname = self.choose_hostname(hostname_values) common['hostname'] = hostname @@ -196,14 +197,14 @@ class OpenShiftFacts(): master = dict(api_use_ssl=True, api_port='8443', console_use_ssl=True, console_path='/console', console_port='8443', etcd_use_ssl=False, - etcd_port='4001') + etcd_port='4001', portal_net='172.30.17.0/24') defaults['master'] = master if 'node' in roles: node = dict(external_id=common['hostname'], pod_cidr='', labels={}, annotations={}) - node['resources_cpu'] = hardware_facts['processor_cores'] - node['resources_memory'] = int(int(hardware_facts['memtotal_mb']) * 1024 * 1024 * 0.75) + node['resources_cpu'] = ansible_facts['processor_cores'] + node['resources_memory'] = int(int(ansible_facts['memtotal_mb']) * 1024 * 1024 * 0.75) defaults['node'] = node return defaults @@ -226,8 +227,7 @@ class OpenShiftFacts(): def query_metadata(self, metadata_url, headers=None, expect_json=False): r, info = fetch_url(module, metadata_url, headers=headers) if info['status'] != 200: - module.fail_json(msg='Failed to query metadata', result=r, - info=info) + raise OpenShiftFactsMetadataUnavailableError("Metadata unavailable") if expect_json: return module.from_json(r.read()) else: @@ -252,40 +252,27 @@ class OpenShiftFacts(): def get_provider_metadata(self, metadata_url, supports_recursive=False, headers=None, expect_json=False): - if supports_recursive: - metadata = self.query_metadata(metadata_url, headers, expect_json) - else: - metadata = self.walk_metadata(metadata_url, headers, expect_json) + try: + if supports_recursive: + metadata = self.query_metadata(metadata_url, headers, expect_json) + else: + metadata = self.walk_metadata(metadata_url, headers, expect_json) + except OpenShiftFactsMetadataUnavailableError as e: + metadata = None return metadata - def get_hardware_facts(self): - if not hasattr(self, 'hardware_facts'): - self.hardware_facts = 
Hardware().populate() - return self.hardware_facts - - def get_base_facts(self): - if not hasattr(self, 'base_facts'): - self.base_facts = Facts().populate() - return self.base_facts - - def get_virt_facts(self): - if not hasattr(self, 'virt_facts'): - self.virt_facts = Virtual().populate() - return self.virt_facts - - def get_net_facts(self): - if not hasattr(self, 'net_facts'): - self.net_facts = Network(module).populate() - return self.net_facts + def get_ansible_facts(self): + if not hasattr(self, 'ansible_facts'): + self.ansible_facts = ansible_facts(module) + return self.ansible_facts def guess_host_provider(self): # TODO: cloud provider facts should probably be submitted upstream - virt_facts = self.get_virt_facts() - hardware_facts = self.get_hardware_facts() - product_name = hardware_facts['product_name'] - product_version = hardware_facts['product_version'] - virt_type = virt_facts['virtualization_type'] - virt_role = virt_facts['virtualization_role'] + ansible_facts = self.get_ansible_facts() + product_name = ansible_facts['product_name'] + product_version = ansible_facts['product_version'] + virt_type = ansible_facts['virtualization_type'] + virt_role = ansible_facts['virtualization_role'] provider = None metadata = None @@ -300,8 +287,9 @@ class OpenShiftFacts(): True) # Filter sshKeys and serviceAccounts from gce metadata - metadata['project']['attributes'].pop('sshKeys', None) - metadata['instance'].pop('serviceAccounts', None) + if metadata: + metadata['project']['attributes'].pop('sshKeys', None) + metadata['instance'].pop('serviceAccounts', None) elif virt_type == 'xen' and virt_role == 'guest' and re.match(r'.*\.amazon$', product_version): provider = 'ec2' metadata_url = 'http://169.254.169.254/latest/meta-data/' @@ -310,12 +298,18 @@ class OpenShiftFacts(): provider = 'openstack' metadata_url = 'http://169.254.169.254/openstack/latest/meta_data.json' metadata = self.get_provider_metadata(metadata_url, True, None, True) - ec2_compat_url = 'http://169.254.169.254/latest/meta-data/' - metadata['ec2_compat'] = self.get_provider_metadata(ec2_compat_url) - # Filter public_keys and random_seed from openstack metadata - metadata.pop('public_keys', None) - metadata.pop('random_seed', None) + if metadata: + ec2_compat_url = 'http://169.254.169.254/latest/meta-data/' + metadata['ec2_compat'] = self.get_provider_metadata(ec2_compat_url) + + # Filter public_keys and random_seed from openstack metadata + metadata.pop('public_keys', None) + metadata.pop('random_seed', None) + + if not metadata['ec2_compat']: + metadata = None + return dict(name=provider, metadata=metadata) def normalize_provider_facts(self, provider, metadata): @@ -479,4 +473,6 @@ def main(): from ansible.module_utils.basic import * from ansible.module_utils.facts import * from ansible.module_utils.urls import * -main() + +if __name__ == '__main__': + main() diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml index aa615df39..1b1210007 100644 --- a/roles/openshift_master/tasks/main.yml +++ b/roles/openshift_master/tasks/main.yml @@ -11,33 +11,67 @@ api_url: "{{ openshift_master_api_url | default(None) }}" api_use_ssl: "{{ openshift_master_api_use_ssl | default(None) }}" public_api_url: "{{ openshift_master_public_api_url | default(None) }}" + console_path: "{{ openshift_master_console_path | default(None) }}" console_port: "{{ openshift_master_console_port | default(None) }}" console_url: "{{ openshift_master_console_url | default(None) }}" console_use_ssl: "{{ 
openshift_master_console_use_ssl | default(None) }}" public_console_url: "{{ openshift_master_public_console_url | default(None) }}" + etcd_port: "{{ openshift_master_etcd_port | default(None) }}" etcd_use_ssl: "{{ openshift_master_etcd_use_ssl | default(None) }}" + portal_net: "{{ openshift_master_portal_net | default(None) }}" + +# TODO: These values need to be configurable +- name: Set dns OpenShift facts + openshift_facts: + role: 'dns' + local_facts: + ip: "{{ openshift.common.ip }}" + domain: local - name: Install OpenShift Master package yum: pkg=openshift-master state=installed + register: install_result + +- name: Reload systemd units + command: systemctl daemon-reload + when: install_result | changed + +- name: Create certificate parent directory if it doesn't exist + file: + path: "{{ openshift_cert_parent_dir }}" + state: directory + +- name: Create config parent directory if it doesn't exist + file: + path: "{{ openshift_master_config | dirname }}" + state: directory + +# TODO: should probably use a template lookup for this +# TODO: should allow for setting --etcd, --kubernetes options +# TODO: recreate config if values change +- name: Use enterprise default for openshift_registry_url if not set + set_fact: + openshift_registry_url: "openshift3_beta/ose-${component}:${version}" + when: openshift.common.deployment_type == 'enterprise' and openshift_registry_url is not defined +- name: Create master config + command: > + /usr/bin/openshift start master --write-config + --config={{ openshift_master_config }} + --portal-net={{ openshift.master.portal_net }} + --master={{ openshift.master.api_url }} + --public-master={{ openshift.master.public_api_url }} + --listen={{ 'https' if openshift.master.api_use_ssl else 'http' }}://0.0.0.0:{{ openshift.master.api_port }} + {{ ('--images=' ~ openshift_registry_url) if openshift_registry_url is defined else '' }} + {{ ('--nodes=' ~ openshift_node_ips | join(',')) if openshift_node_ips is defined else '' }} + args: + chdir: "{{ openshift_cert_parent_dir }}" + creates: "{{ openshift_master_config }}" -# TODO: We should pre-generate the master config and point to the generated -# config rather than setting command line flags here - name: Configure OpenShift settings lineinfile: dest: /etc/sysconfig/openshift-master regexp: '^OPTIONS=' - line: "OPTIONS=\"--master={{ openshift.common.hostname }} --public-master={{ openshift.common.public_hostname }} {% if openshift_node_ips %} --nodes={{ openshift_node_ips | join(',') }} {% endif %} --loglevel={{ openshift.master.debug_level }}\"" - notify: - - restart openshift-master - -# TODO: should this be populated by a fact based on the deployment type -# (origin, online, enterprise)? 
-- name: Set default registry url - lineinfile: - dest: /etc/sysconfig/openshift-master - regexp: '^IMAGES=' - line: "IMAGES={{ openshift_registry_url }}" - when: openshift_registry_url is defined + line: "OPTIONS=\"--config={{ openshift_master_config }} --loglevel={{ openshift.master.debug_level }}\"" notify: - restart openshift-master @@ -53,6 +87,6 @@ # TODO: Update this file if the contents of the source file are not present in # the dest file, will need to make sure to ignore things that could be added - name: Configure root user kubeconfig - command: cp /var/lib/openshift/openshift.local.certificates/openshift-client/.kubeconfig /root/.kube/.kubeconfig + command: cp {{ openshift_cert_dir }}/openshift-client/.kubeconfig /root/.kube/.kubeconfig args: creates: /root/.kube/.kubeconfig diff --git a/roles/openshift_master/vars/main.yml b/roles/openshift_master/vars/main.yml new file mode 100644 index 000000000..c52d957ac --- /dev/null +++ b/roles/openshift_master/vars/main.yml @@ -0,0 +1,5 @@ +--- +openshift_master_config: /etc/openshift/master.yaml +openshift_master_ca_dir: "{{ openshift_cert_dir }}/ca" +openshift_master_ca_cert: "{{ openshift_master_ca_dir }}/cert.crt" +openshift_master_ca_key: "{{ openshift_master_ca_dir }}/key.key" diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml index e3c04585b..3d56bdd67 100644 --- a/roles/openshift_node/tasks/main.yml +++ b/roles/openshift_node/tasks/main.yml @@ -13,17 +13,22 @@ failed_when: not result.stat.exists register: result with_items: - - "{{ cert_path }}" - - "{{ cert_path }}/cert.crt" - - "{{ cert_path }}/key.key" - - "{{ cert_path }}/.kubeconfig" - - "{{ cert_path }}/server.crt" - - "{{ cert_path }}/server.key" - - "{{ cert_parent_path }}/ca/cert.crt" - #- "{{ cert_path }}/node.yaml" + - "{{ openshift_node_cert_dir }}" + - "{{ openshift_node_cert_dir }}/ca.crt" + - "{{ openshift_node_cert_dir }}/client.crt" + - "{{ openshift_node_cert_dir }}/client.key" + - "{{ openshift_node_cert_dir }}/.kubeconfig" + - "{{ openshift_node_cert_dir }}/node-config.yaml" + - "{{ openshift_node_cert_dir }}/server.crt" + - "{{ openshift_node_cert_dir }}/server.key" - name: Install OpenShift Node package yum: pkg=openshift-node state=installed + register: install_result + +- name: Reload systemd units + command: systemctl daemon-reload + when: install_result | changed # --create-certs=false is a temporary workaround until # https://github.com/openshift/origin/pull/1361 is merged upstream and it is @@ -32,16 +37,7 @@ lineinfile: dest: /etc/sysconfig/openshift-node regexp: '^OPTIONS=' - line: "OPTIONS=\"--hostname={{ openshift.common.hostname }} --loglevel={{ openshift.node.debug_level }} --create-certs=false\"" - notify: - - restart openshift-node - -- name: Set default registry url - lineinfile: - dest: /etc/sysconfig/openshift-node - regexp: '^IMAGES=' - line: "IMAGES={{ openshift_registry_url }}" - when: openshift_registry_url is defined + line: "OPTIONS=\"--loglevel={{ openshift.node.debug_level }} --config={{ openshift_node_cert_dir }}/node-config.yaml\"" notify: - restart openshift-node diff --git a/roles/openshift_node/vars/main.yml b/roles/openshift_node/vars/main.yml new file mode 100644 index 000000000..c6be83139 --- /dev/null +++ b/roles/openshift_node/vars/main.yml @@ -0,0 +1,2 @@ +--- +openshift_node_cert_dir: /etc/openshift/node diff --git a/roles/openshift_register_nodes/defaults/main.yml b/roles/openshift_register_nodes/defaults/main.yml index 3501e8922..a0befab44 100644 --- 
a/roles/openshift_register_nodes/defaults/main.yml +++ b/roles/openshift_register_nodes/defaults/main.yml @@ -1,5 +1,2 @@ --- openshift_kube_api_version: v1beta1 -openshift_cert_dir: openshift.local.certificates -openshift_cert_dir_parent: /var/lib/openshift -openshift_cert_dir_abs: "{{ openshift_cert_dir_parent ~ '/' ~ openshift_cert_dir }}" diff --git a/roles/openshift_register_nodes/library/kubernetes_register_node.py b/roles/openshift_register_nodes/library/kubernetes_register_node.py index 8ebeb087a..1ec977716 100755 --- a/roles/openshift_register_nodes/library/kubernetes_register_node.py +++ b/roles/openshift_register_nodes/library/kubernetes_register_node.py @@ -97,10 +97,8 @@ class ClientConfigException(Exception): class ClientConfig: def __init__(self, client_opts, module): - _, output, error = module.run_command(["/usr/bin/openshift", "ex", - "config", "view", "-o", - "json"] + client_opts, - check_rc = True) + kubectl = module.params['kubectl_cmd'] + _, output, error = module.run_command(kubectl + ["config", "view", "-o", "json"] + client_opts, check_rc = True) self.config = json.loads(output) if not (bool(self.config['clusters']) or @@ -146,6 +144,9 @@ class ClientConfig: def get_cluster_for_context(self, context): return self.get_value_for_context(context, 'cluster') + def get_namespace_for_context(self, context): + return self.get_value_for_context(context, 'namespace') + class Util: @staticmethod def remove_empty_elements(mapping): @@ -247,15 +248,15 @@ class Node: return Util.remove_empty_elements(node) def exists(self): - _, output, error = self.module.run_command(["/usr/bin/osc", "get", - "nodes"] + self.client_opts, - check_rc = True) + kubectl = self.module.params['kubectl_cmd'] + _, output, error = self.module.run_command(kubectl + ["get", "nodes"] + self.client_opts, check_rc = True) if re.search(self.module.params['name'], output, re.MULTILINE): return True return False def create(self): - cmd = ['/usr/bin/osc'] + self.client_opts + ['create', 'node', '-f', '-'] + kubectl = self.module.params['kubectl_cmd'] + cmd = kubectl + self.client_opts + ['create', '-f', '-'] rc, output, error = self.module.run_command(cmd, data=self.module.jsonify(self.get_node())) if rc != 0: @@ -273,24 +274,26 @@ class Node: def main(): module = AnsibleModule( - argument_spec = dict( - name = dict(required = True, type = 'str'), - host_ip = dict(type = 'str'), - hostnames = dict(type = 'list', default = []), - external_ips = dict(type = 'list', default = []), - internal_ips = dict(type = 'list', default = []), - api_version = dict(type = 'str', default = 'v1beta1', # TODO: after kube rebase, we can default to v1beta3 - choices = ['v1beta1', 'v1beta3']), - cpu = dict(type = 'str'), - memory = dict(type = 'str'), - labels = dict(type = 'dict', default = {}), # TODO: needs documented - annotations = dict(type = 'dict', default = {}), # TODO: needs documented - pod_cidr = dict(type = 'str'), # TODO: needs documented - external_id = dict(type = 'str'), # TODO: needs documented - client_config = dict(type = 'str'), # TODO: needs documented - client_cluster = dict(type = 'str', default = 'master'), # TODO: needs documented - client_context = dict(type = 'str', default = 'master'), # TODO: needs documented - client_user = dict(type = 'str', default = 'admin') # TODO: needs documented + argument_spec = dict( + name = dict(required = True, type = 'str'), + host_ip = dict(type = 'str'), + hostnames = dict(type = 'list', default = []), + external_ips = dict(type = 'list', default = []), + internal_ips = 
dict(type = 'list', default = []), + api_version = dict(type = 'str', default = 'v1beta1', # TODO: after kube rebase, we can default to v1beta3 + choices = ['v1beta1', 'v1beta3']), + cpu = dict(type = 'str'), + memory = dict(type = 'str'), + labels = dict(type = 'dict', default = {}), # TODO: needs documented + annotations = dict(type = 'dict', default = {}), # TODO: needs documented + pod_cidr = dict(type = 'str'), # TODO: needs documented + external_id = dict(type = 'str'), # TODO: needs documented + client_config = dict(type = 'str'), # TODO: needs documented + client_cluster = dict(type = 'str', default = 'master'), # TODO: needs documented + client_context = dict(type = 'str', default = 'default'), # TODO: needs documented + client_namespace = dict(type = 'str', default = 'default'), # TODO: needs documented + client_user = dict(type = 'str', default = 'system:openshift-client'), # TODO: needs documented + kubectl_cmd = dict(type = 'list', default = ['kubectl']) # TODO: needs documented ), mutually_exclusive = [ ['host_ip', 'external_ips'], @@ -333,14 +336,16 @@ def main(): client_cluster = module.params['client_cluster'] if config.has_cluster(client_cluster): - if client_cluster != config.get_cluster_for_context(client_cluster): + if client_cluster != config.get_cluster_for_context(client_context): client_opts.append("--cluster=%s" % client_cluster) else: module.fail_json(msg="Cluster %s not found in client config" % client_cluster) - # TODO: provide sane defaults for some (like hostname, externalIP, - # internalIP, etc) + client_namespace = module.params['client_namespace'] + if client_namespace != config.get_namespace_for_context(client_context): + client_opts.append("--namespace=%s" % client_namespace) + node = Node(module, client_opts, module.params['api_version'], module.params['name'], module.params['host_ip'], module.params['hostnames'], module.params['external_ips'], diff --git a/roles/openshift_register_nodes/tasks/main.yml b/roles/openshift_register_nodes/tasks/main.yml index 7319b88b1..85f490f70 100644 --- a/roles/openshift_register_nodes/tasks/main.yml +++ b/roles/openshift_register_nodes/tasks/main.yml @@ -3,53 +3,37 @@ # TODO: recreate master/node configs if settings that affect the configs # change (hostname, public_hostname, ip, public_ip, etc) -# TODO: create a failed_when condition -- name: Create node server certificates - command: > - /usr/bin/openshift admin create-server-cert - --overwrite=false - --cert={{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}/server.crt - --key={{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}/server.key - --hostnames={{ [item.openshift.common.hostname, - item.openshift.common.public_hostname]|unique|join(",") }} - args: - chdir: "{{ openshift_cert_dir_parent }}" - creates: "{{ openshift_cert_dir_abs }}/node-{{ item.openshift.common.hostname }}/server.crt" - with_items: openshift_nodes - register: server_cert_result - -# TODO: create a failed_when condition -- name: Create node client certificates - command: > - /usr/bin/openshift admin create-node-cert - --overwrite=false - --cert={{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}/cert.crt - --key={{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}/key.key - --node-name={{ item.openshift.common.hostname }} - args: - chdir: "{{ openshift_cert_dir_parent }}" - creates: "{{ openshift_cert_dir_abs }}/node-{{ item.openshift.common.hostname }}/cert.crt" - with_items: openshift_nodes - register: node_cert_result +# TODO: use a 
template lookup here # TODO: create a failed_when condition -- name: Create kubeconfigs for nodes +- name: Use enterprise default for openshift_registry_url if not set + set_fact: + openshift_registry_url: "openshift3_beta/ose-${component}:${version}" + when: openshift.common.deployment_type == 'enterprise' and openshift_registry_url is not defined +- name: Create node config command: > - /usr/bin/openshift admin create-kubeconfig - --client-certificate={{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}/cert.crt - --client-key={{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}/key.key - --kubeconfig={{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}/.kubeconfig - --master={{ openshift.master.api_url }} - --public-master={{ openshift.master.public_api_url }} + /usr/bin/openshift admin create-node-config + --node-dir={{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }} + --node={{ item.openshift.common.hostname }} + --hostnames={{ [item.openshift.common.hostname, item.openshift.common.public_hostname]|unique|join(",") }} + --dns-domain={{ openshift.dns.domain }} + --dns-ip={{ openshift.dns.ip }} + --master={{ openshift.master.api_url }} + --signer-key={{ openshift_master_ca_key }} + --signer-cert={{ openshift_master_ca_cert }} + --certificate-authority={{ openshift_master_ca_cert }} + --signer-serial={{ openshift_master_ca_dir }}/serial.txt + --node-client-certificate-authority={{ openshift_master_ca_cert }} + {{ ('--images=' ~ openshift_registry_url) if openshift_registry_url is defined else '' }} + --listen=https://0.0.0.0:10250 args: - chdir: "{{ openshift_cert_dir_parent }}" - creates: "{{ openshift_cert_dir_abs }}/node-{{ item.openshift.common.hostname }}/.kubeconfig" + chdir: "{{ openshift_cert_parent_dir }}" + creates: "{{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}" with_items: openshift_nodes - register: kubeconfig_result - name: Register unregistered nodes kubernetes_register_node: - client_user: openshift-client + kubectl_cmd: ['openshift', 'kube'] name: "{{ item.openshift.common.hostname }}" api_version: "{{ openshift_kube_api_version }}" cpu: "{{ item.openshift.node.resources_cpu | default(None) }}" @@ -61,7 +45,5 @@ external_id: "{{ item.openshift.node.external_id }}" # TODO: support customizing other attributes such as: client_config, # client_cluster, client_context, client_user - # TODO: update for v1beta3 changes after rebase: hostnames, external_ips, - # internal_ips, external_id with_items: openshift_nodes register: register_result diff --git a/roles/openshift_register_nodes/vars/main.yml b/roles/openshift_register_nodes/vars/main.yml new file mode 100644 index 000000000..bd497f08f --- /dev/null +++ b/roles/openshift_register_nodes/vars/main.yml @@ -0,0 +1,7 @@ +--- +openshift_cert_parent_dir: /var/lib/openshift +openshift_cert_relative_dir: openshift.local.certificates +openshift_cert_dir: "{{ openshift_cert_parent_dir }}/{{ openshift_cert_relative_dir }}" +openshift_master_ca_dir: "{{ openshift_cert_dir }}/ca" +openshift_master_ca_cert: "{{ openshift_master_ca_dir }}/cert.crt" +openshift_master_ca_key: "{{ openshift_master_ca_dir }}/key.key" diff --git a/roles/openshift_repos/README.md b/roles/openshift_repos/README.md index 6713e11fc..6bbedd839 100644 --- a/roles/openshift_repos/README.md +++ b/roles/openshift_repos/README.md @@ -14,7 +14,7 @@ Role Variables | Name | Default value | | |-------------------------------|---------------|----------------------------------------------| -| 
openshift_deployment_type | online | Possible values enterprise, origin, online | +| openshift_deployment_type | None | Possible values enterprise, origin, online | | openshift_additional_repos | {} | TODO | Dependencies diff --git a/roles/openshift_repos/defaults/main.yaml b/roles/openshift_repos/defaults/main.yaml index 1730207f4..7c5a14cd7 100644 --- a/roles/openshift_repos/defaults/main.yaml +++ b/roles/openshift_repos/defaults/main.yaml @@ -1,7 +1,2 @@ --- -# TODO: once we are able to configure/deploy origin using the openshift roles, -# then we should default to origin - -# TODO: push the defaulting of these values to the openshift_facts module -openshift_deployment_type: online openshift_additional_repos: {} diff --git a/roles/openshift_repos/files/online/RPM-GPG-KEY-redhat-beta b/roles/openshift_repos/files/online/RPM-GPG-KEY-redhat-beta deleted file mode 100644 index 7b40671a4..000000000 --- a/roles/openshift_repos/files/online/RPM-GPG-KEY-redhat-beta +++ /dev/null @@ -1,61 +0,0 @@ ------BEGIN PGP PUBLIC KEY BLOCK----- -Version: GnuPG v1.2.6 (GNU/Linux) - -mQINBEmkAzABEAC2/c7bP1lHQ3XScxbIk0LQWe1YOiibQBRLwf8Si5PktgtuPibT -kKpZjw8p4D+fM7jD1WUzUE0X7tXg2l/eUlMM4dw6XJAQ1AmEOtlwSg7rrMtTvM0A -BEtI7Km6fC6sU6RtBMdcqD1cH/6dbsfh8muznVA7UlX+PRBHVzdWzj6y8h84dBjo -gzcbYu9Hezqgj/lLzicqsSZPz9UdXiRTRAIhp8V30BD8uRaaa0KDDnD6IzJv3D9P -xQWbFM4Z12GN9LyeZqmD7bpKzZmXG/3drvfXVisXaXp3M07t3NlBa3Dt8NFIKZ0D -FRXBz5bvzxRVmdH6DtkDWXDPOt+Wdm1rZrCOrySFpBZQRpHw12eo1M1lirANIov7 -Z+V1Qh/aBxj5EUu32u9ZpjAPPNtQF6F/KjaoHHHmEQAuj4DLex4LY646Hv1rcv2i -QFuCdvLKQGSiFBrfZH0j/IX3/0JXQlZzb3MuMFPxLXGAoAV9UP/Sw/WTmAuTzFVm -G13UYFeMwrToOiqcX2VcK0aC1FCcTP2z4JW3PsWvU8rUDRUYfoXovc7eg4Vn5wHt -0NBYsNhYiAAf320AUIHzQZYi38JgVwuJfFu43tJZE4Vig++RQq6tsEx9Ftz3EwRR -fJ9z9mEvEiieZm+vbOvMvIuimFVPSCmLH+bI649K8eZlVRWsx3EXCVb0nQARAQAB -tDBSZWQgSGF0LCBJbmMuIChiZXRhIGtleSAyKSA8c2VjdXJpdHlAcmVkaGF0LmNv -bT6JAjYEEwECACAFAkpSM+cCGwMGCwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRCT -ioDK8hVB6/9tEAC0+KmzeKceXQ/GTUoU6jy9vtkFCFrmv+c7ol4XpdTt0QhqBOwy -6m2mKWwmm8KfYfy0cADQ4y/EcoXl7FtFBwYmkCuEQGXhTDn9DvVjhooIq59LEMBQ -OW879RwwzRIZ8ebbjMUjDPF5MfPQqP2LBu9N4KvXlZp4voykwuuaJ+cbsKZR6pZ6 -0RQKPHKP+NgUFC0fff7XY9cuOZZWFAeKRhLN2K7bnRHKxp+kELWb6R9ZfrYwZjWc -MIPbTd1khE53L4NTfpWfAnJRtkPSDOKEGVlVLtLq4HEAxQt07kbslqISRWyXER3u -QOJj64D1ZiIMz6t6uZ424VE4ry9rBR0Jz55cMMx5O/ni9x3xzFUgH8Su2yM0r3jE -Rf24+tbOaPf7tebyx4OKe+JW95hNVstWUDyGbs6K9qGfI/pICuO1nMMFTo6GqzQ6 -DwLZvJ9QdXo7ujEtySZnfu42aycaQ9ZLC2DOCQCUBY350Hx6FLW3O546TAvpTfk0 -B6x+DV7mJQH7MGmRXQsE7TLBJKjq28Cn4tVp04PmybQyTxZdGA/8zY6pPl6xyVMH -V68hSBKEVT/rlouOHuxfdmZva1DhVvUC6Xj7+iTMTVJUAq/4Uyn31P1OJmA2a0PT -CAqWkbJSgKFccsjPoTbLyxhuMSNkEZFHvlZrSK9vnPzmfiRH0Orx3wYpMQ== -=21pb ------END PGP PUBLIC KEY BLOCK----- -The following public key can be used to verify RPM packages built and -signed by Red Hat, Inc. for this beta using `rpm -K' using the GNU GPG -package. Questions about this key should be sent to security@redhat.com. 
- - ------BEGIN PGP PUBLIC KEY BLOCK----- -Version: GnuPG v1.0.6 (GNU/Linux) -Comment: For info see http://www.gnupg.org - -mQGiBDySTqsRBACzc7xuCIp10oj5B2PAV4XzDeVxprv/WTMreSNSK+iC0bEz0IBp -Vnn++qtyiXfH+bGIE9jqZgIEnpttWhUOaU5LhcLFzy+m8NWfngIFP9QfGmGAe9Gd -LFeAdhj4RmSG/vgr7vDd83Hz22dv403Ar/sliWO4vDOrMmZBG57WGYTWtwCgkMsi -UUQuJ6slbzKn82w+bYxOlL0EAIylWJGaTkKOTL5DqVR3ik9aT0Dt3FNVYiuhcKBe -II4E3KOIVA9kO8in1IZjx2gs6K2UV+GsoAVANdfKL7l9O+k+J8OxhE74oycvYJxW -QzCgXMZkNcvW5wyXwEMcr6TVd/5BGztcMw8oT3/l2MtAEG/vn1XaWToRSO1XDMDz -+AjUA/4m0mTkN8S4wjzJG8lqN7+quW3UOaiCe8J3SFrrrhE0XbY9cTJI/9nuXHU1 -VjqOSmXQYH2Db7UOroFTBiWhlAedA4O4yuK52AJnvSsHbnJSEmn9rpo5z1Q8F+qI -mDlzriJdrIrVLeDiUeTlpH3kpG38D7007GhXBV72k1gpMoMcpbQ3UmVkIEhhdCwg -SW5jLiAoQmV0YSBUZXN0IFNvZnR3YXJlKSA8cmF3aGlkZUByZWRoYXQuY29tPohX -BBMRAgAXBQI8l5p/BQsHCgMEAxUDAgMWAgECF4AACgkQ/TcmiYl9oHqdeQCfZjw4 -F9sir3XfRAjVe9kYNcQ8hnIAn0WgyT7H5RriWYTOCfauOmd+cAW4iEYEEBECAAYF -AjyXmqQACgkQIZGAzdtCpg5nDQCfepuRUyuVJvhuQkPWySETYvRw+WoAnjAWhx6q -0npMx4OE1JGFi8ymKXktuQENBDySTq4QBADKL/mK7S8E3synxISlu7R6fUvu07Oc -RoX96n0Di6T+BS99hC44XzHjMDhUX2ZzVvYS88EZXoUDDkB/8g7SwZrOJ/QE1zrI -JmSVciNhSYWwqeT40Evs88ajZUfDiNbS/cSC6oui98iS4vxd7sE7IPY+FSx9vuAR -xOa9vBnJY/dx0wADBQQAosm+Iltt2uigC6LJzxNOoIdB5r0GqTC1o5sHCeNqXJhU -ExAG8m74uzMlYVLOpGZi4y4NwwAWvCWC0MWWnnu+LGFy1wKiJKRjhv5F+WkFutY5 -WHV5L44vp9jSIlBCRG+84jheTh8xqhndM9wOfPwWdYYu1vxrB8Tn6kA17PcYfHSI -RgQYEQIABgUCPJJergAKCRD9NyaJiX2geiCPAJ4nEM4NtI9Uj8lONDk6FU86PmoL -yACfb68fBd2pWEzLKsOk9imIobHHpzE= -=gpIn ------END PGP PUBLIC KEY BLOCK----- diff --git a/roles/openshift_repos/files/online/RPM-GPG-KEY-redhat-release b/roles/openshift_repos/files/online/RPM-GPG-KEY-redhat-release deleted file mode 100644 index 0f83b622d..000000000 --- a/roles/openshift_repos/files/online/RPM-GPG-KEY-redhat-release +++ /dev/null @@ -1,63 +0,0 @@ -The following public key can be used to verify RPM packages built and -signed by Red Hat, Inc. This key is used for packages in Red Hat -products shipped after November 2009, and for all updates to those -products. - -Questions about this key should be sent to security@redhat.com. - -pub 4096R/FD431D51 2009-10-22 Red Hat, Inc. 
(release key 2) - ------BEGIN PGP PUBLIC KEY BLOCK----- -Version: GnuPG v1.2.6 (GNU/Linux) - -mQINBErgSTsBEACh2A4b0O9t+vzC9VrVtL1AKvUWi9OPCjkvR7Xd8DtJxeeMZ5eF -0HtzIG58qDRybwUe89FZprB1ffuUKzdE+HcL3FbNWSSOXVjZIersdXyH3NvnLLLF -0DNRB2ix3bXG9Rh/RXpFsNxDp2CEMdUvbYCzE79K1EnUTVh1L0Of023FtPSZXX0c -u7Pb5DI5lX5YeoXO6RoodrIGYJsVBQWnrWw4xNTconUfNPk0EGZtEnzvH2zyPoJh -XGF+Ncu9XwbalnYde10OCvSWAZ5zTCpoLMTvQjWpbCdWXJzCm6G+/hx9upke546H -5IjtYm4dTIVTnc3wvDiODgBKRzOl9rEOCIgOuGtDxRxcQkjrC+xvg5Vkqn7vBUyW -9pHedOU+PoF3DGOM+dqv+eNKBvh9YF9ugFAQBkcG7viZgvGEMGGUpzNgN7XnS1gj -/DPo9mZESOYnKceve2tIC87p2hqjrxOHuI7fkZYeNIcAoa83rBltFXaBDYhWAKS1 -PcXS1/7JzP0ky7d0L6Xbu/If5kqWQpKwUInXtySRkuraVfuK3Bpa+X1XecWi24JY -HVtlNX025xx1ewVzGNCTlWn1skQN2OOoQTV4C8/qFpTW6DTWYurd4+fE0OJFJZQF -buhfXYwmRlVOgN5i77NTIJZJQfYFj38c/Iv5vZBPokO6mffrOTv3MHWVgQARAQAB -tDNSZWQgSGF0LCBJbmMuIChyZWxlYXNlIGtleSAyKSA8c2VjdXJpdHlAcmVkaGF0 -LmNvbT6JAjYEEwECACAFAkrgSTsCGwMGCwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAK -CRAZni+R/UMdUWzpD/9s5SFR/ZF3yjY5VLUFLMXIKUztNN3oc45fyLdTI3+UClKC -2tEruzYjqNHhqAEXa2sN1fMrsuKec61Ll2NfvJjkLKDvgVIh7kM7aslNYVOP6BTf -C/JJ7/ufz3UZmyViH/WDl+AYdgk3JqCIO5w5ryrC9IyBzYv2m0HqYbWfphY3uHw5 -un3ndLJcu8+BGP5F+ONQEGl+DRH58Il9Jp3HwbRa7dvkPgEhfFR+1hI+Btta2C7E -0/2NKzCxZw7Lx3PBRcU92YKyaEihfy/aQKZCAuyfKiMvsmzs+4poIX7I9NQCJpyE -IGfINoZ7VxqHwRn/d5mw2MZTJjbzSf+Um9YJyA0iEEyD6qjriWQRbuxpQXmlAJbh -8okZ4gbVFv1F8MzK+4R8VvWJ0XxgtikSo72fHjwha7MAjqFnOq6eo6fEC/75g3NL -Ght5VdpGuHk0vbdENHMC8wS99e5qXGNDued3hlTavDMlEAHl34q2H9nakTGRF5Ki -JUfNh3DVRGhg8cMIti21njiRh7gyFI2OccATY7bBSr79JhuNwelHuxLrCFpY7V25 -OFktl15jZJaMxuQBqYdBgSay2G0U6D1+7VsWufpzd/Abx1/c3oi9ZaJvW22kAggq -dzdA27UUYjWvx42w9menJwh/0jeQcTecIUd0d0rFcw/c1pvgMMl/Q73yzKgKYw== -=zbHE ------END PGP PUBLIC KEY BLOCK----- -The following public key can be used to verify RPM packages built and -signed by Red Hat, Inc. This key is a supporting (auxiliary) key for -Red Hat products shipped after November 2006 and for all updates to -those products. - -Questions about this key should be sent to security@redhat.com. 
- ------BEGIN PGP PUBLIC KEY BLOCK----- -Version: GnuPG v1.2.6 (GNU/Linux) - -mQGiBEVwDGkRBACwPhZIpvkjI8wV9sFTDoqyPLx1ub8Sd/w+YuI5Ovm49mvvEQVT -VLg8FgE5JlST59AbsLDyVtRa9CxIvN5syBVrWWWtHtDnnylFBcqG/A6J3bI4E9/A -UtSL5Zxbav0+utP6f3wOpxQrxc+WIDVgpurdBKAQ3dsobGBqypeX6FXZ5wCgou6C -yZpGIBqosJaDWLzNeOfb/70D/1thLkQyhW3JJ6cHCYJHNfBShvbLWBf6S231mgmu -MyMlt8Kmipc9bw+saaAkSkVsQ/ZbfjrWB7e5kbMruKLVrH+nGhamlHYUGyAPtsPg -Uj/NUSj5BmrCsOkMpn43ngTLssE9MLhSPj2nIHGFv9B+iVLvomDdwnaBRgQ1aK8z -z6MAA/406yf5yVJ/MlTWs1/68VwDhosc9BtU1V5IE0NXgZUAfBJzzfVzzKQq6zJ2 -eZsMLhr96wbsW13zUZt1ing+ulwh2ee4meuJq6h/971JspFY/XBhcfq4qCNqVjsq -SZnWoGdCO6J8CxPIemD2IUHzjoyyeEj3RVydup6pcWZAmhzkKrQzUmVkIEhhdCwg -SW5jLiAoYXV4aWxpYXJ5IGtleSkgPHNlY3VyaXR5QHJlZGhhdC5jb20+iF4EExEC -AB4FAkVwDGkCGwMGCwkIBwMCAxUCAwMWAgECHgECF4AACgkQRWiciC+mWOC1rQCg -ooNLCFOzNPcvhd9Za8C801HmnsYAniCw3yzrCqtjYnxDDxlufH0FVTwX -=d/bm ------END PGP PUBLIC KEY BLOCK----- - diff --git a/roles/openshift_repos/files/online/epel7-kubernetes.repo b/roles/openshift_repos/files/online/epel7-kubernetes.repo deleted file mode 100644 index 1deae2939..000000000 --- a/roles/openshift_repos/files/online/epel7-kubernetes.repo +++ /dev/null @@ -1,6 +0,0 @@ -[maxamillion-epel7-kubernetes] -name=Copr repo for epel7-kubernetes owned by maxamillion -baseurl=http://copr-be.cloud.fedoraproject.org/results/maxamillion/epel7-kubernetes/epel-7-$basearch/ -skip_if_unavailable=True -gpgcheck=0 -enabled=1 diff --git a/roles/openshift_repos/files/online/epel7-openshift.repo b/roles/openshift_repos/files/online/epel7-openshift.repo deleted file mode 100644 index c7629872d..000000000 --- a/roles/openshift_repos/files/online/epel7-openshift.repo +++ /dev/null @@ -1,6 +0,0 @@ -[maxamillion-origin-next] -name=Copr repo for origin-next owned by maxamillion -baseurl=http://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/ -skip_if_unavailable=False -gpgcheck=0 -enabled=1 diff --git a/roles/openshift_repos/files/online/gpg_keys/RPM-GPG-KEY-redhat-beta b/roles/openshift_repos/files/online/gpg_keys/RPM-GPG-KEY-redhat-beta new file mode 100644 index 000000000..7b40671a4 --- /dev/null +++ b/roles/openshift_repos/files/online/gpg_keys/RPM-GPG-KEY-redhat-beta @@ -0,0 +1,61 @@ +-----BEGIN PGP PUBLIC KEY BLOCK----- +Version: GnuPG v1.2.6 (GNU/Linux) + +mQINBEmkAzABEAC2/c7bP1lHQ3XScxbIk0LQWe1YOiibQBRLwf8Si5PktgtuPibT +kKpZjw8p4D+fM7jD1WUzUE0X7tXg2l/eUlMM4dw6XJAQ1AmEOtlwSg7rrMtTvM0A +BEtI7Km6fC6sU6RtBMdcqD1cH/6dbsfh8muznVA7UlX+PRBHVzdWzj6y8h84dBjo +gzcbYu9Hezqgj/lLzicqsSZPz9UdXiRTRAIhp8V30BD8uRaaa0KDDnD6IzJv3D9P +xQWbFM4Z12GN9LyeZqmD7bpKzZmXG/3drvfXVisXaXp3M07t3NlBa3Dt8NFIKZ0D +FRXBz5bvzxRVmdH6DtkDWXDPOt+Wdm1rZrCOrySFpBZQRpHw12eo1M1lirANIov7 +Z+V1Qh/aBxj5EUu32u9ZpjAPPNtQF6F/KjaoHHHmEQAuj4DLex4LY646Hv1rcv2i +QFuCdvLKQGSiFBrfZH0j/IX3/0JXQlZzb3MuMFPxLXGAoAV9UP/Sw/WTmAuTzFVm +G13UYFeMwrToOiqcX2VcK0aC1FCcTP2z4JW3PsWvU8rUDRUYfoXovc7eg4Vn5wHt +0NBYsNhYiAAf320AUIHzQZYi38JgVwuJfFu43tJZE4Vig++RQq6tsEx9Ftz3EwRR +fJ9z9mEvEiieZm+vbOvMvIuimFVPSCmLH+bI649K8eZlVRWsx3EXCVb0nQARAQAB +tDBSZWQgSGF0LCBJbmMuIChiZXRhIGtleSAyKSA8c2VjdXJpdHlAcmVkaGF0LmNv +bT6JAjYEEwECACAFAkpSM+cCGwMGCwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRCT +ioDK8hVB6/9tEAC0+KmzeKceXQ/GTUoU6jy9vtkFCFrmv+c7ol4XpdTt0QhqBOwy +6m2mKWwmm8KfYfy0cADQ4y/EcoXl7FtFBwYmkCuEQGXhTDn9DvVjhooIq59LEMBQ +OW879RwwzRIZ8ebbjMUjDPF5MfPQqP2LBu9N4KvXlZp4voykwuuaJ+cbsKZR6pZ6 +0RQKPHKP+NgUFC0fff7XY9cuOZZWFAeKRhLN2K7bnRHKxp+kELWb6R9ZfrYwZjWc +MIPbTd1khE53L4NTfpWfAnJRtkPSDOKEGVlVLtLq4HEAxQt07kbslqISRWyXER3u +QOJj64D1ZiIMz6t6uZ424VE4ry9rBR0Jz55cMMx5O/ni9x3xzFUgH8Su2yM0r3jE 
+Rf24+tbOaPf7tebyx4OKe+JW95hNVstWUDyGbs6K9qGfI/pICuO1nMMFTo6GqzQ6 +DwLZvJ9QdXo7ujEtySZnfu42aycaQ9ZLC2DOCQCUBY350Hx6FLW3O546TAvpTfk0 +B6x+DV7mJQH7MGmRXQsE7TLBJKjq28Cn4tVp04PmybQyTxZdGA/8zY6pPl6xyVMH +V68hSBKEVT/rlouOHuxfdmZva1DhVvUC6Xj7+iTMTVJUAq/4Uyn31P1OJmA2a0PT +CAqWkbJSgKFccsjPoTbLyxhuMSNkEZFHvlZrSK9vnPzmfiRH0Orx3wYpMQ== +=21pb +-----END PGP PUBLIC KEY BLOCK----- +The following public key can be used to verify RPM packages built and +signed by Red Hat, Inc. for this beta using `rpm -K' using the GNU GPG +package. Questions about this key should be sent to security@redhat.com. + + +-----BEGIN PGP PUBLIC KEY BLOCK----- +Version: GnuPG v1.0.6 (GNU/Linux) +Comment: For info see http://www.gnupg.org + +mQGiBDySTqsRBACzc7xuCIp10oj5B2PAV4XzDeVxprv/WTMreSNSK+iC0bEz0IBp +Vnn++qtyiXfH+bGIE9jqZgIEnpttWhUOaU5LhcLFzy+m8NWfngIFP9QfGmGAe9Gd +LFeAdhj4RmSG/vgr7vDd83Hz22dv403Ar/sliWO4vDOrMmZBG57WGYTWtwCgkMsi +UUQuJ6slbzKn82w+bYxOlL0EAIylWJGaTkKOTL5DqVR3ik9aT0Dt3FNVYiuhcKBe +II4E3KOIVA9kO8in1IZjx2gs6K2UV+GsoAVANdfKL7l9O+k+J8OxhE74oycvYJxW +QzCgXMZkNcvW5wyXwEMcr6TVd/5BGztcMw8oT3/l2MtAEG/vn1XaWToRSO1XDMDz ++AjUA/4m0mTkN8S4wjzJG8lqN7+quW3UOaiCe8J3SFrrrhE0XbY9cTJI/9nuXHU1 +VjqOSmXQYH2Db7UOroFTBiWhlAedA4O4yuK52AJnvSsHbnJSEmn9rpo5z1Q8F+qI +mDlzriJdrIrVLeDiUeTlpH3kpG38D7007GhXBV72k1gpMoMcpbQ3UmVkIEhhdCwg +SW5jLiAoQmV0YSBUZXN0IFNvZnR3YXJlKSA8cmF3aGlkZUByZWRoYXQuY29tPohX +BBMRAgAXBQI8l5p/BQsHCgMEAxUDAgMWAgECF4AACgkQ/TcmiYl9oHqdeQCfZjw4 +F9sir3XfRAjVe9kYNcQ8hnIAn0WgyT7H5RriWYTOCfauOmd+cAW4iEYEEBECAAYF +AjyXmqQACgkQIZGAzdtCpg5nDQCfepuRUyuVJvhuQkPWySETYvRw+WoAnjAWhx6q +0npMx4OE1JGFi8ymKXktuQENBDySTq4QBADKL/mK7S8E3synxISlu7R6fUvu07Oc +RoX96n0Di6T+BS99hC44XzHjMDhUX2ZzVvYS88EZXoUDDkB/8g7SwZrOJ/QE1zrI +JmSVciNhSYWwqeT40Evs88ajZUfDiNbS/cSC6oui98iS4vxd7sE7IPY+FSx9vuAR +xOa9vBnJY/dx0wADBQQAosm+Iltt2uigC6LJzxNOoIdB5r0GqTC1o5sHCeNqXJhU +ExAG8m74uzMlYVLOpGZi4y4NwwAWvCWC0MWWnnu+LGFy1wKiJKRjhv5F+WkFutY5 +WHV5L44vp9jSIlBCRG+84jheTh8xqhndM9wOfPwWdYYu1vxrB8Tn6kA17PcYfHSI +RgQYEQIABgUCPJJergAKCRD9NyaJiX2geiCPAJ4nEM4NtI9Uj8lONDk6FU86PmoL +yACfb68fBd2pWEzLKsOk9imIobHHpzE= +=gpIn +-----END PGP PUBLIC KEY BLOCK----- diff --git a/roles/openshift_repos/files/online/gpg_keys/RPM-GPG-KEY-redhat-release b/roles/openshift_repos/files/online/gpg_keys/RPM-GPG-KEY-redhat-release new file mode 100644 index 000000000..0f83b622d --- /dev/null +++ b/roles/openshift_repos/files/online/gpg_keys/RPM-GPG-KEY-redhat-release @@ -0,0 +1,63 @@ +The following public key can be used to verify RPM packages built and +signed by Red Hat, Inc. This key is used for packages in Red Hat +products shipped after November 2009, and for all updates to those +products. + +Questions about this key should be sent to security@redhat.com. + +pub 4096R/FD431D51 2009-10-22 Red Hat, Inc. 
(release key 2) + +-----BEGIN PGP PUBLIC KEY BLOCK----- +Version: GnuPG v1.2.6 (GNU/Linux) + +mQINBErgSTsBEACh2A4b0O9t+vzC9VrVtL1AKvUWi9OPCjkvR7Xd8DtJxeeMZ5eF +0HtzIG58qDRybwUe89FZprB1ffuUKzdE+HcL3FbNWSSOXVjZIersdXyH3NvnLLLF +0DNRB2ix3bXG9Rh/RXpFsNxDp2CEMdUvbYCzE79K1EnUTVh1L0Of023FtPSZXX0c +u7Pb5DI5lX5YeoXO6RoodrIGYJsVBQWnrWw4xNTconUfNPk0EGZtEnzvH2zyPoJh +XGF+Ncu9XwbalnYde10OCvSWAZ5zTCpoLMTvQjWpbCdWXJzCm6G+/hx9upke546H +5IjtYm4dTIVTnc3wvDiODgBKRzOl9rEOCIgOuGtDxRxcQkjrC+xvg5Vkqn7vBUyW +9pHedOU+PoF3DGOM+dqv+eNKBvh9YF9ugFAQBkcG7viZgvGEMGGUpzNgN7XnS1gj +/DPo9mZESOYnKceve2tIC87p2hqjrxOHuI7fkZYeNIcAoa83rBltFXaBDYhWAKS1 +PcXS1/7JzP0ky7d0L6Xbu/If5kqWQpKwUInXtySRkuraVfuK3Bpa+X1XecWi24JY +HVtlNX025xx1ewVzGNCTlWn1skQN2OOoQTV4C8/qFpTW6DTWYurd4+fE0OJFJZQF +buhfXYwmRlVOgN5i77NTIJZJQfYFj38c/Iv5vZBPokO6mffrOTv3MHWVgQARAQAB +tDNSZWQgSGF0LCBJbmMuIChyZWxlYXNlIGtleSAyKSA8c2VjdXJpdHlAcmVkaGF0 +LmNvbT6JAjYEEwECACAFAkrgSTsCGwMGCwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAK +CRAZni+R/UMdUWzpD/9s5SFR/ZF3yjY5VLUFLMXIKUztNN3oc45fyLdTI3+UClKC +2tEruzYjqNHhqAEXa2sN1fMrsuKec61Ll2NfvJjkLKDvgVIh7kM7aslNYVOP6BTf +C/JJ7/ufz3UZmyViH/WDl+AYdgk3JqCIO5w5ryrC9IyBzYv2m0HqYbWfphY3uHw5 +un3ndLJcu8+BGP5F+ONQEGl+DRH58Il9Jp3HwbRa7dvkPgEhfFR+1hI+Btta2C7E +0/2NKzCxZw7Lx3PBRcU92YKyaEihfy/aQKZCAuyfKiMvsmzs+4poIX7I9NQCJpyE +IGfINoZ7VxqHwRn/d5mw2MZTJjbzSf+Um9YJyA0iEEyD6qjriWQRbuxpQXmlAJbh +8okZ4gbVFv1F8MzK+4R8VvWJ0XxgtikSo72fHjwha7MAjqFnOq6eo6fEC/75g3NL +Ght5VdpGuHk0vbdENHMC8wS99e5qXGNDued3hlTavDMlEAHl34q2H9nakTGRF5Ki +JUfNh3DVRGhg8cMIti21njiRh7gyFI2OccATY7bBSr79JhuNwelHuxLrCFpY7V25 +OFktl15jZJaMxuQBqYdBgSay2G0U6D1+7VsWufpzd/Abx1/c3oi9ZaJvW22kAggq +dzdA27UUYjWvx42w9menJwh/0jeQcTecIUd0d0rFcw/c1pvgMMl/Q73yzKgKYw== +=zbHE +-----END PGP PUBLIC KEY BLOCK----- +The following public key can be used to verify RPM packages built and +signed by Red Hat, Inc. This key is a supporting (auxiliary) key for +Red Hat products shipped after November 2006 and for all updates to +those products. + +Questions about this key should be sent to security@redhat.com. 
+ +-----BEGIN PGP PUBLIC KEY BLOCK----- +Version: GnuPG v1.2.6 (GNU/Linux) + +mQGiBEVwDGkRBACwPhZIpvkjI8wV9sFTDoqyPLx1ub8Sd/w+YuI5Ovm49mvvEQVT +VLg8FgE5JlST59AbsLDyVtRa9CxIvN5syBVrWWWtHtDnnylFBcqG/A6J3bI4E9/A +UtSL5Zxbav0+utP6f3wOpxQrxc+WIDVgpurdBKAQ3dsobGBqypeX6FXZ5wCgou6C +yZpGIBqosJaDWLzNeOfb/70D/1thLkQyhW3JJ6cHCYJHNfBShvbLWBf6S231mgmu +MyMlt8Kmipc9bw+saaAkSkVsQ/ZbfjrWB7e5kbMruKLVrH+nGhamlHYUGyAPtsPg +Uj/NUSj5BmrCsOkMpn43ngTLssE9MLhSPj2nIHGFv9B+iVLvomDdwnaBRgQ1aK8z +z6MAA/406yf5yVJ/MlTWs1/68VwDhosc9BtU1V5IE0NXgZUAfBJzzfVzzKQq6zJ2 +eZsMLhr96wbsW13zUZt1ing+ulwh2ee4meuJq6h/971JspFY/XBhcfq4qCNqVjsq +SZnWoGdCO6J8CxPIemD2IUHzjoyyeEj3RVydup6pcWZAmhzkKrQzUmVkIEhhdCwg +SW5jLiAoYXV4aWxpYXJ5IGtleSkgPHNlY3VyaXR5QHJlZGhhdC5jb20+iF4EExEC +AB4FAkVwDGkCGwMGCwkIBwMCAxUCAwMWAgECHgECF4AACgkQRWiciC+mWOC1rQCg +ooNLCFOzNPcvhd9Za8C801HmnsYAniCw3yzrCqtjYnxDDxlufH0FVTwX +=d/bm +-----END PGP PUBLIC KEY BLOCK----- + diff --git a/roles/openshift_repos/files/online/oso-rhui-rhel-7-extras.repo b/roles/openshift_repos/files/online/oso-rhui-rhel-7-extras.repo deleted file mode 100644 index cfe41f691..000000000 --- a/roles/openshift_repos/files/online/oso-rhui-rhel-7-extras.repo +++ /dev/null @@ -1,23 +0,0 @@ -[oso-rhui-rhel-server-extras] -name=OpenShift Online RHUI Mirror RH Enterprise Linux - Extras -baseurl=https://gce-mirror1.ops.rhcloud.com/libra/rhui-rhel-server-7-extras/ - https://mirror.ops.rhcloud.com/libra/rhui-rhel-server-7-extras/ -enabled=1 -gpgcheck=1 -gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release,file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-beta -failovermethod=priority -sslverify=False -sslclientcert=/var/lib/yum/client-cert.pem -sslclientkey=/var/lib/yum/client-key.pem - -[oso-rhui-rhel-server-extras-htb] -name=OpenShift Online RHUI Mirror RH Enterprise Linux - Extras HTB -baseurl=https://gce-mirror1.ops.rhcloud.com/libra/rhui-rhel-server-7-extras-htb/ - https://mirror.ops.rhcloud.com/libra/rhui-rhel-server-7-extras-htb/ -enabled=0 -gpgcheck=1 -gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release,file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-beta -failovermethod=priority -sslverify=False -sslclientcert=/var/lib/yum/client-cert.pem -sslclientkey=/var/lib/yum/client-key.pem diff --git a/roles/openshift_repos/files/online/oso-rhui-rhel-7-server.repo b/roles/openshift_repos/files/online/oso-rhui-rhel-7-server.repo deleted file mode 100644 index ddc93193d..000000000 --- a/roles/openshift_repos/files/online/oso-rhui-rhel-7-server.repo +++ /dev/null @@ -1,21 +0,0 @@ -[oso-rhui-rhel-server-releases] -name=OpenShift Online RHUI Mirror RH Enterprise Linux 7 -baseurl=https://gce-mirror1.ops.rhcloud.com/libra/rhui-rhel-server-7-releases/ - https://mirror.ops.rhcloud.com/libra/rhui-rhel-server-7-releases/ -enabled=1 -gpgcheck=1 -gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release -sslverify=False -sslclientcert=/var/lib/yum/client-cert.pem -sslclientkey=/var/lib/yum/client-key.pem - -[oso-rhui-rhel-server-releases-optional] -name=OpenShift Online RHUI Mirror RH Enterprise Linux 7 - Optional -baseurl=https://gce-mirror1.ops.rhcloud.com/libra/rhui-rhel-server-7-releases-optional/ - https://mirror.ops.rhcloud.com/libra/rhui-rhel-server-7-releases-optional/ -enabled=1 -gpgcheck=1 -gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release -sslverify=False -sslclientcert=/var/lib/yum/client-cert.pem -sslclientkey=/var/lib/yum/client-key.pem diff --git a/roles/openshift_repos/files/online/repos/epel7-openshift.repo b/roles/openshift_repos/files/online/repos/epel7-openshift.repo new file mode 100644 
index 000000000..c7629872d --- /dev/null +++ b/roles/openshift_repos/files/online/repos/epel7-openshift.repo @@ -0,0 +1,6 @@ +[maxamillion-origin-next] +name=Copr repo for origin-next owned by maxamillion +baseurl=http://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/ +skip_if_unavailable=False +gpgcheck=0 +enabled=1 diff --git a/roles/openshift_repos/files/online/repos/oso-rhui-rhel-7-extras.repo b/roles/openshift_repos/files/online/repos/oso-rhui-rhel-7-extras.repo new file mode 100644 index 000000000..cfe41f691 --- /dev/null +++ b/roles/openshift_repos/files/online/repos/oso-rhui-rhel-7-extras.repo @@ -0,0 +1,23 @@ +[oso-rhui-rhel-server-extras] +name=OpenShift Online RHUI Mirror RH Enterprise Linux - Extras +baseurl=https://gce-mirror1.ops.rhcloud.com/libra/rhui-rhel-server-7-extras/ + https://mirror.ops.rhcloud.com/libra/rhui-rhel-server-7-extras/ +enabled=1 +gpgcheck=1 +gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release,file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-beta +failovermethod=priority +sslverify=False +sslclientcert=/var/lib/yum/client-cert.pem +sslclientkey=/var/lib/yum/client-key.pem + +[oso-rhui-rhel-server-extras-htb] +name=OpenShift Online RHUI Mirror RH Enterprise Linux - Extras HTB +baseurl=https://gce-mirror1.ops.rhcloud.com/libra/rhui-rhel-server-7-extras-htb/ + https://mirror.ops.rhcloud.com/libra/rhui-rhel-server-7-extras-htb/ +enabled=0 +gpgcheck=1 +gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release,file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-beta +failovermethod=priority +sslverify=False +sslclientcert=/var/lib/yum/client-cert.pem +sslclientkey=/var/lib/yum/client-key.pem diff --git a/roles/openshift_repos/files/online/repos/oso-rhui-rhel-7-server.repo b/roles/openshift_repos/files/online/repos/oso-rhui-rhel-7-server.repo new file mode 100644 index 000000000..ddc93193d --- /dev/null +++ b/roles/openshift_repos/files/online/repos/oso-rhui-rhel-7-server.repo @@ -0,0 +1,21 @@ +[oso-rhui-rhel-server-releases] +name=OpenShift Online RHUI Mirror RH Enterprise Linux 7 +baseurl=https://gce-mirror1.ops.rhcloud.com/libra/rhui-rhel-server-7-releases/ + https://mirror.ops.rhcloud.com/libra/rhui-rhel-server-7-releases/ +enabled=1 +gpgcheck=1 +gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release +sslverify=False +sslclientcert=/var/lib/yum/client-cert.pem +sslclientkey=/var/lib/yum/client-key.pem + +[oso-rhui-rhel-server-releases-optional] +name=OpenShift Online RHUI Mirror RH Enterprise Linux 7 - Optional +baseurl=https://gce-mirror1.ops.rhcloud.com/libra/rhui-rhel-server-7-releases-optional/ + https://mirror.ops.rhcloud.com/libra/rhui-rhel-server-7-releases-optional/ +enabled=1 +gpgcheck=1 +gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release +sslverify=False +sslclientcert=/var/lib/yum/client-cert.pem +sslclientkey=/var/lib/yum/client-key.pem diff --git a/roles/openshift_repos/files/online/repos/rhel-7-libra-candidate.repo b/roles/openshift_repos/files/online/repos/rhel-7-libra-candidate.repo new file mode 100644 index 000000000..b4215679f --- /dev/null +++ b/roles/openshift_repos/files/online/repos/rhel-7-libra-candidate.repo @@ -0,0 +1,11 @@ +[rhel-7-libra-candidate] +name=rhel-7-libra-candidate - \$basearch +baseurl=https://gce-mirror1.ops.rhcloud.com/libra/rhel-7-libra-candidate/\$basearch/ + https://mirror.ops.rhcloud.com/libra/rhel-7-libra-candidate/\$basearch/ +gpgkey=https://mirror.ops.rhcloud.com/libra/RPM-GPG-KEY-redhat-openshifthosted +skip_if_unavailable=True +gpgcheck=0 +enabled=1 
+sslclientcert=/var/lib/yum/client-cert.pem +sslclientkey=/var/lib/yum/client-key.pem +sslverify=False diff --git a/roles/openshift_repos/files/online/rhel-7-libra-candidate.repo b/roles/openshift_repos/files/online/rhel-7-libra-candidate.repo deleted file mode 100644 index b4215679f..000000000 --- a/roles/openshift_repos/files/online/rhel-7-libra-candidate.repo +++ /dev/null @@ -1,11 +0,0 @@ -[rhel-7-libra-candidate] -name=rhel-7-libra-candidate - \$basearch -baseurl=https://gce-mirror1.ops.rhcloud.com/libra/rhel-7-libra-candidate/\$basearch/ - https://mirror.ops.rhcloud.com/libra/rhel-7-libra-candidate/\$basearch/ -gpgkey=https://mirror.ops.rhcloud.com/libra/RPM-GPG-KEY-redhat-openshifthosted -skip_if_unavailable=True -gpgcheck=0 -enabled=1 -sslclientcert=/var/lib/yum/client-cert.pem -sslclientkey=/var/lib/yum/client-key.pem -sslverify=False diff --git a/roles/openshift_repos/files/origin/repos/maxamillion-origin-next-epel-7.repo b/roles/openshift_repos/files/origin/repos/maxamillion-origin-next-epel-7.repo new file mode 100644 index 000000000..0b21e0a65 --- /dev/null +++ b/roles/openshift_repos/files/origin/repos/maxamillion-origin-next-epel-7.repo @@ -0,0 +1,7 @@ +[maxamillion-origin-next] +name=Copr repo for origin-next owned by maxamillion +baseurl=https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/ +skip_if_unavailable=True +gpgcheck=1 +gpgkey=https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg +enabled=1 diff --git a/roles/openshift_repos/tasks/main.yaml b/roles/openshift_repos/tasks/main.yaml index bb1551d37..12e98b7a1 100644 --- a/roles/openshift_repos/tasks/main.yaml +++ b/roles/openshift_repos/tasks/main.yaml @@ -10,10 +10,6 @@ - assert: that: openshift_deployment_type in known_openshift_deployment_types -# TODO: remove this when origin support actually works -- fail: msg="OpenShift Origin support is not currently enabled" - when: openshift_deployment_type == 'origin' - - name: Ensure libselinux-python is installed yum: pkg: libselinux-python @@ -36,17 +32,15 @@ path: "/etc/yum.repos.d/{{ item | basename }}" state: absent with_fileglob: - - '*/*' - when: not (item | search("/files/" + openshift_deployment_type + "/")) and (item | search(".repo$")) + - '*/repos/*' + when: not (item | search("/files/" ~ openshift_deployment_type ~ "/repos")) - name: Configure gpg keys if needed copy: src={{ item }} dest=/etc/pki/rpm-gpg/ with_fileglob: - - "{{ openshift_deployment_type }}/*" - when: item | basename | match("RPM-GPG-KEY-") + - "{{ openshift_deployment_type }}/gpg_keys/*" - name: Configure yum repositories copy: src={{ item }} dest=/etc/yum.repos.d/ with_fileglob: - - "{{ openshift_deployment_type }}/*" - when: item | basename | search(".*\.repo$") + - "{{ openshift_deployment_type }}/repos/*" diff --git a/roles/openshift_repos/templates/yum_repo.j2 b/roles/openshift_repos/templates/yum_repo.j2 index 7ea2c7460..2d9243545 100644 --- a/roles/openshift_repos/templates/yum_repo.j2 +++ b/roles/openshift_repos/templates/yum_repo.j2 @@ -1,4 +1,3 @@ -# {{ ansible_managed }} {% for repo in openshift_additional_repos %} [{{ repo.id }}] name={{ repo.name | default(repo.id) }} diff --git a/roles/openshift_sdn_master/tasks/main.yml b/roles/openshift_sdn_master/tasks/main.yml index f2d61043b..77e7a80ba 100644 --- a/roles/openshift_sdn_master/tasks/main.yml +++ b/roles/openshift_sdn_master/tasks/main.yml @@ -12,12 +12,21 @@ yum: pkg: openshift-sdn-master state: installed + register: install_result +- name: Reload systemd units + 
command: systemctl daemon-reload +  when: install_result | changed + +# TODO: we should probably generate certs specifically for sdn - name: Configure openshift-sdn-master settings lineinfile: dest: /etc/sysconfig/openshift-sdn-master regexp: '^OPTIONS=' -    line: "OPTIONS=\"-v={{ openshift.master_sdn.debug_level }}\"" +    line: "OPTIONS=\"-v={{ openshift.master_sdn.debug_level }} -etcd-endpoints={{ openshift_sdn_master_url }} +      -etcd-cafile={{ openshift_cert_dir }}/ca/ca.crt +      -etcd-certfile={{ openshift_cert_dir }}/openshift-client/cert.crt +      -etcd-keyfile={{ openshift_cert_dir }}/openshift-client/key.key\"" notify: - restart openshift-sdn-master diff --git a/roles/openshift_sdn_node/tasks/main.yml b/roles/openshift_sdn_node/tasks/main.yml index 729c28879..c2329dd6f 100644 --- a/roles/openshift_sdn_node/tasks/main.yml +++ b/roles/openshift_sdn_node/tasks/main.yml @@ -9,9 +9,15 @@ yum: pkg: openshift-sdn-node state: installed +  register: install_result + +- name: Reload systemd units +  command: systemctl daemon-reload +  when: install_result | changed # TODO: we are specifying -hostname= for OPTIONS as a workaround for # openshift-sdn-node not properly detecting the hostname. +# TODO: we should probably generate certs specifically for sdn - name: Configure openshift-sdn-node settings lineinfile: dest: /etc/sysconfig/openshift-sdn-node @@ -20,7 +26,10 @@ backrefs: yes with_items: - regex: '^(OPTIONS=)' -      line: '\1"-v={{ openshift.node_sdn.debug_level }} -hostname={{ openshift.common.hostname }}"' +      line: '\1"-v={{ openshift.node_sdn.debug_level }} -hostname={{ openshift.common.hostname }} +        -etcd-cafile={{ openshift_node_cert_dir }}/ca.crt +        -etcd-certfile={{ openshift_node_cert_dir }}/client.crt +        -etcd-keyfile={{ openshift_node_cert_dir }}/client.key"' - regex: '^(MASTER_URL=)' line: '\1"{{ openshift_sdn_master_url }}"' - regex: '^(MINION_IP=)' diff --git a/roles/os_firewall/library/os_firewall_manage_iptables.py b/roles/os_firewall/library/os_firewall_manage_iptables.py index 90588d2ae..9d0af497d 100755 --- a/roles/os_firewall/library/os_firewall_manage_iptables.py +++ b/roles/os_firewall/library/os_firewall_manage_iptables.py @@ -270,4 +270,5 @@ def main(): # import module snippets from ansible.module_utils.basic import * -main() +if __name__ == '__main__': +    main() -- cgit v1.2.3 From dbb252bc04a6488c1fde05dbc325b246fd4a651e Mon Sep 17 00:00:00 2001 From: Jason DeTiberus Date: Wed, 15 Apr 2015 20:52:38 -0400 Subject: Fixup typos --- playbooks/aws/openshift-cluster/launch.yml | 2 +- playbooks/gce/openshift-cluster/launch.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'playbooks/aws') diff --git a/playbooks/aws/openshift-cluster/launch.yml b/playbooks/aws/openshift-cluster/launch.yml index e7125ea0c..a0de00fc3 100644 --- a/playbooks/aws/openshift-cluster/launch.yml +++ b/playbooks/aws/openshift-cluster/launch.yml @@ -7,7 +7,7 @@ - vars.yml tasks: - fail: -      msg: Deployment type not supported for libvirt provider yet +      msg: Deployment type not supported for aws provider yet when: deployment_type == 'enterprise' - include: ../../common/openshift-cluster/set_master_launch_facts_tasks.yml diff --git a/playbooks/gce/openshift-cluster/launch.yml b/playbooks/gce/openshift-cluster/launch.yml index 34a5a0b94..771f51e91 100644 --- a/playbooks/gce/openshift-cluster/launch.yml +++ b/playbooks/gce/openshift-cluster/launch.yml @@ -6,7 +6,7 @@ vars_files: - vars.yml tasks: -  - fail: msg="Deployment type not 
supported for gce provider yet" when: deployment_type == 'enterprise' - include: ../../common/openshift-cluster/set_master_launch_facts_tasks.yml -- cgit v1.2.3 From 735355f75ab24d14aadbc30ec334dadc789028db Mon Sep 17 00:00:00 2001 From: Troy Dawson Date: Thu, 16 Apr 2015 16:01:26 -0500 Subject: update tower ami image to latest libra-ops-rhel7 --- playbooks/aws/ansible-tower/launch.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'playbooks/aws') diff --git a/playbooks/aws/ansible-tower/launch.yml b/playbooks/aws/ansible-tower/launch.yml index 4c29fa833..56235bc8a 100644 --- a/playbooks/aws/ansible-tower/launch.yml +++ b/playbooks/aws/ansible-tower/launch.yml @@ -6,7 +6,7 @@ vars: inst_region: us-east-1 -    rhel7_ami: ami-a24e30ca +    rhel7_ami: ami-906240f8 user_data_file: user_data.txt vars_files: -- cgit v1.2.3 From 0ecefd20d06e67823cb033d4ac7ec4b57e613af6 Mon Sep 17 00:00:00 2001 From: Jason DeTiberus Date: Mon, 20 Apr 2015 23:45:15 -0400 Subject: Remove deployment-type tags --- playbooks/aws/openshift-cluster/tasks/launch_instances.yml | 3 +-- playbooks/gce/openshift-cluster/tasks/launch_instances.yml | 1 - playbooks/libvirt/openshift-cluster/templates/domain.xml | 1 - 3 files changed, 1 insertion(+), 4 deletions(-) (limited to 'playbooks/aws') diff --git a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml index 58b4082df..77ee25424 100644 --- a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml +++ b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml @@ -26,7 +26,6 @@ env: "{{ env }}" host-type: "{{ host_type }}" env-host-type: "{{ env_host_type }}" -      deployment-type: "{{ deployment_type }}" register: ec2 - name: Add Name tag to instances @@ -39,7 +38,7 @@ Name: "{{ item.0 }}" - set_fact: -    instance_groups: tag_created-by_{{ created_by }}, tag_env_{{ env }}, tag_host-type_{{ host_type }}, tag_env-host-type_{{ env_host_type }}, tag_deployment-type_{{ deployment_type }} +    instance_groups: tag_created-by_{{ created_by }}, tag_env_{{ env }}, tag_host-type_{{ host_type }}, tag_env-host-type_{{ env_host_type }} - name: Add new instances groups and variables add_host: diff --git a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml index a68edefae..9a9848f05 100644 --- a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml +++ b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml @@ -15,7 +15,6 @@ - env-{{ cluster }} - host-type-{{ type }} - env-host-type-{{ cluster }}-openshift-{{ type }} -    - deployment-type-{{ deployment_type }} register: gce - name: Add new instances to groups and set variables needed diff --git a/playbooks/libvirt/openshift-cluster/templates/domain.xml b/playbooks/libvirt/openshift-cluster/templates/domain.xml index 7656249da..df200e374 100644 --- a/playbooks/libvirt/openshift-cluster/templates/domain.xml +++ b/playbooks/libvirt/openshift-cluster/templates/domain.xml @@ -3,7 +3,6 @@ 1 -      deployment-type-{{ deployment_type }} env-{{ cluster }} env-host-type-{{ cluster }}-openshift-{{ type }} host-type-{{ type }} -- cgit v1.2.3 From ba5ae4dbc7741af1963df36fd92bcd0af03c6b4f Mon Sep 17 00:00:00 2001 From: Jason DeTiberus Date: Thu, 16 Apr 2015 22:44:12 -0400 Subject: aws terminate playbook improvements - Reduce duplication in terminate playbooks between openshift-master and openshift-node (they both now just include playbooks/aws/terminate.yml) - update openshift-cluster terminate playbook to 
include the new shared terminate playbook, also delete all cluster hosts at once instead of treating masters and nodes differently. - remove env, host-type and env-host-type tags from instance before terminating (since most users can't terminate, we are mostly just renaming instances to -terminate and stopping them, so this prevents "terminated" hosts from being returned by the dynamic inventory, at least after the cache is refreshed) --- playbooks/aws/openshift-cluster/terminate.yml | 16 ++----- playbooks/aws/openshift-master/terminate.yml | 55 +---------------------- playbooks/aws/openshift-node/terminate.yml | 55 +---------------------- playbooks/aws/terminate.yml | 64 +++++++++++++++++++++++++++ 4 files changed, 69 insertions(+), 121 deletions(-) create mode 100644 playbooks/aws/terminate.yml (limited to 'playbooks/aws') diff --git a/playbooks/aws/openshift-cluster/terminate.yml b/playbooks/aws/openshift-cluster/terminate.yml index 1d2b60594..617d0d456 100644 --- a/playbooks/aws/openshift-cluster/terminate.yml +++ b/playbooks/aws/openshift-cluster/terminate.yml @@ -5,22 +5,12 @@ vars_files: - vars.yml tasks: - - set_fact: scratch_group=tag_env-host-type_{{ cluster_id }}-openshift-node + - set_fact: scratch_group=tag_env_{{ cluster_id }} - add_host: name: "{{ item }}" - groups: oo_nodes_to_terminate + groups: oo_hosts_to_terminate ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" with_items: groups[scratch_group] | default([]) | difference(['localhost']) - - set_fact: scratch_group=tag_env-host-type_{{ cluster_id }}-openshift-master - - add_host: - name: "{{ item }}" - groups: oo_masters_to_terminate - ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" - ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}" - with_items: groups[scratch_group] | default([]) | difference(['localhost']) - -- include: ../openshift-node/terminate.yml - -- include: ../openshift-master/terminate.yml +- include: ../terminate.yml diff --git a/playbooks/aws/openshift-master/terminate.yml b/playbooks/aws/openshift-master/terminate.yml index a790336b1..07d9961bc 100644 --- a/playbooks/aws/openshift-master/terminate.yml +++ b/playbooks/aws/openshift-master/terminate.yml @@ -1,55 +1,2 @@ --- -- name: Populate oo_masters_to_terminate host group - hosts: localhost - gather_facts: no - tasks: - - name: Evaluate oo_masters_to_terminate - add_host: name={{ item }} groups=oo_masters_to_terminate - with_items: oo_host_group_exp | default([]) - -- name: Gather dynamic inventory variables for hosts to terminate - hosts: oo_masters_to_terminate - gather_facts: no - -- name: Terminate instances - hosts: localhost - connection: local - gather_facts: no - vars: - host_vars: "{{ hostvars - | oo_select_keys(groups['oo_masters_to_terminate']) }}" - tasks: - - name: Terminate instances - ec2: - state: absent - instance_ids: ["{{ item.ec2_id }}"] - region: "{{ item.ec2_region }}" - ignore_errors: yes - register: ec2_term - with_items: host_vars - when: "'oo_masters_to_terminate' in groups" - - # Fail if any of the instances failed to terminate with an error other - # than 403 Forbidden - - fail: msg=Terminating instance {{ item.item.ec2_id }} failed with message {{ item.msg }} - when: "'oo_masters_to_terminate' in groups and item.failed and not item.msg | search(\"error: EC2ResponseError: 403 Forbidden\")" - with_items: ec2_term.results - - - name: Stop instance if termination failed - ec2: - state: stopped - instance_ids: ["{{ 
item.item.ec2_id }}"] - region: "{{ item.item.ec2_region }}" - register: ec2_stop - when: item.failed - with_items: ec2_term.results - when: "'oo_masters_to_terminate' in groups" - - - name: Rename stopped instances - ec2_tag: resource={{ item.item.item.ec2_id }} region={{ item.item.item.ec2_region }} state=present - args: - tags: - Name: "{{ item.item.item.ec2_tag_Name }}-terminate" - with_items: ec2_stop.results - when: "'oo_masters_to_terminate' in groups" - +- include: ../terminate.yml diff --git a/playbooks/aws/openshift-node/terminate.yml b/playbooks/aws/openshift-node/terminate.yml index 40ae56f99..07d9961bc 100644 --- a/playbooks/aws/openshift-node/terminate.yml +++ b/playbooks/aws/openshift-node/terminate.yml @@ -1,55 +1,2 @@ --- -- name: Populate oo_nodes_to_terminate host group - hosts: localhost - gather_facts: no - tasks: - - name: Evaluate oo_nodes_to_terminate - add_host: name={{ item }} groups=oo_nodes_to_terminate - with_items: oo_host_group_exp | default([]) - -- name: Gather dynamic inventory variables for hosts to terminate - hosts: oo_nodes_to_terminate - gather_facts: no - -- name: Terminate instances - hosts: localhost - connection: local - gather_facts: no - vars: - host_vars: "{{ hostvars - | oo_select_keys(groups['oo_nodes_to_terminate']) }}" - tasks: - - name: Terminate instances - ec2: - state: absent - instance_ids: ["{{ item.ec2_id }}"] - region: "{{ item.ec2_region }}" - ignore_errors: yes - register: ec2_term - with_items: host_vars - when: "'oo_nodes_to_terminate' in groups" - - # Fail if any of the instances failed to terminate with an error other - # than 403 Forbidden - - fail: msg=Terminating instance {{ item.item.ec2_id }} failed with message {{ item.msg }} - when: "'oo_nodes_to_terminate' in groups and item.failed and not item.msg | search(\"error: EC2ResponseError: 403 Forbidden\")" - with_items: ec2_term.results - - - name: Stop instance if termination failed - ec2: - state: stopped - instance_ids: ["{{ item.item.ec2_id }}"] - region: "{{ item.item.ec2_region }}" - register: ec2_stop - when: item.failed - with_items: ec2_term.results - when: "'oo_nodes_to_terminate' in groups" - - - name: Rename stopped instances - ec2_tag: resource={{ item.item.item.ec2_id }} region={{ item.item.item.ec2_region }} state=present - args: - tags: - Name: "{{ item.item.item.ec2_tag_Name }}-terminate" - with_items: ec2_stop.results - when: "'oo_nodes_to_terminate' in groups" - +- include: ../terminate.yml diff --git a/playbooks/aws/terminate.yml b/playbooks/aws/terminate.yml new file mode 100644 index 000000000..e9767b260 --- /dev/null +++ b/playbooks/aws/terminate.yml @@ -0,0 +1,64 @@ +--- +- name: Populate oo_hosts_to_terminate host group + hosts: localhost + gather_facts: no + tasks: + - name: Evaluate oo_hosts_to_terminate + add_host: name={{ item }} groups=oo_hosts_to_terminate + with_items: oo_host_group_exp | default([]) + +- name: Gather dynamic inventory variables for hosts to terminate + hosts: oo_hosts_to_terminate + gather_facts: no + +- name: Terminate instances + hosts: localhost + connection: local + gather_facts: no + vars: + host_vars: "{{ hostvars + | oo_select_keys(groups['oo_hosts_to_terminate']) }}" + tasks: + - name: Remove tags from instances + ec2_tag: resource={{ item.ec2_id }} region={{ item.ec2_region }} state=absent + args: + tags: + env: "{{ item['ec2_tag_env'] }}" + host-type: "{{ item['ec2_tag_host-type'] }}" + env-host-type: "{{ item['ec2_tag_env-host-type'] }}" + with_items: host_vars + when: "'oo_hosts_to_terminate' in groups" + + - 
name: Terminate instances + ec2: + state: absent + instance_ids: ["{{ item.ec2_id }}"] + region: "{{ item.ec2_region }}" + ignore_errors: yes + register: ec2_term + with_items: host_vars + when: "'oo_hosts_to_terminate' in groups" + + # Fail if any of the instances failed to terminate with an error other + # than 403 Forbidden + - fail: msg=Terminating instance {{ item.item.ec2_id }} failed with message {{ item.msg }} + when: "'oo_hosts_to_terminate' in groups and item.failed and not item.msg | search(\"error: EC2ResponseError: 403 Forbidden\")" + with_items: ec2_term.results + + - name: Stop instance if termination failed + ec2: + state: stopped + instance_ids: ["{{ item.item.ec2_id }}"] + region: "{{ item.item.ec2_region }}" + register: ec2_stop + when: item.failed + with_items: ec2_term.results + when: "'oo_hosts_to_terminate' in groups" + + - name: Rename stopped instances + ec2_tag: resource={{ item.item.item.ec2_id }} region={{ item.item.item.ec2_region }} state=present + args: + tags: + Name: "{{ item.item.item.ec2_tag_Name }}-terminate" + with_items: ec2_stop.results + when: "'oo_hosts_to_terminate' in groups" -- cgit v1.2.3 From 71ff62e46f464916f57345f4945f2e28cf5cdcdc Mon Sep 17 00:00:00 2001 From: Jason DeTiberus Date: Tue, 21 Apr 2015 10:40:55 -0400 Subject: add vpc support to ec2 cluster, add more overrides for variables --- .../openshift-cluster/tasks/launch_instances.yml | 26 +++++++++++++++------- playbooks/aws/openshift-cluster/vars.yml | 15 +++++++++++++ 2 files changed, 33 insertions(+), 8 deletions(-) (limited to 'playbooks/aws') diff --git a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml index 58b4082df..28582c84c 100644 --- a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml +++ b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml @@ -1,25 +1,35 @@ --- -# TODO: modify machine_image based on deployment_type - set_fact: - machine_type: "{{ lookup('env', 'ec2_instance_type') | default('m3.large', true) }}" - machine_image: "{{ lookup('env', 'ec2_ami') | default(deployment_vars[deployment_type].image, true) }}" - machine_region: "{{ lookup('env', 'ec2_region') | default(deployment_vars[deployment_type].region, true) }}" - machine_keypair: "{{ lookup('env', 'ec2_keypair')|default('libra', true) }}" created_by: "{{ lookup('env', 'LOGNAME')|default(cluster, true) }}" - security_group: "{{ lookup('env', 'ec2_security_group')|default('public', true) }}" env: "{{ cluster }}" - host_type: "{{ type }}" env_host_type: "{{ cluster }}-openshift-{{ type }}" + host_type: "{{ type }}" + machine_type: "{{ lookup('env', 'ec2_instance_type') + | default(deployment_vars[deployment_type].type, true) }}" + machine_image: "{{ lookup('env', 'ec2_ami') + | default(deployment_vars[deployment_type].image, true) }}" + machine_region: "{{ lookup('env', 'ec2_region') + | default(deployment_vars[deployment_type].region, true) }}" + machine_keypair: "{{ lookup('env', 'ec2_keypair') + | default(deployment_vars[deployment_type].keypair, true) }}" + machine_subnet: "{{ lookup('env', 'ec2_vpc_subnet') + | default(deployment_vars[deployment_type].vpc_subnet, true) }}" + machine_public_ip: "{{ lookup('env', 'ec2_public_ip') + | default(deployment_vars[deployment_type].assign_public_ip, true) }}" + security_groups: "{{ lookup('env', 'ec2_security_groups') + | default(deployment_vars[deployment_type].security_groups, true) }}" - name: Launch instance(s) ec2: state: present region: "{{ machine_region }}" keypair: "{{ 
machine_keypair }}" - group: "{{ security_group }}" + group: "{{ security_groups }}" instance_type: "{{ machine_type }}" image: "{{ machine_image }}" count: "{{ instances | oo_len }}" + vpc_subnet_id: "{{ machine_subnet | default(omit, true) }}" + assign_public_ip: "{{ machine_public_ip | default(omit, true) }}" wait: yes instance_tags: created-by: "{{ created_by }}" diff --git a/playbooks/aws/openshift-cluster/vars.yml b/playbooks/aws/openshift-cluster/vars.yml index f0df3d6f5..bd6215869 100644 --- a/playbooks/aws/openshift-cluster/vars.yml +++ b/playbooks/aws/openshift-cluster/vars.yml @@ -6,15 +6,30 @@ deployment_vars: region: us-east-1 ssh_user: fedora sudo: yes + keypair: libra + type: m3.large + security_groups: [ 'public' ] + vpc_subnet: + assign_public_ip: online: # private ami image: ami-307b3658 region: us-east-1 ssh_user: root sudo: no + keypair: libra + type: m3.large + security_groups: [ 'public' ] + vpc_subnet: + assign_public_ip: enterprise: # rhel-7.1, requires cloud access subscription image: ami-10663b78 region: us-east-1 ssh_user: ec2-user sudo: yes + keypair: libra + type: m3.large + security_groups: [ 'public' ] + vpc_subnet: + assign_public_ip: -- cgit v1.2.3 From 3d34312d6414e10595a7e7200ee6735a01632b41 Mon Sep 17 00:00:00 2001 From: Wesley Hearn Date: Wed, 22 Apr 2015 10:53:41 -0400 Subject: Update openshift-cluster/vars for online defaults --- playbooks/aws/openshift-cluster/vars.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'playbooks/aws') diff --git a/playbooks/aws/openshift-cluster/vars.yml b/playbooks/aws/openshift-cluster/vars.yml index bd6215869..f87e7aba3 100644 --- a/playbooks/aws/openshift-cluster/vars.yml +++ b/playbooks/aws/openshift-cluster/vars.yml @@ -13,15 +13,15 @@ deployment_vars: assign_public_ip: online: # private ami - image: ami-307b3658 + image: ami-906240f8 region: us-east-1 ssh_user: root sudo: no - keypair: libra + keypair: mmcgrath_libra type: m3.large - security_groups: [ 'public' ] - vpc_subnet: - assign_public_ip: + security_groups: [ 'int-v3' ] + vpc_subnet: subnet-987c0def + assign_public_ip: yes enterprise: # rhel-7.1, requires cloud access subscription image: ami-10663b78 -- cgit v1.2.3 From 6792e2c58ea21bd67a36ff4571301782c9f64009 Mon Sep 17 00:00:00 2001 From: Jason DeTiberus Date: Wed, 22 Apr 2015 14:10:15 -0400 Subject: Allow variable overriding for ec2 deployment_vars - users can now override the deployment_vars variables with the assocated ec2_* variables - added deployment_type and env specific vars files that load some ec2_* overrides - added the ability to search for amis by ami_name - this allows us to specify a base name with a wildcard to have the playbook choose the latest available image for that image name - added a copy of the ec2_find_ami module that will be in ansible 2.0 until we can make ansible 2.0 a requirement. 
--- filter_plugins/oo_filters.py | 83 +++--- playbooks/aws/openshift-cluster/launch.yml | 1 + .../aws/openshift-cluster/library/ec2_ami_find.py | 302 +++++++++++++++++++++ .../openshift-cluster/tasks/launch_instances.yml | 76 ++++-- playbooks/aws/openshift-cluster/vars.defaults.yml | 1 + .../aws/openshift-cluster/vars.online.int.yml | 9 + .../aws/openshift-cluster/vars.online.prod.yml | 9 + .../aws/openshift-cluster/vars.online.stage.yml | 9 + playbooks/aws/openshift-cluster/vars.yml | 13 +- 9 files changed, 445 insertions(+), 58 deletions(-) create mode 100644 playbooks/aws/openshift-cluster/library/ec2_ami_find.py create mode 100644 playbooks/aws/openshift-cluster/vars.defaults.yml create mode 100644 playbooks/aws/openshift-cluster/vars.online.int.yml create mode 100644 playbooks/aws/openshift-cluster/vars.online.prod.yml create mode 100644 playbooks/aws/openshift-cluster/vars.online.stage.yml (limited to 'playbooks/aws') diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py index cf30cde9a..d22b6d188 100644 --- a/filter_plugins/oo_filters.py +++ b/filter_plugins/oo_filters.py @@ -1,14 +1,17 @@ #!/usr/bin/python # -*- coding: utf-8 -*- # vim: expandtab:tabstop=4:shiftwidth=4 +''' +Custom filters for use in openshift-ansible +''' -from ansible import errors, runner -import json +from ansible import errors +from operator import itemgetter import pdb -import re def oo_pdb(arg): - ''' This pops you into a pdb instance where arg is the data passed in from the filter. + ''' This pops you into a pdb instance where arg is the data passed in + from the filter. Ex: "{{ hostvars | oo_pdb }}" ''' pdb.set_trace() @@ -21,7 +24,8 @@ def oo_len(arg): return len(arg) def get_attr(data, attribute=None): - ''' This looks up dictionary attributes of the form a.b.c and returns the value. + ''' This looks up dictionary attributes of the form a.b.c and returns + the value. Ex: data = {'a': {'b': {'c': 5}}} attribute = "a.b.c" returns 5 @@ -41,12 +45,13 @@ def oo_flatten(data): if not issubclass(type(data), list): raise errors.AnsibleFilterError("|failed expects to flatten a List") - return [ item for sublist in data for item in sublist ] + return [item for sublist in data for item in sublist] -def oo_collect(data, attribute=None, filters={}): - ''' This takes a list of dict and collects all attributes specified into a list - If filter is specified then we will include all items that match _ALL_ of filters. + +def oo_collect(data, attribute=None, filters=None): + ''' This takes a list of dict and collects all attributes specified into a + list. If filter is specified then we will include all items that match + _ALL_ of filters.
Ex: data = [ {'a':1, 'b':5, 'z': 'z'}, # True, return {'a':2, 'z': 'z'}, # True, return {'a':3, 'z': 'z'}, # True, return @@ -56,15 +61,18 @@ def oo_collect(data, attribute=None, filters={}): filters = {'z': 'z'} returns [1, 2, 3] ''' - if not issubclass(type(data), list): raise errors.AnsibleFilterError("|failed expects to filter on a List") if not attribute: raise errors.AnsibleFilterError("|failed expects attribute to be set") - if filters: - retval = [get_attr(d, attribute) for d in data if all([ d[key] == filters[key] for key in filters ]) ] + if filters is not None: + if not issubclass(type(filters), dict): + raise errors.AnsibleFilterError("|failed expects filter to be a" + " dict") + retval = [get_attr(d, attribute) for d in data if ( + all([d[key] == filters[key] for key in filters]))] else: retval = [get_attr(d, attribute) for d in data] @@ -78,7 +86,7 @@ def oo_select_keys(data, keys): ''' if not issubclass(type(data), dict): - raise errors.AnsibleFilterError("|failed expects to filter on a Dictionary") + raise errors.AnsibleFilterError("|failed expects to filter on a dict") if not issubclass(type(keys), list): raise errors.AnsibleFilterError("|failed expects first param is a list") @@ -98,30 +106,43 @@ def oo_prepend_strings_in_list(data, prepend): if not issubclass(type(data), list): raise errors.AnsibleFilterError("|failed expects first param is a list") if not all(isinstance(x, basestring) for x in data): - raise errors.AnsibleFilterError("|failed expects first param is a list of strings") + raise errors.AnsibleFilterError("|failed expects first param is a list" + " of strings") retval = [prepend + s for s in data] return retval -def oo_get_deployment_type_from_groups(data): - ''' This takes a list of groups and returns the associated - deployment-type +def oo_ami_selector(data, image_name): + ''' This takes a list of amis and an image name and attempts to return + the latest ami. ''' if not issubclass(type(data), list): raise errors.AnsibleFilterError("|failed expects first param is a list") - regexp = re.compile('^tag_deployment-type[-_]') - matches = filter(regexp.match, data) - if len(matches) > 0: - return regexp.sub('', matches[0]) - return "Unknown" -class FilterModule (object): + + if not data: + return None + else: + if image_name is None or not image_name.endswith('_*'): + ami = sorted(data, key=itemgetter('name'), reverse=True)[0] + return ami['ami_id'] + else: + ami_info = [(ami, ami['name'].split('_')[-1]) for ami in data] + ami = sorted(ami_info, key=itemgetter(1), reverse=True)[0][0] + return ami['ami_id'] + +# disabling pylint checks for too-few-public-methods and no-self-use since we +# need to expose a FilterModule object that has a filters method that returns +# a mapping of filter names to methods.
+# pylint: disable=too-few-public-methods, no-self-use +class FilterModule(object): + ''' FilterModule ''' def filters(self): + ''' returns a mapping of filters to methods ''' return { - "oo_select_keys": oo_select_keys, - "oo_collect": oo_collect, - "oo_flatten": oo_flatten, - "oo_len": oo_len, - "oo_pdb": oo_pdb, - "oo_prepend_strings_in_list": oo_prepend_strings_in_list, - "oo_get_deployment_type_from_groups": oo_get_deployment_type_from_groups - } + "oo_select_keys": oo_select_keys, + "oo_collect": oo_collect, + "oo_flatten": oo_flatten, + "oo_len": oo_len, + "oo_pdb": oo_pdb, + "oo_prepend_strings_in_list": oo_prepend_strings_in_list, + "oo_ami_selector": oo_ami_selector + } diff --git a/playbooks/aws/openshift-cluster/launch.yml b/playbooks/aws/openshift-cluster/launch.yml index a0de00fc3..3eb5496e4 100644 --- a/playbooks/aws/openshift-cluster/launch.yml +++ b/playbooks/aws/openshift-cluster/launch.yml @@ -5,6 +5,7 @@ gather_facts: no vars_files: - vars.yml + - ["vars.{{ deployment_type }}.{{ cluster_id }}.yml", vars.defaults.yml] tasks: - fail: msg: Deployment type not supported for aws provider yet diff --git a/playbooks/aws/openshift-cluster/library/ec2_ami_find.py b/playbooks/aws/openshift-cluster/library/ec2_ami_find.py new file mode 100644 index 000000000..29e594a65 --- /dev/null +++ b/playbooks/aws/openshift-cluster/library/ec2_ami_find.py @@ -0,0 +1,302 @@ +#!/usr/bin/python +#pylint: skip-file +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: ec2_ami_find +version_added: 2.0 +short_description: Searches for AMIs to obtain the AMI ID and other information +description: + - Returns list of matching AMIs with AMI ID, along with other useful information + - Can search AMIs with different owners + - Can search by matching tag(s), by AMI name and/or other criteria + - Results can be sorted and sliced +author: Tom Bamford +notes: + - This module is not backwards compatible with the previous version of the ec2_search_ami module which worked only for Ubuntu AMIs listed on cloud-images.ubuntu.com. + - See the example below for a suggestion of how to search by distro/release. +options: + region: + description: + - The AWS region to use. + required: true + aliases: [ 'aws_region', 'ec2_region' ] + owner: + description: + - Search AMIs owned by the specified owner + - Can specify an AWS account ID, or one of the special IDs 'self', 'amazon' or 'aws-marketplace' + - If not specified, all EC2 AMIs in the specified region will be searched. + - You can include wildcards in many of the search options. An asterisk (*) matches zero or more characters, and a question mark (?) matches exactly one character. You can escape special characters using a backslash (\) before the character. For example, a value of \*amazon\?\\ searches for the literal string *amazon?\. + required: false + default: null + ami_id: + description: + - An AMI ID to match. 
+ default: null + required: false + ami_tags: + description: + - A hash/dictionary of tags to match for the AMI. + default: null + required: false + architecture: + description: + - An architecture type to match (e.g. x86_64). + default: null + required: false + hypervisor: + description: + - A hypervisor type to match (e.g. xen). + default: null + required: false + is_public: + description: + - Whether or not the image(s) are public. + choices: ['yes', 'no'] + default: null + required: false + name: + description: + - An AMI name to match. + default: null + required: false + platform: + description: + - Platform type to match. + default: null + required: false + sort: + description: + - Optional attribute with which to sort the results. + - If specifying 'tag', the 'sort_tag' parameter is required. + choices: ['name', 'description', 'tag'] + default: null + required: false + sort_tag: + description: + - Tag name with which to sort results. + - Required when specifying 'sort=tag'. + default: null + required: false + sort_order: + description: + - Order in which to sort results. + - Only used when the 'sort' parameter is specified. + choices: ['ascending', 'descending'] + default: 'ascending' + required: false + sort_start: + description: + - Which result to start with (when sorting). + - Corresponds to Python slice notation. + default: null + required: false + sort_end: + description: + - Which result to end with (when sorting). + - Corresponds to Python slice notation. + default: null + required: false + state: + description: + - AMI state to match. + default: 'available' + required: false + virtualization_type: + description: + - Virtualization type to match (e.g. hvm). + default: null + required: false + no_result_action: + description: + - What to do when no results are found. + - "'success' reports success and returns an empty array" + - "'fail' causes the module to report failure" + choices: ['success', 'fail'] + default: 'success' + required: false +requirements: + - boto + +''' + +EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details.
+ +# Search for the AMI tagged "project:website" +- ec2_ami_find: + owner: self + ami_tags: + project: website + no_result_action: fail + register: ami_find + +# Search for the latest Ubuntu 14.04 AMI +- ec2_ami_find: + name: "ubuntu/images/ebs/ubuntu-trusty-14.04-amd64-server-*" + owner: 099720109477 + sort: name + sort_order: descending + sort_end: 1 + register: ami_find + +# Launch an EC2 instance +- ec2: + image: "{{ ami_find.results[0].ami_id }}" + instance_type: m3.medium + key_name: mykey + wait: yes +''' + +try: + import boto.ec2 + HAS_BOTO=True +except ImportError: + HAS_BOTO=False + +import json + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update(dict( + region = dict(required=True, + aliases = ['aws_region', 'ec2_region']), + owner = dict(required=False, default=None), + ami_id = dict(required=False), + ami_tags = dict(required=False, type='dict', + aliases = ['search_tags', 'image_tags']), + architecture = dict(required=False), + hypervisor = dict(required=False), + is_public = dict(required=False), + name = dict(required=False), + platform = dict(required=False), + sort = dict(required=False, default=None, + choices=['name', 'description', 'tag']), + sort_tag = dict(required=False), + sort_order = dict(required=False, default='ascending', + choices=['ascending', 'descending']), + sort_start = dict(required=False), + sort_end = dict(required=False), + state = dict(required=False, default='available'), + virtualization_type = dict(required=False), + no_result_action = dict(required=False, default='success', + choices = ['success', 'fail']), + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + ) + + if not HAS_BOTO: + module.fail_json(msg='boto required for this module, install via pip or your package manager') + + ami_id = module.params.get('ami_id') + ami_tags = module.params.get('ami_tags') + architecture = module.params.get('architecture') + hypervisor = module.params.get('hypervisor') + is_public = module.params.get('is_public') + name = module.params.get('name') + owner = module.params.get('owner') + platform = module.params.get('platform') + sort = module.params.get('sort') + sort_tag = module.params.get('sort_tag') + sort_order = module.params.get('sort_order') + sort_start = module.params.get('sort_start') + sort_end = module.params.get('sort_end') + state = module.params.get('state') + virtualization_type = module.params.get('virtualization_type') + no_result_action = module.params.get('no_result_action') + + filter = {'state': state} + + if ami_id: + filter['image_id'] = ami_id + if ami_tags: + for tag in ami_tags: + filter['tag:'+tag] = ami_tags[tag] + if architecture: + filter['architecture'] = architecture + if hypervisor: + filter['hypervisor'] = hypervisor + if is_public: + filter['is_public'] = is_public + if name: + filter['name'] = name + if platform: + filter['platform'] = platform + if virtualization_type: + filter['virtualization_type'] = virtualization_type + + ec2 = ec2_connect(module) + + images_result = ec2.get_all_images(owners=owner, filters=filter) + + if no_result_action == 'fail' and len(images_result) == 0: + module.fail_json(msg="No AMIs matched the attributes: %s" % json.dumps(filter)) + + results = [] + for image in images_result: + data = { + 'ami_id': image.id, + 'architecture': image.architecture, + 'description': image.description, + 'is_public': image.is_public, + 'name': image.name, + 'owner_id': image.owner_id, + 'platform': image.platform, + 'root_device_name': image.root_device_name, +
'root_device_type': image.root_device_type, + 'state': image.state, + 'tags': image.tags, + 'virtualization_type': image.virtualization_type, + } + + if image.kernel_id: + data['kernel_id'] = image.kernel_id + if image.ramdisk_id: + data['ramdisk_id'] = image.ramdisk_id + + results.append(data) + + if sort == 'tag': + if not sort_tag: + module.fail_json(msg="'sort_tag' option must be given with 'sort=tag'") + results.sort(key=lambda e: e['tags'][sort_tag], reverse=(sort_order=='descending')) + elif sort: + results.sort(key=lambda e: e[sort], reverse=(sort_order=='descending')) + + try: + if sort and sort_start and sort_end: + results = results[int(sort_start):int(sort_end)] + elif sort and sort_start: + results = results[int(sort_start):] + elif sort and sort_end: + results = results[:int(sort_end)] + except TypeError: + module.fail_json(msg="Please supply numeric values for sort_start and/or sort_end") + + module.exit_json(results=results) + +# import module snippets +from ansible.module_utils.basic import * +from ansible.module_utils.ec2 import * + +if __name__ == '__main__': + main() + diff --git a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml index 34172396a..39ad9d089 100644 --- a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml +++ b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml @@ -4,32 +4,64 @@ env: "{{ cluster }}" env_host_type: "{{ cluster }}-openshift-{{ type }}" host_type: "{{ type }}" - machine_type: "{{ lookup('env', 'ec2_instance_type') - | default(deployment_vars[deployment_type].type, true) }}" - machine_image: "{{ lookup('env', 'ec2_ami') - | default(deployment_vars[deployment_type].image, true) }}" - machine_region: "{{ lookup('env', 'ec2_region') - | default(deployment_vars[deployment_type].region, true) }}" - machine_keypair: "{{ lookup('env', 'ec2_keypair') - | default(deployment_vars[deployment_type].keypair, true) }}" - machine_subnet: "{{ lookup('env', 'ec2_vpc_subnet') - | default(deployment_vars[deployment_type].vpc_subnet, true) }}" - machine_public_ip: "{{ lookup('env', 'ec2_public_ip') - | default(deployment_vars[deployment_type].assign_public_ip, true) }}" - security_groups: "{{ lookup('env', 'ec2_security_groups') - | default(deployment_vars[deployment_type].security_groups, true) }}" + +- set_fact: + ec2_region: "{{ lookup('env', 'ec2_region') + | default(deployment_vars[deployment_type].region, true) }}" + when: ec2_region is not defined +- set_fact: + ec2_image_name: "{{ lookup('env', 'ec2_image_name') + | default(deployment_vars[deployment_type].image_name, true) }}" + when: ec2_image_name is not defined and ec2_image is not defined +- set_fact: + ec2_image: "{{ lookup('env', 'ec2_image') + | default(deployment_vars[deployment_type].image, true) }}" + when: ec2_image is not defined and not ec2_image_name +- set_fact: + ec2_instance_type: "{{ lookup('env', 'ec2_instance_type') + | default(deployment_vars[deployment_type].type, true) }}" + when: ec2_instance_type is not defined +- set_fact: + ec2_keypair: "{{ lookup('env', 'ec2_keypair') + | default(deployment_vars[deployment_type].keypair, true) }}" + when: ec2_keypair is not defined +- set_fact: + ec2_vpc_subnet: "{{ lookup('env', 'ec2_vpc_subnet') + | default(deployment_vars[deployment_type].vpc_subnet, true) }}" + when: ec2_vpc_subnet is not defined +- set_fact: + ec2_assign_public_ip: "{{ lookup('env', 'ec2_assign_public_ip') + | default(deployment_vars[deployment_type].assign_public_ip, true) }}" + when: 
ec2_assign_public_ip is not defined +- set_fact: + ec2_security_groups: "{{ lookup('env', 'ec2_security_groups') + | default(deployment_vars[deployment_type].security_groups, true) }}" + when: ec2_security_groups is not defined + +- name: Find amis for deployment_type + ec2_ami_find: + region: "{{ ec2_region }}" + ami_id: "{{ ec2_image | default(omit, true) }}" + name: "{{ ec2_image_name | default(omit, true) }}" + register: ami_result + +- fail: msg="Could not find requested ami" + when: not ami_result.results + +- set_fact: + latest_ami: "{{ ami_result.results | oo_ami_selector(ec2_image_name) }}" - name: Launch instance(s) ec2: state: present - region: "{{ machine_region }}" - keypair: "{{ machine_keypair }}" - group: "{{ security_groups }}" - instance_type: "{{ machine_type }}" - image: "{{ machine_image }}" + region: "{{ ec2_region }}" + keypair: "{{ ec2_keypair }}" + group: "{{ ec2_security_groups }}" + instance_type: "{{ ec2_instance_type }}" + image: "{{ latest_ami }}" count: "{{ instances | oo_len }}" - vpc_subnet_id: "{{ machine_subnet | default(omit, true) }}" - assign_public_ip: "{{ machine_public_ip | default(omit, true) }}" + vpc_subnet_id: "{{ ec2_vpc_subnet | default(omit, true) }}" + assign_public_ip: "{{ ec2_assign_public_ip | default(omit, true) }}" wait: yes instance_tags: created-by: "{{ created_by }}" @@ -39,7 +71,7 @@ register: ec2 - name: Add Name tag to instances - ec2_tag: resource={{ item.1.id }} region={{ machine_region }} state=present + ec2_tag: resource={{ item.1.id }} region={{ ec2_region }} state=present with_together: - instances - ec2.instances diff --git a/playbooks/aws/openshift-cluster/vars.defaults.yml b/playbooks/aws/openshift-cluster/vars.defaults.yml new file mode 100644 index 000000000..ed97d539c --- /dev/null +++ b/playbooks/aws/openshift-cluster/vars.defaults.yml @@ -0,0 +1 @@ +--- diff --git a/playbooks/aws/openshift-cluster/vars.online.int.yml b/playbooks/aws/openshift-cluster/vars.online.int.yml new file mode 100644 index 000000000..12f79a9c1 --- /dev/null +++ b/playbooks/aws/openshift-cluster/vars.online.int.yml @@ -0,0 +1,9 @@ +--- +ec2_image: ami-906240f8 +ec2_image_name: libra-ops-rhel7* +ec2_region: us-east-1 +ec2_keypair: mmcgrath_libra +ec2_instance_type: m3.large +ec2_security_groups: [ 'int-v3' ] +ec2_vpc_subnet: subnet-987c0def +ec2_assign_public_ip: yes diff --git a/playbooks/aws/openshift-cluster/vars.online.prod.yml b/playbooks/aws/openshift-cluster/vars.online.prod.yml new file mode 100644 index 000000000..12f79a9c1 --- /dev/null +++ b/playbooks/aws/openshift-cluster/vars.online.prod.yml @@ -0,0 +1,9 @@ +--- +ec2_image: ami-906240f8 +ec2_image_name: libra-ops-rhel7* +ec2_region: us-east-1 +ec2_keypair: mmcgrath_libra +ec2_instance_type: m3.large +ec2_security_groups: [ 'int-v3' ] +ec2_vpc_subnet: subnet-987c0def +ec2_assign_public_ip: yes diff --git a/playbooks/aws/openshift-cluster/vars.online.stage.yml b/playbooks/aws/openshift-cluster/vars.online.stage.yml new file mode 100644 index 000000000..12f79a9c1 --- /dev/null +++ b/playbooks/aws/openshift-cluster/vars.online.stage.yml @@ -0,0 +1,9 @@ +--- +ec2_image: ami-906240f8 +ec2_image_name: libra-ops-rhel7* +ec2_region: us-east-1 +ec2_keypair: mmcgrath_libra +ec2_instance_type: m3.large +ec2_security_groups: [ 'int-v3' ] +ec2_vpc_subnet: subnet-987c0def +ec2_assign_public_ip: yes diff --git a/playbooks/aws/openshift-cluster/vars.yml b/playbooks/aws/openshift-cluster/vars.yml index f87e7aba3..07e453f89 100644 --- a/playbooks/aws/openshift-cluster/vars.yml +++ 
b/playbooks/aws/openshift-cluster/vars.yml @@ -3,6 +3,7 @@ deployment_vars: origin: # fedora, since centos requires marketplace image: ami-acd999c4 + image_name: region: us-east-1 ssh_user: fedora sudo: yes @@ -13,18 +14,20 @@ deployment_vars: assign_public_ip: online: # private ami - image: ami-906240f8 + image: ami-7a9e9812 + image_name: openshift-rhel7_* region: us-east-1 ssh_user: root sudo: no - keypair: mmcgrath_libra + keypair: libra type: m3.large - security_groups: [ 'int-v3' ] - vpc_subnet: subnet-987c0def - assign_public_ip: yes + security_groups: [ 'public' ] + vpc_subnet: + assign_public_ip: enterprise: # rhel-7.1, requires cloud access subscription image: ami-10663b78 + image_name: region: us-east-1 ssh_user: ec2-user sudo: yes -- cgit v1.2.3 From 378e8a8c9d7e7be7f52691e957f07096ee0b2c82 Mon Sep 17 00:00:00 2001 From: Jason DeTiberus Date: Thu, 16 Apr 2015 01:49:29 -0400 Subject: lvm-direct support for aws - Create a separate docker volume in aws openshift-cluster playbooks - default to using ephemeral storage, but allow it to be overridden - allow root volume settings to be overridden as well - add user-data cloud-config to bootstrap the installation/configuration of docker-storage-setup - pylint cleanup for oo_filters.py - remove leftover references to the deployment_type tags which were previously removed - oo_get_deployment_type_from_groups filter in oo_filters.py - cluster list playbooks references to oo_get_deployment_type_from_groups filter --- README_AWS.md | 21 ++++++++- filter_plugins/oo_filters.py | 50 +++++++++++++++++++++- playbooks/aws/openshift-cluster/list.yml | 2 +- .../openshift-cluster/tasks/launch_instances.yml | 22 ++++++++++ .../aws/openshift-cluster/templates/user_data.j2 | 29 +++++++++++++ playbooks/gce/openshift-cluster/list.yml | 2 +- playbooks/libvirt/openshift-cluster/list.yml | 2 +- 7 files changed, 123 insertions(+), 5 deletions(-) create mode 100644 playbooks/aws/openshift-cluster/templates/user_data.j2 (limited to 'playbooks/aws') diff --git a/README_AWS.md b/README_AWS.md index 37f4c5f51..888abe939 100644 --- a/README_AWS.md +++ b/README_AWS.md @@ -40,11 +40,25 @@ Alternatively, you can configure your ssh-agent to hold the credentials to conne By default, a cluster is launched with the following configuration: - Instance type: m3.large -- AMI: ami-307b3658 +- AMI: ami-307b3658 (for online deployments, ami-acd999c4 for origin deployments and ami-10663b78 for enterprise deployments) - Region: us-east-1 - Keypair name: libra - Security group: public Master specific defaults: - Master root volume size: 10 (in GiBs) - Master root volume type: gp2 - Master root volume iops: 500 (only applicable when volume type is io1) Node specific defaults: - Node root volume size: 10 (in GiBs) - Node root volume type: gp2 - Node root volume iops: 500 (only applicable when volume type is io1) - Docker volume size: 25 (in GiBs) - Docker volume ephemeral: true (Whether the docker volume is ephemeral) - Docker volume type: gp2 (only applicable if ephemeral is false) - Docker volume iops: 500 (only applicable when volume type is io1) If needed, these values can be changed by setting environment variables on your system. 
- export ec2_instance_type='m3.large' @@ -52,6 +66,11 @@ If needed, these values can be changed by setting environment variables on your - export ec2_region='us-east-1' - export ec2_keypair='libra' - export ec2_security_group='public' +- export os_master_root_vol_size='20' +- export os_master_root_vol_type='standard' +- export os_node_root_vol_size='15' +- export os_docker_vol_size='50' +- export os_docker_vol_ephemeral='false' Install Dependencies -------------------- diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py index d22b6d188..097038450 100644 --- a/filter_plugins/oo_filters.py +++ b/filter_plugins/oo_filters.py @@ -129,6 +129,53 @@ def oo_ami_selector(data, image_name): ami = sorted(ami_info, key=itemgetter(1), reverse=True)[0][0] return ami['ami_id'] +def oo_ec2_volume_definition(data, host_type, docker_ephemeral=False): + ''' This takes a dictionary of volume definitions and returns a valid ec2 + volume definition based on the host_type and the values in the + dictionary. + The dictionary should look similar to this: + { 'master': + { 'root': + { 'volume_size': 10, 'device_type': 'gp2', + 'iops': 500 + } + }, + 'node': + { 'root': + { 'volume_size': 10, 'device_type': 'io1', + 'iops': 1000 + }, + 'docker': + { 'volume_size': 40, 'device_type': 'gp2', + 'iops': 500, 'ephemeral': 'true' + } + } + } + ''' + if not issubclass(type(data), dict): + raise errors.AnsibleFilterError("|failed expects first param is a dict") + if host_type not in ['master', 'node']: + raise errors.AnsibleFilterError("|failed expects either master or node" + " host type") + + root_vol = data[host_type]['root'] + root_vol['device_name'] = '/dev/sda1' + root_vol['delete_on_termination'] = True + if root_vol['device_type'] != 'io1': + root_vol.pop('iops', None) + if host_type == 'node': + docker_vol = data[host_type]['docker'] + docker_vol['device_name'] = '/dev/xvdb' + docker_vol['delete_on_termination'] = True + if docker_vol['device_type'] != 'io1': + docker_vol.pop('iops', None) + if docker_ephemeral: + docker_vol.pop('device_type', None) + docker_vol.pop('delete_on_termination', None) + docker_vol['ephemeral'] = 'ephemeral0' + return [root_vol, docker_vol] + return [root_vol] + # disabling pylint checks for too-few-public-methods and no-self-use since we # need to expose a FilterModule object that has a filters method that returns # a mapping of filter names to methods. 
@@ -144,5 +191,6 @@ class FilterModule(object): "oo_len": oo_len, "oo_pdb": oo_pdb, "oo_prepend_strings_in_list": oo_prepend_strings_in_list, - "oo_ami_selector": oo_ami_selector + "oo_ami_selector": oo_ami_selector, + "oo_ec2_volume_definition": oo_ec2_volume_definition } diff --git a/playbooks/aws/openshift-cluster/list.yml b/playbooks/aws/openshift-cluster/list.yml index 5c04bc320..04fcdc0a1 100644 --- a/playbooks/aws/openshift-cluster/list.yml +++ b/playbooks/aws/openshift-cluster/list.yml @@ -21,4 +21,4 @@ gather_facts: no tasks: - debug: - msg: "public ip:{{ hostvars[inventory_hostname].ec2_ip_address }} private ip:{{ hostvars[inventory_hostname].ec2_private_ip_address }} deployment-type: {{ hostvars[inventory_hostname].group_names | oo_get_deployment_type_from_groups }}" + msg: "public ip:{{ hostvars[inventory_hostname].ec2_ip_address }} private ip:{{ hostvars[inventory_hostname].ec2_private_ip_address }}" diff --git a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml index 39ad9d089..666a8d1fb 100644 --- a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml +++ b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml @@ -1,6 +1,7 @@ --- - set_fact: created_by: "{{ lookup('env', 'LOGNAME')|default(cluster, true) }}" + docker_vol_ephemeral: "{{ lookup('env', 'os_docker_vol_ephemeral') | default(false, true) }}" env: "{{ cluster }}" env_host_type: "{{ cluster }}-openshift-{{ type }}" host_type: "{{ type }}" @@ -50,6 +51,25 @@ - set_fact: latest_ami: "{{ ami_result.results | oo_ami_selector(ec2_image_name) }}" + user_data: "{{ lookup('template', '../templates/user_data.j2') if type == 'node' else None | default('omit') }}" + volume_defs: + master: + root: + volume_size: "{{ lookup('env', 'os_master_root_vol_size') | default(25, true) }}" + device_type: "{{ lookup('env', 'os_master_root_vol_type') | default('gp2', true) }}" + iops: "{{ lookup('env', 'os_master_root_vol_iops') | default(500, true) }}" + node: + root: + volume_size: "{{ lookup('env', 'os_node_root_vol_size') | default(25, true) }}" + device_type: "{{ lookup('env', 'os_node_root_vol_type') | default('gp2', true) }}" + iops: "{{ lookup('env', 'os_node_root_vol_iops') | default(500, true) }}" + docker: + volume_size: "{{ lookup('env', 'os_docker_vol_size') | default(32, true) }}" + device_type: "{{ lookup('env', 'os_docker_vol_type') | default('gp2', true) }}" + iops: "{{ lookup('env', 'os_docker_vol_iops') | default(500, true) }}" + +- set_fact: + volumes: "{{ volume_defs | oo_ec2_volume_definition(host_type, docker_vol_ephemeral | bool) }}" - name: Launch instance(s) ec2: @@ -62,12 +82,14 @@ count: "{{ instances | oo_len }}" vpc_subnet_id: "{{ ec2_vpc_subnet | default(omit, true) }}" assign_public_ip: "{{ ec2_assign_public_ip | default(omit, true) }}" + user_data: "{{ user_data }}" wait: yes instance_tags: created-by: "{{ created_by }}" env: "{{ env }}" host-type: "{{ host_type }}" env-host-type: "{{ env_host_type }}" + volumes: "{{ volumes }}" register: ec2 - name: Add Name tag to instances diff --git a/playbooks/aws/openshift-cluster/templates/user_data.j2 b/playbooks/aws/openshift-cluster/templates/user_data.j2 new file mode 100644 index 000000000..7dbc8f552 --- /dev/null +++ b/playbooks/aws/openshift-cluster/templates/user_data.j2 @@ -0,0 +1,29 @@ +#cloud-config +yum_repos: + jdetiber-copr: + name: Copr repo for origin owned by jdetiber + baseurl: https://copr-be.cloud.fedoraproject.org/results/jdetiber/origin/epel-7-$basearch/ + 
skip_if_unavailable: true + gpgcheck: true + gpgkey: https://copr-be.cloud.fedoraproject.org/results/jdetiber/origin/pubkey.gpg + enabled: true + +packages: +- xfsprogs # can be dropped after docker-storage-setup properly requires it: https://github.com/projectatomic/docker-storage-setup/pull/8 +- docker-storage-setup + +mounts: +- [ xvdb ] +- [ ephemeral0 ] + +write_files: +- content: | + DEVS=/dev/xvdb + VG=docker_vg + path: /etc/sysconfig/docker-storage-setup + owner: root:root + permissions: '0644' + +runcmd: +- systemctl daemon-reload +- systemctl enable lvm2-lvmetad.service docker-storage-setup.service diff --git a/playbooks/gce/openshift-cluster/list.yml b/playbooks/gce/openshift-cluster/list.yml index bab2fb9f8..962381306 100644 --- a/playbooks/gce/openshift-cluster/list.yml +++ b/playbooks/gce/openshift-cluster/list.yml @@ -21,4 +21,4 @@ gather_facts: no tasks: - debug: - msg: "public ip:{{ hostvars[inventory_hostname].gce_public_ip }} private ip:{{ hostvars[inventory_hostname].gce_private_ip }} deployment-type: {{ hostvars[inventory_hostname].group_names | oo_get_deployment_type_from_groups }}" + msg: "public ip:{{ hostvars[inventory_hostname].gce_public_ip }} private ip:{{ hostvars[inventory_hostname].gce_private_ip }}" diff --git a/playbooks/libvirt/openshift-cluster/list.yml b/playbooks/libvirt/openshift-cluster/list.yml index 25a25f791..eaedc4d0d 100644 --- a/playbooks/libvirt/openshift-cluster/list.yml +++ b/playbooks/libvirt/openshift-cluster/list.yml @@ -20,4 +20,4 @@ hosts: oo_list_hosts tasks: - debug: - msg: 'public:{{ansible_default_ipv4.address}} private:{{ansible_default_ipv4.address}} deployment-type: {{ hostvars[inventory_hostname].group_names | oo_get_deployment_type_from_groups }}' + msg: 'public:{{ansible_default_ipv4.address}} private:{{ansible_default_ipv4.address}}' -- cgit v1.2.3
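A usage sketch for the storage overrides introduced above (the os_* variable names are the ones documented in README_AWS.md in this patch; the launch invocation and the chosen values are assumptions):

    # Persistent 50 GiB gp2 docker volume instead of the default ephemeral disk
    export os_docker_vol_ephemeral='false'
    export os_docker_vol_size='50'
    # Larger root volume for masters
    export os_master_root_vol_size='20'
    ansible-playbook playbooks/aws/openshift-cluster/launch.yml -e 'deployment_type=online cluster_id=mycluster'

When os_docker_vol_ephemeral is false, oo_ec2_volume_definition keeps device_type and delete_on_termination on the /dev/xvdb docker volume; when true, it drops both and maps the volume to ephemeral0. Either way, the user_data cloud-config writes DEVS=/dev/xvdb and VG=docker_vg to /etc/sysconfig/docker-storage-setup so that docker-storage-setup builds the LVM-backed docker storage on first boot.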