Diffstat (limited to 'playbooks')
-rw-r--r--  playbooks/adhoc/uninstall.yml | 3
-rw-r--r--  playbooks/aws/openshift-cluster/list.yml | 9
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml | 10
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml | 26
-rw-r--r--  playbooks/common/openshift-cluster/redeploy-certificates.yml | 8
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/etcd/backup.yml | 73
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/etcd/containerized_tasks.yml | 47
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/etcd/fedora_tasks.yml | 23
l---------  playbooks/common/openshift-cluster/upgrades/etcd/files/etcdctl.sh | 1
l---------  playbooks/common/openshift-cluster/upgrades/etcd/filter_plugins | 1
l---------  playbooks/common/openshift-cluster/upgrades/etcd/lookup_plugins | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/etcd/main.yml | 122
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/etcd/rhel_tasks.yml | 23
l---------  playbooks/common/openshift-cluster/upgrades/etcd/roles | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml | 86
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml | 10
-rw-r--r--  playbooks/common/openshift-master/config.yml | 2
-rw-r--r--  playbooks/gce/openshift-cluster/library/gce.py | 543
-rw-r--r--  playbooks/gce/openshift-cluster/list.yml | 14
-rw-r--r--  playbooks/gce/openshift-cluster/tasks/launch_instances.yml | 1
-rw-r--r--  playbooks/libvirt/openshift-cluster/list.yml | 14
-rw-r--r--  playbooks/openstack/openshift-cluster/files/heat_stack.yaml | 163
-rw-r--r--  playbooks/openstack/openshift-cluster/files/heat_stack_server.yaml | 24
-rw-r--r--  playbooks/openstack/openshift-cluster/files/user-data | 13
-rw-r--r--  playbooks/openstack/openshift-cluster/list.yml | 14
25 files changed, 389 insertions, 843 deletions
diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml
index 789f66b14..4ea639cbe 100644
--- a/playbooks/adhoc/uninstall.yml
+++ b/playbooks/adhoc/uninstall.yml
@@ -338,6 +338,7 @@
failed_when: False
with_items:
- etcd
+ - etcd3
- firewalld
- name: Stop additional atomic services
@@ -352,6 +353,7 @@
when: not is_atomic | bool
with_items:
- etcd
+ - etcd3
- shell: systemctl reset-failed
changed_when: False
@@ -365,6 +367,7 @@
- /etc/ansible/facts.d/openshift.fact
- /etc/etcd
- /etc/systemd/system/etcd_container.service
+ - /etc/profile.d/etcdctl.sh
# Intentionally using rm command over file module because if someone had mounted a filesystem
# at /var/lib/etcd then the contents were not removed correctly
diff --git a/playbooks/aws/openshift-cluster/list.yml b/playbooks/aws/openshift-cluster/list.yml
index 4934ae6d0..ed8aac398 100644
--- a/playbooks/aws/openshift-cluster/list.yml
+++ b/playbooks/aws/openshift-cluster/list.yml
@@ -16,11 +16,8 @@
groups: oo_list_hosts
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
ansible_become: "{{ deployment_vars[deployment_type].become }}"
+ oo_public_ipv4: "{{ hostvars[item].ec2_ip_address }}"
+ oo_private_ipv4: "{{ hostvars[item].ec2_private_ip_address }}"
with_items: "{{ groups[scratch_group] | default([]) | difference(['localhost']) }}"
-
-- name: List Hosts
- hosts: oo_list_hosts
- gather_facts: no
- tasks:
- debug:
- msg: "public ip:{{ hostvars[inventory_hostname].ec2_ip_address }} private ip:{{ hostvars[inventory_hostname].ec2_private_ip_address }}"
+ msg: "{{ hostvars | oo_select_keys(groups[scratch_group] | default([])) | oo_pretty_print_cluster }}"
diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml
index 381e3ed8f..834461e14 100644
--- a/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml
@@ -1,6 +1,6 @@
- name: Check for appropriate Docker versions
- hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
roles:
- openshift_facts
tasks:
@@ -19,19 +19,19 @@
# don't want to carry on, potentially taking out every node. The playbook can safely be re-run
# and will not take any action on a node already running the requested docker version.
- name: Evacuate and upgrade nodes
- hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
serial: 1
any_errors_fatal: true
tasks:
- name: Prepare for Node evacuation
command: >
- {{ openshift.common.client_binary }} adm manage-node {{ openshift.common.hostname | lower }} --schedulable=false
+ {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename }} --schedulable=false
delegate_to: "{{ groups.oo_first_master.0 }}"
when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade
- name: Evacuate Node for Kubelet upgrade
command: >
- {{ openshift.common.client_binary }} adm manage-node {{ openshift.common.hostname | lower }} --evacuate --force
+ {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename }} --evacuate --force
delegate_to: "{{ groups.oo_first_master.0 }}"
when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade
@@ -40,7 +40,7 @@
- name: Set node schedulability
command: >
- {{ openshift.common.client_binary }} adm manage-node {{ openshift.common.hostname | lower }} --schedulable=true
+ {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename }} --schedulable=true
delegate_to: "{{ groups.oo_first_master.0 }}"
when: openshift.node.schedulable | bool
when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade and openshift.node.schedulable | bool
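
For reference, the cordon/evacuate/uncordon tasks above all delegate to the first master; a rough shell equivalent of what they run for each node is sketched below (the node name node1.example.com is illustrative, and the playbook substitutes {{ openshift.common.client_binary }} and {{ openshift.node.nodename }}):

    # Sketch only: cordon, drain, upgrade Docker, then uncordon one node at a time.
    oc adm manage-node node1.example.com --schedulable=false
    oc adm manage-node node1.example.com --evacuate --force
    # ... Docker packages are upgraded and the daemon restarted here ...
    oc adm manage-node node1.example.com --schedulable=true
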
diff --git a/playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml b/playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml
new file mode 100644
index 000000000..c25f96212
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml
@@ -0,0 +1,26 @@
+---
+- include: ../../../common/openshift-cluster/verify_ansible_version.yml
+
+- name: Create initial host groups for localhost
+ hosts: localhost
+ connection: local
+ become: no
+ gather_facts: no
+ tags:
+ - always
+ tasks:
+ - include_vars: ../cluster_hosts.yml
+ - add_host:
+ name: "{{ item }}"
+ groups: l_oo_all_hosts
+ with_items: "{{ g_all_hosts | default([]) }}"
+
+- name: Create initial host groups for all hosts
+ hosts: l_oo_all_hosts
+ gather_facts: no
+ tags:
+ - always
+ tasks:
+ - include_vars: ../cluster_hosts.yml
+
+- include: ../../../common/openshift-cluster/upgrades/etcd/main.yml
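
Like the other BYO entry points, the new wrapper is run directly with ansible-playbook; a typical invocation might look like the following (the inventory path is an assumption):

    # Sketch only: point -i at whatever inventory defines cluster_hosts.yml / g_all_hosts.
    ansible-playbook -i ~/openshift-inventory/hosts \
        playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml
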
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates.yml b/playbooks/common/openshift-cluster/redeploy-certificates.yml
index 74147fe01..5f008a045 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates.yml
@@ -212,7 +212,7 @@
- name: Determine if node is currently scheduleable
command: >
{{ openshift.common.client_binary }} --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig
- get node {{ openshift.common.hostname | lower }} -o json
+ get node {{ openshift.node.nodename }} -o json
register: node_output
when: openshift_certificates_redeploy_ca | default(false) | bool
delegate_to: "{{ groups.oo_first_master.0 }}"
@@ -225,7 +225,7 @@
- name: Prepare for node evacuation
command: >
{{ openshift.common.client_binary }} adm --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig
- manage-node {{ openshift.common.hostname | lower }}
+ manage-node {{ openshift.node.nodename }}
--schedulable=false
delegate_to: "{{ groups.oo_first_master.0 }}"
when: openshift_certificates_redeploy_ca | default(false) | bool and was_schedulable | bool
@@ -233,7 +233,7 @@
- name: Evacuate node
command: >
{{ openshift.common.client_binary }} adm --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig
- manage-node {{ openshift.common.hostname | lower }}
+ manage-node {{ openshift.node.nodename }}
--evacuate --force
delegate_to: "{{ groups.oo_first_master.0 }}"
when: openshift_certificates_redeploy_ca | default(false) | bool and was_schedulable | bool
@@ -241,7 +241,7 @@
- name: Set node schedulability
command: >
{{ openshift.common.client_binary }} adm --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig
- manage-node {{ openshift.common.hostname | lower }} --schedulable=true
+ manage-node {{ openshift.node.nodename }} --schedulable=true
delegate_to: "{{ groups.oo_first_master.0 }}"
when: openshift_certificates_redeploy_ca | default(false) | bool and was_schedulable | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml
new file mode 100644
index 000000000..57b156b1c
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml
@@ -0,0 +1,73 @@
+- name: Backup etcd
+ hosts: etcd_hosts_to_backup
+ vars:
+ embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}"
+ timestamp: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
+ roles:
+ - openshift_facts
+ tasks:
+ # Ensure we persist the etcd role for this host in openshift_facts
+ - openshift_facts:
+ role: etcd
+ local_facts: {}
+ when: "'etcd' not in openshift"
+
+ - stat: path=/var/lib/openshift
+ register: var_lib_openshift
+
+ - stat: path=/var/lib/origin
+ register: var_lib_origin
+
+ - name: Create origin symlink if necessary
+ file: src=/var/lib/openshift/ dest=/var/lib/origin state=link
+ when: var_lib_openshift.stat.exists == True and var_lib_origin.stat.exists == False
+
+ # TODO: replace shell module with command and update later checks
+ # We assume to be using the data dir for all backups.
+ - name: Check available disk space for etcd backup
+ shell: df --output=avail -k {{ openshift.common.data_dir }} | tail -n 1
+ register: avail_disk
+
+ # TODO: replace shell module with command and update later checks
+ - name: Check current embedded etcd disk usage
+ shell: du -k {{ openshift.etcd.etcd_data_dir }} | tail -n 1 | cut -f1
+ register: etcd_disk_usage
+ when: embedded_etcd | bool
+
+ - name: Abort if insufficient disk space for etcd backup
+ fail:
+ msg: >
+ {{ etcd_disk_usage.stdout }} Kb disk space required for etcd backup,
+ {{ avail_disk.stdout }} Kb available.
+ when: (embedded_etcd | bool) and (etcd_disk_usage.stdout|int > avail_disk.stdout|int)
+
+ - name: Install etcd (for etcdctl)
+ action: "{{ ansible_pkg_mgr }} name=etcd state=present"
+ when: not openshift.common.is_atomic | bool
+
+ - name: Generate etcd backup
+ command: >
+ etcdctl backup --data-dir={{ openshift.etcd.etcd_data_dir }}
+ --backup-dir={{ openshift.common.data_dir }}/etcd-backup-{{ backup_tag | default('') }}{{ timestamp }}
+
+ - set_fact:
+ etcd_backup_complete: True
+
+ - name: Display location of etcd backup
+ debug:
+ msg: "Etcd backup created in {{ openshift.common.data_dir }}/etcd-backup-{{ backup_tag | default('') }}{{ timestamp }}"
+
+- name: Gate on etcd backup
+ hosts: localhost
+ connection: local
+ become: no
+ tasks:
+ - set_fact:
+ etcd_backup_completed: "{{ hostvars
+ | oo_select_keys(groups.etcd_hosts_to_backup)
+ | oo_collect('inventory_hostname', {'etcd_backup_complete': true}) }}"
+ - set_fact:
+ etcd_backup_failed: "{{ groups.etcd_hosts_to_backup | difference(etcd_backup_completed) }}"
+ - fail:
+ msg: "Upgrade cannot continue. The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}"
+ when: etcd_backup_failed | length > 0
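
The backup play above amounts to a free-space check (for embedded etcd) followed by an etcdctl backup into the data dir; a manual sketch of the same steps, with illustrative paths standing in for openshift.common.data_dir and openshift.etcd.etcd_data_dir, would be:

    # Sketch only: /var/lib/origin and /var/lib/etcd are assumptions.
    avail=$(df --output=avail -k /var/lib/origin | tail -n 1)
    used=$(du -k /var/lib/etcd | tail -n 1 | cut -f1)
    [ "$used" -le "$avail" ] || { echo "insufficient space for etcd backup"; exit 1; }
    etcdctl backup --data-dir=/var/lib/etcd \
        --backup-dir=/var/lib/origin/etcd-backup-pre-upgrade-$(date +%Y%m%d%H%M%S)
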
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/containerized_tasks.yml b/playbooks/common/openshift-cluster/upgrades/etcd/containerized_tasks.yml
new file mode 100644
index 000000000..35f391f8c
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/containerized_tasks.yml
@@ -0,0 +1,47 @@
+---
+- name: Verify cluster is healthy pre-upgrade
+ command: "etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://{{ openshift.common.hostname }}:2379 cluster-health"
+
+- name: Get current image
+ shell: grep 'ExecStart=' /etc/systemd/system/etcd_container.service | awk '{print $NF}'
+ register: current_image
+
+- name: Set new_etcd_image
+ set_fact:
+ new_etcd_image: "{{ current_image.stdout | regex_replace('/etcd.*$','/etcd3:' ~ upgrade_version ) if upgrade_version | version_compare('3.0','>=')
+ else current_image.stdout.split(':')[0] ~ ':' ~ upgrade_version }}"
+
+- name: Pull new etcd image
+ command: "docker pull {{ new_etcd_image }}"
+
+- name: Update to latest etcd image
+ replace:
+ dest: /etc/systemd/system/etcd_container.service
+ regexp: "{{ current_image.stdout }}$"
+ replace: "{{ new_etcd_image }}"
+
+- name: Restart etcd_container
+ systemd:
+ name: etcd_container
+ daemon_reload: yes
+ state: restarted
+
+## TODO: probably should just move this into the backup playbooks, also this
+## will fail on atomic host. We need to revisit how to do etcd backups there as
+## the container may be newer than etcdctl on the host. Assumes etcd3 obsoletes etcd (7.3.1)
+- name: Upgrade etcd for etcdctl when not atomic
+ action: "{{ ansible_pkg_mgr }} name=etcd ensure=latest"
+ when: not openshift.common.is_atomic | bool
+
+- name: Verify cluster is healthy
+ command: "etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://{{ openshift.common.hostname }}:2379 cluster-health"
+ register: etcdctl
+ until: etcdctl.rc == 0
+ retries: 3
+ delay: 10
+
+- name: Store new etcd_image
+ openshift_facts:
+ role: etcd
+ local_facts:
+ etcd_image: "{{ new_etcd_image }}"
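
The new_etcd_image fact simply rewrites the image reference pulled out of the unit file: anything at or above 3.0 is pointed at the etcd3 repository, anything below only gets its tag replaced. A sed rendering of the same rewrite, with an assumed registry path, shows the effect:

    # Sketch only: the image name is an assumption.
    current='registry.access.redhat.com/rhel7/etcd:latest'
    echo "$current" | sed 's|/etcd.*$|/etcd3:3.0.3|'   # upgrade_version >= 3.0
    echo "$current" | sed 's|:.*$|:2.3.7|'             # upgrade_version < 3.0
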
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/fedora_tasks.yml b/playbooks/common/openshift-cluster/upgrades/etcd/fedora_tasks.yml
new file mode 100644
index 000000000..30232110e
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/fedora_tasks.yml
@@ -0,0 +1,23 @@
+---
+# F23 GA'd with etcd 2.0, currently has 2.2 in updates
+# F24 GA'd with etcd-2.2, currently has 2.2 in updates
+# F25 Beta currently has etcd 3.0
+- name: Verify cluster is healthy pre-upgrade
+ command: "etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://{{ openshift.common.hostname }}:2379 cluster-health"
+
+- name: Update etcd
+ package:
+ name: "etcd"
+ state: "latest"
+
+- name: Restart etcd
+ service:
+ name: etcd
+ state: restarted
+
+- name: Verify cluster is healthy
+ command: "etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://{{ openshift.common.hostname }}:2379 cluster-health"
+ register: etcdctl
+ until: etcdctl.rc == 0
+ retries: 3
+ delay: 10
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/files/etcdctl.sh b/playbooks/common/openshift-cluster/upgrades/etcd/files/etcdctl.sh
new file mode 120000
index 000000000..641e04e44
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/files/etcdctl.sh
@@ -0,0 +1 @@
+../roles/etcd/files/etcdctl.sh
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/filter_plugins b/playbooks/common/openshift-cluster/upgrades/etcd/filter_plugins
new file mode 120000
index 000000000..27ddaa18b
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/filter_plugins
@@ -0,0 +1 @@
+../../../../../filter_plugins
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/lookup_plugins b/playbooks/common/openshift-cluster/upgrades/etcd/lookup_plugins
new file mode 120000
index 000000000..cf407f69b
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/lookup_plugins
@@ -0,0 +1 @@
+../../../../../lookup_plugins
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/main.yml b/playbooks/common/openshift-cluster/upgrades/etcd/main.yml
new file mode 100644
index 000000000..cce844403
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/main.yml
@@ -0,0 +1,122 @@
+---
+# For 1.4/3.4 we want to upgrade everyone to etcd-3.0. etcd docs say to
+# upgrade from 2.0.x to 2.1.x to 2.2.x to 2.3.x to 3.0.x. While this is a tedious
+# task for RHEL and CentOS, it's simply not possible in Fedora unless you've
+# mirrored packages on your own because only the GA and latest versions are
+# available in the repos. So for Fedora we'll simply skip this, sorry.
+
+- include: ../../evaluate_groups.yml
+ tags:
+ - always
+
+- name: Evaluate additional groups for upgrade
+ hosts: localhost
+ connection: local
+ become: no
+ tasks:
+ - name: Evaluate etcd_hosts_to_upgrade
+ add_host:
+ name: "{{ item }}"
+ groups: etcd_hosts_to_upgrade, etcd_hosts_to_backup
+ with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master }}"
+
+- name: Backup etcd before upgrading anything
+ include: backup.yml
+ vars:
+ backup_tag: "pre-upgrade-"
+
+- name: Drop etcdctl profiles
+ hosts: etcd_hosts_to_upgrade
+ tasks:
+ - include: roles/etcd/tasks/etcdctl.yml
+
+- name: Determine etcd version
+ hosts: etcd_hosts_to_upgrade
+ tasks:
+ - name: Record RPM based etcd version
+ command: rpm -qa --qf '%{version}' etcd\*
+ register: etcd_installed_version
+ failed_when: false
+ when: not openshift.common.is_containerized | bool
+ - name: Record containerized etcd version
+ command: docker exec etcd_container rpm -qa --qf '%{version}' etcd\*
+ register: etcd_installed_version
+ failed_when: false
+ when: openshift.common.is_containerized | bool
+
+# I really dislike this copy/pasta but I wasn't able to find a way to get it to loop
+# through hosts, then loop through tasks only when appropriate
+- name: Upgrade to 2.1
+ hosts: etcd_hosts_to_upgrade
+ serial: 1
+ vars:
+ upgrade_version: '2.1'
+ tasks:
+ - include: rhel_tasks.yml
+ when: etcd_installed_version.stdout | default('99') | version_compare('2.1','<') and ansible_distribution == 'RedHat' and not openshift.common.is_containerized | bool
+
+- name: Upgrade RPM hosts to 2.2
+ hosts: etcd_hosts_to_upgrade
+ serial: 1
+ vars:
+ upgrade_version: '2.2'
+ tasks:
+ - include: rhel_tasks.yml
+ when: etcd_installed_version.stdout | default('99') | version_compare('2.2','<') and ansible_distribution == 'RedHat' and not openshift.common.is_containerized | bool
+
+- name: Upgrade containerized hosts to 2.2.5
+ hosts: etcd_hosts_to_upgrade
+ serial: 1
+ vars:
+ upgrade_version: 2.2.5
+ tasks:
+ - include: containerized_tasks.yml
+ when: etcd_installed_version.stdout | default('99') | version_compare('2.2','<') and openshift.common.is_containerized | bool
+
+- name: Upgrade RPM hosts to 2.3
+ hosts: etcd_hosts_to_upgrade
+ serial: 1
+ vars:
+ upgrade_version: '2.3'
+ tasks:
+ - include: rhel_tasks.yml
+ when: etcd_installed_version.stdout | default('99') | version_compare('2.3','<') and ansible_distribution == 'RedHat' and not openshift.common.is_containerized | bool
+
+- name: Upgrade containerized hosts to 2.3.7
+ hosts: etcd_hosts_to_upgrade
+ serial: 1
+ vars:
+ upgrade_version: 2.3.7
+ tasks:
+ - include: containerized_tasks.yml
+ when: etcd_installed_version.stdout | default('99') | version_compare('2.3','<') and openshift.common.is_containerized | bool
+
+- name: Upgrade RPM hosts to 3.0
+ hosts: etcd_hosts_to_upgrade
+ serial: 1
+ vars:
+ upgrade_version: '3.0'
+ tasks:
+ - include: rhel_tasks.yml
+ when: etcd_installed_version.stdout | default('99') | version_compare('3.0','<') and ansible_distribution == 'RedHat' and not openshift.common.is_containerized | bool
+
+- name: Upgrade containerized hosts to etcd3 image
+ hosts: etcd_hosts_to_upgrade
+ serial: 1
+ vars:
+ upgrade_version: 3.0.3
+ tasks:
+ - include: containerized_tasks.yml
+ when: etcd_installed_version.stdout | default('99') | version_compare('3.0','<') and openshift.common.is_containerized | bool
+
+- name: Upgrade fedora to latest
+ hosts: etcd_hosts_to_upgrade
+ serial: 1
+ tasks:
+ - include: fedora_tasks.yml
+ when: ansible_distribution == 'Fedora' and not openshift.common.is_containerized | bool
+
+- name: Backup etcd
+ include: backup.yml
+ vars:
+ backup_tag: "post-3.0-"
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/rhel_tasks.yml b/playbooks/common/openshift-cluster/upgrades/etcd/rhel_tasks.yml
new file mode 100644
index 000000000..8e7dc9d9b
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/rhel_tasks.yml
@@ -0,0 +1,23 @@
+---
+- name: Verify cluster is healthy pre-upgrade
+ command: "etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://{{ openshift.common.hostname }}:2379 cluster-health"
+
+- name: Update etcd package but exclude etcd3
+ command: "{{ ansible_pkg_mgr }} install -y etcd-{{ upgrade_version }}\\* --exclude etcd3"
+ when: upgrade_version | version_compare('3.0','<')
+
+- name: Update etcd package not excluding etcd3
+ command: "{{ ansible_pkg_mgr }} install -y etcd3-{{ upgrade_version }}\\*"
+ when: not upgrade_version | version_compare('3.0','<')
+
+- name: Restart etcd
+ service:
+ name: etcd
+ state: restarted
+
+- name: Verify cluster is healthy
+ command: "etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://{{ openshift.common.hostname }}:2379 cluster-health"
+ register: etcdctl
+ until: etcdctl.rc == 0
+ retries: 3
+ delay: 10
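
With yum as the package manager, the two conditional update tasks expand to ordinary install commands; for an upgrade_version of 2.1 and 3.0 respectively they would look roughly like this (the rendered commands are an illustration, not taken from a run):

    # Sketch only.
    yum install -y 'etcd-2.1*' --exclude etcd3     # upgrade_version < 3.0
    yum install -y 'etcd3-3.0*'                    # upgrade_version >= 3.0
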
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/roles b/playbooks/common/openshift-cluster/upgrades/etcd/roles
new file mode 120000
index 000000000..6bc1a7aef
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/roles
@@ -0,0 +1 @@
+../../../../../roles
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index 927d9b4ca..57c25aa41 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -13,80 +13,22 @@
groups: etcd_hosts_to_backup
with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master }}"
-- name: Backup etcd
- hosts: etcd_hosts_to_backup
- vars:
- embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}"
- timestamp: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
+# If facts cache were for some reason deleted, this fact may not be set, and if not set
+# it will always default to true. This causes problems for the etcd data dir fact detection
+# so we must first make sure this is set correctly before attempting the backup.
+- name: Set master embedded_etcd fact
+ hosts: oo_masters_to_config
roles:
- openshift_facts
tasks:
- # Ensure we persist the etcd role for this host in openshift_facts
- openshift_facts:
- role: etcd
- local_facts: {}
- when: "'etcd' not in openshift"
-
- - stat: path=/var/lib/openshift
- register: var_lib_openshift
-
- - stat: path=/var/lib/origin
- register: var_lib_origin
-
- - name: Create origin symlink if necessary
- file: src=/var/lib/openshift/ dest=/var/lib/origin state=link
- when: var_lib_openshift.stat.exists == True and var_lib_origin.stat.exists == False
-
- # TODO: replace shell module with command and update later checks
- # We assume to be using the data dir for all backups.
- - name: Check available disk space for etcd backup
- shell: df --output=avail -k {{ openshift.common.data_dir }} | tail -n 1
- register: avail_disk
-
- # TODO: replace shell module with command and update later checks
- - name: Check current embedded etcd disk usage
- shell: du -k {{ openshift.etcd.etcd_data_dir }} | tail -n 1 | cut -f1
- register: etcd_disk_usage
- when: embedded_etcd | bool
-
- - name: Abort if insufficient disk space for etcd backup
- fail:
- msg: >
- {{ etcd_disk_usage.stdout }} Kb disk space required for etcd backup,
- {{ avail_disk.stdout }} Kb available.
- when: (embedded_etcd | bool) and (etcd_disk_usage.stdout|int > avail_disk.stdout|int)
-
- - name: Install etcd (for etcdctl)
- action: "{{ ansible_pkg_mgr }} name=etcd state=latest"
- when: not openshift.common.is_atomic | bool
-
- - name: Generate etcd backup
- command: >
- etcdctl backup --data-dir={{ openshift.etcd.etcd_data_dir }}
- --backup-dir={{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}
-
- - set_fact:
- etcd_backup_complete: True
-
- - name: Display location of etcd backup
- debug:
- msg: "Etcd backup created in {{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}"
-
+ role: master
+ local_facts:
+ embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
+ debug_level: "{{ openshift_master_debug_level | default(openshift.common.debug_level | default(2)) }}"
-- name: Gate on etcd backup
- hosts: localhost
- connection: local
- become: no
- tasks:
- - set_fact:
- etcd_backup_completed: "{{ hostvars
- | oo_select_keys(groups.etcd_hosts_to_backup)
- | oo_collect('inventory_hostname', {'etcd_backup_complete': true}) }}"
- - set_fact:
- etcd_backup_failed: "{{ groups.etcd_hosts_to_backup | difference(etcd_backup_completed) }}"
- - fail:
- msg: "Upgrade cannot continue. The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}"
- when: etcd_backup_failed | length > 0
+- name: Backup etcd
+ include: ./etcd/backup.yml
- name: Upgrade master packages
hosts: oo_masters_to_config
@@ -215,6 +157,12 @@
when: origin_reconcile_bindings | bool or ent_reconcile_bindings | bool
run_once: true
+ - name: Reconcile Jenkins Pipeline Role Bindings
+ command: >
+ {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig policy reconcile-cluster-role-bindings system:build-strategy-jenkinspipeline --confirm
+ run_once: true
+ when: openshift.common.version_gte_3_4_or_1_4 | bool
+
- name: Reconcile Security Context Constraints
command: >
{{ openshift.common.client_binary }} adm policy reconcile-sccs --confirm --additive-only=true
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
index e66344f99..1f314c854 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
@@ -17,7 +17,7 @@
# we merge upgrade functionality into the base roles and a normal config.yml playbook run.
- name: Determine if node is currently scheduleable
command: >
- {{ openshift.common.client_binary }} get node {{ openshift.common.hostname | lower }} -o json
+ {{ openshift.common.client_binary }} get node {{ openshift.node.nodename | lower }} -o json
register: node_output
delegate_to: "{{ groups.oo_first_master.0 }}"
changed_when: false
@@ -29,7 +29,7 @@
- name: Mark unschedulable if host is a node
command: >
- {{ openshift.common.client_binary }} adm manage-node {{ openshift.common.hostname | lower }} --schedulable=false
+ {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --schedulable=false
delegate_to: "{{ groups.oo_first_master.0 }}"
when: inventory_hostname in groups.oo_nodes_to_upgrade
# NOTE: There is a transient "object has been modified" error here, allow a couple
@@ -41,7 +41,7 @@
- name: Evacuate Node for Kubelet upgrade
command: >
- {{ openshift.common.client_binary }} adm manage-node {{ openshift.common.hostname | lower }} --evacuate --force
+ {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --evacuate --force
delegate_to: "{{ groups.oo_first_master.0 }}"
when: inventory_hostname in groups.oo_nodes_to_upgrade
tasks:
@@ -64,10 +64,12 @@
- name: Set node schedulability
command: >
- {{ openshift.common.client_binary }} adm manage-node {{ openshift.common.hostname | lower }} --schedulable=true
+ {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --schedulable=true
delegate_to: "{{ groups.oo_first_master.0 }}"
when: inventory_hostname in groups.oo_nodes_to_upgrade and was_schedulable | bool
register: node_sched
until: node_sched.rc == 0
retries: 3
delay: 1
+
+
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
index a53c55c14..5fcb850a2 100644
--- a/playbooks/common/openshift-master/config.yml
+++ b/playbooks/common/openshift-master/config.yml
@@ -53,7 +53,7 @@
when: openshift_hosted_metrics_deployer_prefix is not defined
- set_fact:
openshift_hosted_metrics_deployer_version: "{{ lookup('oo_option', 'openshift_hosted_metrics_deployer_version') | default('latest') }}"
- when: openshift_hosted_metrics_deployer_prefix is not defined
+ when: openshift_hosted_metrics_deployer_version is not defined
roles:
- openshift_facts
post_tasks:
diff --git a/playbooks/gce/openshift-cluster/library/gce.py b/playbooks/gce/openshift-cluster/library/gce.py
deleted file mode 100644
index fcaa3b850..000000000
--- a/playbooks/gce/openshift-cluster/library/gce.py
+++ /dev/null
@@ -1,543 +0,0 @@
-#!/usr/bin/python
-# Copyright 2013 Google Inc.
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-DOCUMENTATION = '''
----
-module: gce
-version_added: "1.4"
-short_description: create or terminate GCE instances
-description:
- - Creates or terminates Google Compute Engine (GCE) instances. See
- U(https://cloud.google.com/products/compute-engine) for an overview.
- Full install/configuration instructions for the gce* modules can
- be found in the comments of ansible/test/gce_tests.py.
-options:
- image:
- description:
- - image string to use for the instance
- required: false
- default: "debian-7"
- instance_names:
- description:
- - a comma-separated list of instance names to create or destroy
- required: false
- default: null
- machine_type:
- description:
- - machine type to use for the instance, use 'n1-standard-1' by default
- required: false
- default: "n1-standard-1"
- metadata:
- description:
- - a hash/dictionary of custom data for the instance;
- '{"key":"value", ...}'
- required: false
- default: null
- service_account_email:
- version_added: "1.5.1"
- description:
- - service account email
- required: false
- default: null
- service_account_permissions:
- version_added: "2.0"
- description:
- - service account permissions (see
- U(https://cloud.google.com/sdk/gcloud/reference/compute/instances/create),
- --scopes section for detailed information)
- required: false
- default: null
- choices: [
- "bigquery", "cloud-platform", "compute-ro", "compute-rw",
- "computeaccounts-ro", "computeaccounts-rw", "datastore", "logging-write",
- "monitoring", "sql", "sql-admin", "storage-full", "storage-ro",
- "storage-rw", "taskqueue", "userinfo-email"
- ]
- pem_file:
- version_added: "1.5.1"
- description:
- - path to the pem file associated with the service account email
- required: false
- default: null
- project_id:
- version_added: "1.5.1"
- description:
- - your GCE project ID
- required: false
- default: null
- name:
- description:
- - identifier when working with a single instance
- required: false
- network:
- description:
- - name of the network, 'default' will be used if not specified
- required: false
- default: "default"
- persistent_boot_disk:
- description:
- - if set, create the instance with a persistent boot disk
- required: false
- default: "false"
- disks:
- description:
- - a list of persistent disks to attach to the instance; a string value
- gives the name of the disk; alternatively, a dictionary value can
- define 'name' and 'mode' ('READ_ONLY' or 'READ_WRITE'). The first entry
- will be the boot disk (which must be READ_WRITE).
- required: false
- default: null
- version_added: "1.7"
- state:
- description:
- - desired state of the resource
- required: false
- default: "present"
- choices: ["active", "present", "absent", "deleted"]
- tags:
- description:
- - a comma-separated list of tags to associate with the instance
- required: false
- default: null
- zone:
- description:
- - the GCE zone to use
- required: true
- default: "us-central1-a"
- ip_forward:
- version_added: "1.9"
- description:
- - set to true if the instance can forward ip packets (useful for
- gateways)
- required: false
- default: "false"
- external_ip:
- version_added: "1.9"
- description:
- - type of external ip, ephemeral by default
- required: false
- default: "ephemeral"
- disk_auto_delete:
- version_added: "1.9"
- description:
- - if set boot disk will be removed after instance destruction
- required: false
- default: "true"
-
-requirements:
- - "python >= 2.6"
- - "apache-libcloud >= 0.13.3"
-notes:
- - Either I(name) or I(instance_names) is required.
-author: "Eric Johnson (@erjohnso) <erjohnso@google.com>"
-'''
-
-EXAMPLES = '''
-# Basic provisioning example. Create a single Debian 7 instance in the
-# us-central1-a Zone of n1-standard-1 machine type.
-- local_action:
- module: gce
- name: test-instance
- zone: us-central1-a
- machine_type: n1-standard-1
- image: debian-7
-
-# Example using defaults and with metadata to create a single 'foo' instance
-- local_action:
- module: gce
- name: foo
- metadata: '{"db":"postgres", "group":"qa", "id":500}'
-
-
-# Launch instances from a control node, runs some tasks on the new instances,
-# and then terminate them
-- name: Create a sandbox instance
- hosts: localhost
- vars:
- names: foo,bar
- machine_type: n1-standard-1
- image: debian-6
- zone: us-central1-a
- service_account_email: unique-email@developer.gserviceaccount.com
- pem_file: /path/to/pem_file
- project_id: project-id
- tasks:
- - name: Launch instances
- local_action: gce instance_names={{names}} machine_type={{machine_type}}
- image={{image}} zone={{zone}}
- service_account_email={{ service_account_email }}
- pem_file={{ pem_file }} project_id={{ project_id }}
- register: gce
- - name: Wait for SSH to come up
- local_action: wait_for host={{item.public_ip}} port=22 delay=10
- timeout=60 state=started
- with_items: {{gce.instance_data}}
-
-- name: Configure instance(s)
- hosts: launched
- sudo: True
- roles:
- - my_awesome_role
- - my_awesome_tasks
-
-- name: Terminate instances
- hosts: localhost
- connection: local
- tasks:
- - name: Terminate instances that were previously launched
- local_action:
- module: gce
- state: 'absent'
- instance_names: {{gce.instance_names}}
-
-'''
-
-try:
- import libcloud
- from libcloud.compute.types import Provider
- from libcloud.compute.providers import get_driver
- from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
- ResourceExistsError, ResourceInUseError, ResourceNotFoundError
- _ = Provider.GCE
- HAS_LIBCLOUD = True
-except ImportError:
- HAS_LIBCLOUD = False
-
-try:
- from ast import literal_eval
- HAS_PYTHON26 = True
-except ImportError:
- HAS_PYTHON26 = False
-
-
-def get_instance_info(inst):
- """Retrieves instance information from an instance object and returns it
- as a dictionary.
-
- """
- metadata = {}
- if 'metadata' in inst.extra and 'items' in inst.extra['metadata']:
- for md in inst.extra['metadata']['items']:
- metadata[md['key']] = md['value']
-
- try:
- netname = inst.extra['networkInterfaces'][0]['network'].split('/')[-1]
- except:
- netname = None
- if 'disks' in inst.extra:
- disk_names = [disk_info['source'].split('/')[-1]
- for disk_info
- in sorted(inst.extra['disks'],
- key=lambda disk_info: disk_info['index'])]
- else:
- disk_names = []
-
- if len(inst.public_ips) == 0:
- public_ip = None
- else:
- public_ip = inst.public_ips[0]
-
- return({
- 'image': inst.image is not None and inst.image.split('/')[-1] or None,
- 'disks': disk_names,
- 'machine_type': inst.size,
- 'metadata': metadata,
- 'name': inst.name,
- 'network': netname,
- 'private_ip': inst.private_ips[0],
- 'public_ip': public_ip,
- 'status': ('status' in inst.extra) and inst.extra['status'] or None,
- 'tags': ('tags' in inst.extra) and inst.extra['tags'] or [],
- 'zone': ('zone' in inst.extra) and inst.extra['zone'].name or None,
- })
-
-
-def create_instances(module, gce, instance_names):
- """Creates new instances. Attributes other than instance_names are picked
- up from 'module'
-
- module : AnsibleModule object
- gce: authenticated GCE libcloud driver
- instance_names: python list of instance names to create
-
- Returns:
- A list of dictionaries with instance information
- about the instances that were launched.
-
- """
- image = module.params.get('image')
- machine_type = module.params.get('machine_type')
- metadata = module.params.get('metadata')
- network = module.params.get('network')
- persistent_boot_disk = module.params.get('persistent_boot_disk')
- disks = module.params.get('disks')
- state = module.params.get('state')
- tags = module.params.get('tags')
- zone = module.params.get('zone')
- ip_forward = module.params.get('ip_forward')
- external_ip = module.params.get('external_ip')
- disk_auto_delete = module.params.get('disk_auto_delete')
- service_account_permissions = module.params.get('service_account_permissions')
- service_account_email = module.params.get('service_account_email')
-
- if external_ip == "none":
- external_ip = None
-
- new_instances = []
- changed = False
-
- lc_image = gce.ex_get_image(image)
- lc_disks = []
- disk_modes = []
- for i, disk in enumerate(disks or []):
- if isinstance(disk, dict):
- lc_disks.append(gce.ex_get_volume(disk['name']))
- disk_modes.append(disk['mode'])
- else:
- lc_disks.append(gce.ex_get_volume(disk))
- # boot disk is implicitly READ_WRITE
- disk_modes.append('READ_ONLY' if i > 0 else 'READ_WRITE')
- lc_network = gce.ex_get_network(network)
- lc_machine_type = gce.ex_get_size(machine_type)
- lc_zone = gce.ex_get_zone(zone)
-
- # Try to convert the user's metadata value into the format expected
- # by GCE. First try to ensure user has proper quoting of a
- # dictionary-like syntax using 'literal_eval', then convert the python
- # dict into a python list of 'key' / 'value' dicts. Should end up
- # with:
- # [ {'key': key1, 'value': value1}, {'key': key2, 'value': value2}, ...]
- if metadata:
- if isinstance(metadata, dict):
- md = metadata
- else:
- try:
- md = literal_eval(str(metadata))
- if not isinstance(md, dict):
- raise ValueError('metadata must be a dict')
- except ValueError as e:
- module.fail_json(msg='bad metadata: %s' % str(e))
- except SyntaxError as e:
- module.fail_json(msg='bad metadata syntax')
-
- if hasattr(libcloud, '__version__') and libcloud.__version__ < '0.15':
- items = []
- for k, v in md.items():
- items.append({"key": k, "value": v})
- metadata = {'items': items}
- else:
- metadata = md
-
- ex_sa_perms = []
- bad_perms = []
- if service_account_permissions:
- for perm in service_account_permissions:
- if perm not in gce.SA_SCOPES_MAP.keys():
- bad_perms.append(perm)
- if len(bad_perms) > 0:
- module.fail_json(msg='bad permissions: %s' % str(bad_perms))
- if service_account_email:
- ex_sa_perms.append({'email': service_account_email})
- else:
- ex_sa_perms.append({'email': "default"})
- ex_sa_perms[0]['scopes'] = service_account_permissions
-
- # These variables all have default values but check just in case
- if not lc_image or not lc_network or not lc_machine_type or not lc_zone:
- module.fail_json(msg='Missing required create instance variable',
- changed=False)
-
- for name in instance_names:
- pd = None
- if lc_disks:
- pd = lc_disks[0]
- elif persistent_boot_disk:
- try:
- pd = gce.create_volume(None, "%s" % name, image=lc_image)
- except ResourceExistsError:
- pd = gce.ex_get_volume("%s" % name, lc_zone)
- inst = None
- try:
- inst = gce.create_node(
- name, lc_machine_type, lc_image, location=lc_zone,
- ex_network=network, ex_tags=tags, ex_metadata=metadata,
- ex_boot_disk=pd, ex_can_ip_forward=ip_forward,
- external_ip=external_ip, ex_disk_auto_delete=disk_auto_delete,
- ex_service_accounts=ex_sa_perms
- )
- changed = True
- except ResourceExistsError:
- inst = gce.ex_get_node(name, lc_zone)
- except GoogleBaseError as e:
- module.fail_json(msg='Unexpected error attempting to create ' +
- 'instance %s, error: %s' % (name, e.value))
-
- for i, lc_disk in enumerate(lc_disks):
- # Check whether the disk is already attached
- if (len(inst.extra['disks']) > i):
- attached_disk = inst.extra['disks'][i]
- if attached_disk['source'] != lc_disk.extra['selfLink']:
- module.fail_json(
- msg=("Disk at index %d does not match: requested=%s found=%s" % (
- i, lc_disk.extra['selfLink'], attached_disk['source'])))
- elif attached_disk['mode'] != disk_modes[i]:
- module.fail_json(
- msg=("Disk at index %d is in the wrong mode: requested=%s found=%s" % (
- i, disk_modes[i], attached_disk['mode'])))
- else:
- continue
- gce.attach_volume(inst, lc_disk, ex_mode=disk_modes[i])
- # Work around libcloud bug: attached volumes don't get added
- # to the instance metadata. get_instance_info() only cares about
- # source and index.
- if len(inst.extra['disks']) != i+1:
- inst.extra['disks'].append(
- {'source': lc_disk.extra['selfLink'], 'index': i})
-
- if inst:
- new_instances.append(inst)
-
- instance_names = []
- instance_json_data = []
- for inst in new_instances:
- d = get_instance_info(inst)
- instance_names.append(d['name'])
- instance_json_data.append(d)
-
- return (changed, instance_json_data, instance_names)
-
-
-def terminate_instances(module, gce, instance_names, zone_name):
- """Terminates a list of instances.
-
- module: Ansible module object
- gce: authenticated GCE connection object
- instance_names: a list of instance names to terminate
- zone_name: the zone where the instances reside prior to termination
-
- Returns a dictionary of instance names that were terminated.
-
- """
- changed = False
- terminated_instance_names = []
- for name in instance_names:
- inst = None
- try:
- inst = gce.ex_get_node(name, zone_name)
- except ResourceNotFoundError:
- pass
- except Exception as e:
- module.fail_json(msg=unexpected_error_msg(e), changed=False)
- if inst:
- gce.destroy_node(inst)
- terminated_instance_names.append(inst.name)
- changed = True
-
- return (changed, terminated_instance_names)
-
-
-def main():
- module = AnsibleModule(
- argument_spec=dict(
- image=dict(default='debian-7'),
- instance_names=dict(),
- machine_type=dict(default='n1-standard-1'),
- metadata=dict(),
- name=dict(),
- network=dict(default='default'),
- persistent_boot_disk=dict(type='bool', default=False),
- disks=dict(type='list'),
- state=dict(choices=['active', 'present', 'absent', 'deleted'],
- default='present'),
- tags=dict(type='list'),
- zone=dict(default='us-central1-a'),
- service_account_email=dict(),
- service_account_permissions=dict(type='list'),
- pem_file=dict(),
- project_id=dict(),
- ip_forward=dict(type='bool', default=False),
- external_ip=dict(choices=['ephemeral', 'none'],
- default='ephemeral'),
- disk_auto_delete=dict(type='bool', default=True),
- )
- )
-
- if not HAS_PYTHON26:
- module.fail_json(msg="GCE module requires python's 'ast' module, python v2.6+")
- if not HAS_LIBCLOUD:
- module.fail_json(msg='libcloud with GCE support (0.13.3+) required for this module')
-
- gce = gce_connect(module)
-
- image = module.params.get('image')
- instance_names = module.params.get('instance_names')
- machine_type = module.params.get('machine_type')
- metadata = module.params.get('metadata')
- name = module.params.get('name')
- network = module.params.get('network')
- persistent_boot_disk = module.params.get('persistent_boot_disk')
- state = module.params.get('state')
- tags = module.params.get('tags')
- zone = module.params.get('zone')
- ip_forward = module.params.get('ip_forward')
- changed = False
-
- inames = []
- if isinstance(instance_names, list):
- inames = instance_names
- elif isinstance(instance_names, str):
- inames = instance_names.split(',')
- if name:
- inames.append(name)
- if not inames:
- module.fail_json(msg='Must specify a "name" or "instance_names"',
- changed=False)
- if not zone:
- module.fail_json(msg='Must specify a "zone"', changed=False)
-
- json_output = {'zone': zone}
- if state in ['absent', 'deleted']:
- json_output['state'] = 'absent'
- (changed, terminated_instance_names) = terminate_instances(
- module, gce, inames, zone)
-
- # based on what user specified, return the same variable, although
- # value could be different if an instance could not be destroyed
- if instance_names:
- json_output['instance_names'] = terminated_instance_names
- elif name:
- json_output['name'] = name
-
- elif state in ['active', 'present']:
- json_output['state'] = 'present'
- (changed, instance_data, instance_name_list) = create_instances(
- module, gce, inames)
- json_output['instance_data'] = instance_data
- if instance_names:
- json_output['instance_names'] = instance_name_list
- elif name:
- json_output['name'] = name
-
- json_output['changed'] = changed
- module.exit_json(**json_output)
-
-# import module snippets
-from ansible.module_utils.basic import *
-from ansible.module_utils.gce import *
-if __name__ == '__main__':
- main()
diff --git a/playbooks/gce/openshift-cluster/list.yml b/playbooks/gce/openshift-cluster/list.yml
index 34dcd2496..34ab09533 100644
--- a/playbooks/gce/openshift-cluster/list.yml
+++ b/playbooks/gce/openshift-cluster/list.yml
@@ -16,18 +16,8 @@
groups: oo_list_hosts
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
ansible_become: "{{ deployment_vars[deployment_type].become }}"
+ oo_public_ipv4: "{{ hostvars[item].gce_public_ip }}"
+ oo_private_ipv4: "{{ hostvars[item].gce_private_ip }}"
with_items: "{{ groups[scratch_group] | default([], true) | difference(['localhost']) | difference(groups.status_terminated | default([], true)) }}"
-
-- name: List Hosts
- hosts: oo_list_hosts
-
-- name: List Hosts
- hosts: localhost
- become: no
- connection: local
- gather_facts: no
- vars_files:
- - vars.yml
- tasks:
- debug:
msg: "{{ hostvars | oo_select_keys(groups[scratch_group] | default([])) | oo_pretty_print_cluster }}"
diff --git a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml
index 7c8189224..b7604580c 100644
--- a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml
+++ b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml
@@ -9,6 +9,7 @@
project_id: "{{ lookup('env', 'gce_project_id') }}"
zone: "{{ lookup('env', 'zone') }}"
network: "{{ lookup('env', 'network') }}"
+ subnetwork: "{{ lookup('env', 'subnetwork') | default(omit, True) }}"
# unsupported in 1.9.+
#service_account_permissions: "datastore,logging-write"
tags:
diff --git a/playbooks/libvirt/openshift-cluster/list.yml b/playbooks/libvirt/openshift-cluster/list.yml
index 86d5d0aad..579cd7ac6 100644
--- a/playbooks/libvirt/openshift-cluster/list.yml
+++ b/playbooks/libvirt/openshift-cluster/list.yml
@@ -16,18 +16,8 @@
groups: oo_list_hosts
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
ansible_become: "{{ deployment_vars[deployment_type].become }}"
+ oo_public_ipv4: ""
+ oo_private_ipv4: "{{ hostvars[item].libvirt_ip_address }}"
with_items: "{{ groups[scratch_group] | default([]) | difference(['localhost']) }}"
-
-- name: List Hosts
- hosts: oo_list_hosts
-
-- name: List Hosts
- hosts: localhost
- become: no
- connection: local
- gather_facts: no
- vars_files:
- - vars.yml
- tasks:
- debug:
msg: "{{ hostvars | oo_select_keys(groups[scratch_group] | default([])) | oo_pretty_print_cluster }}"
diff --git a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml b/playbooks/openstack/openshift-cluster/files/heat_stack.yaml
index 458cf5ac7..20ce47c07 100644
--- a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml
+++ b/playbooks/openstack/openshift-cluster/files/heat_stack.yaml
@@ -45,7 +45,7 @@ parameters:
node_port_incoming:
type: string
label: Source of node port connections
- description: Authorized sources targetting node ports
+ description: Authorized sources targeting node ports
default: 0.0.0.0/0
num_etcd:
@@ -88,11 +88,6 @@ parameters:
label: Infra image
description: Name of the image for the infra node servers
- dns_image:
- type: string
- label: DNS image
- description: Name of the image for the DNS server
-
etcd_flavor:
type: string
label: Etcd flavor
@@ -113,11 +108,6 @@ parameters:
label: Infra flavor
description: Flavor of the infra node servers
- dns_flavor:
- type: string
- label: DNS flavor
- description: Flavor of the DNS server
-
outputs:
etcd_names:
@@ -168,26 +158,6 @@ outputs:
description: Floating IPs of the nodes
value: { get_attr: [ infra_nodes, floating_ip ] }
- dns_name:
- description: Name of the DNS
- value:
- get_attr:
- - dns
- - name
-
- dns_floating_ip:
- description: Floating IP of the DNS
- value:
- get_attr:
- - dns
- - addresses
- - str_replace:
- template: openshift-ansible-cluster_id-net
- params:
- cluster_id: { get_param: cluster_id }
- - 1
- - addr
-
resources:
net:
@@ -213,22 +183,7 @@ resources:
template: subnet_24_prefix.0/24
params:
subnet_24_prefix: { get_param: subnet_24_prefix }
- allocation_pools:
- - start:
- str_replace:
- template: subnet_24_prefix.3
- params:
- subnet_24_prefix: { get_param: subnet_24_prefix }
- end:
- str_replace:
- template: subnet_24_prefix.254
- params:
- subnet_24_prefix: { get_param: subnet_24_prefix }
- dns_nameservers:
- - str_replace:
- template: subnet_24_prefix.2
- params:
- subnet_24_prefix: { get_param: subnet_24_prefix }
+ dns_nameservers: { get_param: dns_nameservers }
router:
type: OS::Neutron::Router
@@ -428,44 +383,6 @@ resources:
port_range_min: 443
port_range_max: 443
- dns-secgrp:
- type: OS::Neutron::SecurityGroup
- properties:
- name:
- str_replace:
- template: openshift-ansible-cluster_id-dns-secgrp
- params:
- cluster_id: { get_param: cluster_id }
- description:
- str_replace:
- template: Security group for cluster_id cluster DNS
- params:
- cluster_id: { get_param: cluster_id }
- rules:
- - direction: ingress
- protocol: tcp
- port_range_min: 22
- port_range_max: 22
- remote_ip_prefix: { get_param: ssh_incoming }
- - direction: ingress
- protocol: udp
- port_range_min: 53
- port_range_max: 53
- remote_mode: remote_group_id
- remote_group_id: { get_resource: etcd-secgrp }
- - direction: ingress
- protocol: udp
- port_range_min: 53
- port_range_max: 53
- remote_mode: remote_group_id
- remote_group_id: { get_resource: master-secgrp }
- - direction: ingress
- protocol: udp
- port_range_min: 53
- port_range_max: 53
- remote_mode: remote_group_id
- remote_group_id: { get_resource: node-secgrp }
-
etcd:
type: OS::Heat::ResourceGroup
properties:
@@ -599,79 +516,3 @@ resources:
cluster_id: { get_param: cluster_id }
depends_on:
- interface
-
- dns:
- type: OS::Nova::Server
- properties:
- name:
- str_replace:
- template: cluster_id-dns
- params:
- cluster_id: { get_param: cluster_id }
- key_name: { get_resource: keypair }
- image: { get_param: dns_image }
- flavor: { get_param: dns_flavor }
- networks:
- - port: { get_resource: dns-port }
- user_data: { get_resource: dns-config }
- user_data_format: RAW
-
- dns-port:
- type: OS::Neutron::Port
- properties:
- network: { get_resource: net }
- fixed_ips:
- - subnet: { get_resource: subnet }
- ip_address:
- str_replace:
- template: subnet_24_prefix.2
- params:
- subnet_24_prefix: { get_param: subnet_24_prefix }
- security_groups:
- - { get_resource: dns-secgrp }
-
- dns-floating-ip:
- type: OS::Neutron::FloatingIP
- properties:
- floating_network: { get_param: external_net }
- port_id: { get_resource: dns-port }
-
- dns-config:
- type: OS::Heat::MultipartMime
- properties:
- parts:
- - config:
- str_replace:
- template: |
- #cloud-config
- disable_root: true
-
- system_info:
- default_user:
- name: openshift
- sudo: ["ALL=(ALL) NOPASSWD: ALL"]
-
- write_files:
- - path: /etc/sudoers.d/00-openshift-no-requiretty
- permissions: 440
- content: |
- Defaults:openshift !requiretty
- - path: /etc/sysconfig/network-scripts/ifcfg-eth0
- content: |
- DEVICE="eth0"
- BOOTPROTO="dhcp"
- DNS1="$dns1"
- DNS2="$dns2"
- PEERDNS="no"
- ONBOOT="yes"
- runcmd:
- - [ "/usr/bin/systemctl", "restart", "network" ]
- params:
- $dns1:
- get_param:
- - dns_nameservers
- - 0
- $dns2:
- get_param:
- - dns_nameservers
- - 1
diff --git a/playbooks/openstack/openshift-cluster/files/heat_stack_server.yaml b/playbooks/openstack/openshift-cluster/files/heat_stack_server.yaml
index f83f2c984..435139849 100644
--- a/playbooks/openstack/openshift-cluster/files/heat_stack_server.yaml
+++ b/playbooks/openstack/openshift-cluster/files/heat_stack_server.yaml
@@ -107,7 +107,7 @@ resources:
flavor: { get_param: flavor }
networks:
- port: { get_resource: port }
- user_data: { get_file: user-data }
+ user_data: { get_resource: config }
user_data_format: RAW
metadata:
environment: { get_param: cluster_env }
@@ -128,3 +128,25 @@ resources:
properties:
floating_network: { get_param: floating_network }
port_id: { get_resource: port }
+
+ config:
+ type: OS::Heat::CloudConfig
+ properties:
+ cloud_config:
+ disable_root: true
+
+ hostname: { get_param: name }
+
+ system_info:
+ default_user:
+ name: openshift
+ sudo: ["ALL=(ALL) NOPASSWD: ALL"]
+
+ write_files:
+ - path: /etc/sudoers.d/00-openshift-no-requiretty
+ permissions: 440
+ # content: Defaults:openshift !requiretty
+ # Encoded in base64 to be sure that we do not forget the trailing newline or
+ # sudo will not be able to parse that file
+ encoding: b64
+ content: RGVmYXVsdHM6b3BlbnNoaWZ0ICFyZXF1aXJldHR5Cg==
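
Because the sudoers fragment is now embedded base64-encoded (to guarantee the trailing newline sudo requires), it can be sanity-checked by decoding it:

    echo 'RGVmYXVsdHM6b3BlbnNoaWZ0ICFyZXF1aXJldHR5Cg==' | base64 -d
    # -> Defaults:openshift !requiretty
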
diff --git a/playbooks/openstack/openshift-cluster/files/user-data b/playbooks/openstack/openshift-cluster/files/user-data
deleted file mode 100644
index eb65f7cec..000000000
--- a/playbooks/openstack/openshift-cluster/files/user-data
+++ /dev/null
@@ -1,13 +0,0 @@
-#cloud-config
-disable_root: true
-
-system_info:
- default_user:
- name: openshift
- sudo: ["ALL=(ALL) NOPASSWD: ALL"]
-
-write_files:
- - path: /etc/sudoers.d/00-openshift-no-requiretty
- permissions: 440
- content: |
- Defaults:openshift !requiretty
diff --git a/playbooks/openstack/openshift-cluster/list.yml b/playbooks/openstack/openshift-cluster/list.yml
index de68f5207..6c6f671be 100644
--- a/playbooks/openstack/openshift-cluster/list.yml
+++ b/playbooks/openstack/openshift-cluster/list.yml
@@ -17,18 +17,8 @@
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
ansible_ssh_host: "{{ hostvars[item].ansible_ssh_host | default(item) }}"
ansible_become: "{{ deployment_vars[deployment_type].become }}"
+ oo_public_ipv4: "{{ hostvars[item].openstack.public_v4 }}"
+ oo_private_ipv4: "{{ hostvars[item].openstack.private_v4 }}"
with_items: "{{ groups[scratch_group] | default([]) | difference(['localhost']) }}"
-
-- name: List Hosts
- hosts: oo_list_hosts
-
-- name: List Hosts
- hosts: localhost
- become: no
- connection: local
- gather_facts: no
- vars_files:
- - vars.yml
- tasks:
- debug:
msg: "{{ hostvars | oo_select_keys(groups[scratch_group] | default([])) | oo_pretty_print_cluster('meta-') }}"