Diffstat (limited to 'playbooks')
-rw-r--r--  playbooks/README.md  19
-rw-r--r--  playbooks/adhoc/README.md  5
-rw-r--r--  playbooks/adhoc/atomic_openshift_tutorial_reset.yml  2
-rw-r--r--  playbooks/adhoc/bootstrap-fedora.yml  1
-rw-r--r--  playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml  3
-rwxr-xr-x  playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml  2
-rw-r--r--  playbooks/adhoc/grow_docker_vg/filter_plugins/grow_docker_vg_filters.py (renamed from playbooks/adhoc/grow_docker_vg/filter_plugins/oo_filters.py)  12
-rw-r--r--  playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml  8
-rw-r--r--  playbooks/adhoc/noc/create_host.yml  59
-rw-r--r--  playbooks/adhoc/noc/create_maintenance.yml  38
-rw-r--r--  playbooks/adhoc/noc/get_zabbix_problems.yml  43
-rw-r--r--  playbooks/adhoc/openshift_hosted_logging_efk.yaml  3
-rw-r--r--  playbooks/adhoc/s3_registry/s3_registry.yml  2
-rwxr-xr-x  playbooks/adhoc/sdn_restart/oo-sdn-restart.yml  2
-rw-r--r--  playbooks/adhoc/uninstall.yml  281
-rw-r--r--  playbooks/adhoc/zabbix_setup/clean_zabbix.yml  60
l---------  playbooks/adhoc/zabbix_setup/filter_plugins  1
-rwxr-xr-x  playbooks/adhoc/zabbix_setup/oo-clean-zaio.yml  7
-rwxr-xr-x  playbooks/adhoc/zabbix_setup/oo-config-zaio.yml  19
l---------  playbooks/adhoc/zabbix_setup/roles  1
-rw-r--r--  playbooks/aws/README.md  4
-rw-r--r--  playbooks/aws/openshift-cluster/cluster_hosts.yml  16
-rw-r--r--  playbooks/aws/openshift-cluster/config.yml  6
-rw-r--r--  playbooks/aws/openshift-cluster/library/ec2_ami_find.py  1
-rw-r--r--  playbooks/aws/openshift-cluster/list.yml  9
-rw-r--r--  playbooks/aws/openshift-cluster/tasks/launch_instances.yml  30
-rw-r--r--  playbooks/aws/openshift-cluster/terminate.yml  84
-rw-r--r--  playbooks/byo/README.md  11
-rw-r--r--  playbooks/byo/openshift-cluster/cluster_hosts.yml  16
-rw-r--r--  playbooks/byo/openshift-cluster/config.yml  3
-rw-r--r--  playbooks/byo/openshift-cluster/enable_dnsmasq.yml  4
-rw-r--r--  playbooks/byo/openshift-cluster/redeploy-certificates.yml  4
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml  12
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml  1
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml  24
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_2/README.md  18
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_2/upgrade.yml  65
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml  3
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml  1
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml  5
-rw-r--r--  playbooks/byo/openshift-node/network_manager.yml  36
-rw-r--r--  playbooks/byo/openshift-node/scaleup.yml  2
-rw-r--r--  playbooks/byo/openshift-preflight/README.md  43
-rw-r--r--  playbooks/byo/openshift-preflight/check.yml  31
-rw-r--r--  playbooks/byo/openshift_facts.yml  2
-rw-r--r--  playbooks/byo/rhel_subscribe.yml  6
-rw-r--r--  playbooks/common/README.md  9
-rw-r--r--  playbooks/common/openshift-cluster/additional_config.yml  1
-rw-r--r--  playbooks/common/openshift-cluster/config.yml  2
-rw-r--r--  playbooks/common/openshift-cluster/enable_dnsmasq.yml  2
-rw-r--r--  playbooks/common/openshift-cluster/evaluate_groups.yml  30
-rw-r--r--  playbooks/common/openshift-cluster/initialize_facts.yml  6
-rw-r--r--  playbooks/common/openshift-cluster/initialize_openshift_version.yml  17
-rw-r--r--  playbooks/common/openshift-cluster/openshift_hosted.yml  27
-rw-r--r--  playbooks/common/openshift-cluster/openshift_metrics.yml  5
-rw-r--r--  playbooks/common/openshift-cluster/redeploy-certificates.yml  8
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml  17
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/restart.yml  27
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml  33
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml  4
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/etcd/backup.yml  94
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/etcd/containerized_tasks.yml  46
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/etcd/fedora_tasks.yml  23
l---------  playbooks/common/openshift-cluster/upgrades/etcd/files/etcdctl.sh  1
l---------  playbooks/common/openshift-cluster/upgrades/etcd/filter_plugins  1
l---------  playbooks/common/openshift-cluster/upgrades/etcd/lookup_plugins  1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/etcd/main.yml  44
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/etcd/rhel_tasks.yml  20
l---------  playbooks/common/openshift-cluster/upgrades/etcd/roles  1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml  94
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/files/pre-upgrade-check  193
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/files/rpm_versions.sh  12
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/init.yml  2
-rwxr-xr-x  playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py  11
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml  9
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml  102
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml  45
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml  166
l---------  playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/nuke_images.sh  1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml  16
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_3/node_config_upgrade.yml  1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml  16
-rw-r--r--  playbooks/common/openshift-cluster/validate_hostnames.yml  4
-rw-r--r--  playbooks/common/openshift-cluster/verify_ansible_version.yml  11
-rw-r--r--  playbooks/common/openshift-etcd/service.yml  2
-rw-r--r--  playbooks/common/openshift-loadbalancer/service.yml  2
-rw-r--r--  playbooks/common/openshift-master/config.yml  49
-rw-r--r--  playbooks/common/openshift-master/restart.yml  13
-rw-r--r--  playbooks/common/openshift-master/restart_hosts.yml  3
-rw-r--r--  playbooks/common/openshift-master/restart_services.yml  13
-rw-r--r--  playbooks/common/openshift-master/service.yml  2
-rw-r--r--  playbooks/common/openshift-nfs/service.yml  2
-rw-r--r--  playbooks/common/openshift-node/config.yml  82
-rw-r--r--  playbooks/common/openshift-node/service.yml  2
-rw-r--r--  playbooks/gce/README.md  4
-rw-r--r--  playbooks/gce/openshift-cluster/cluster_hosts.yml  16
-rw-r--r--  playbooks/gce/openshift-cluster/library/gce.py  543
-rw-r--r--  playbooks/gce/openshift-cluster/list.yml  14
-rw-r--r--  playbooks/gce/openshift-cluster/tasks/launch_instances.yml  5
-rw-r--r--  playbooks/gce/openshift-cluster/terminate.yml  23
-rw-r--r--  playbooks/libvirt/README.md  4
-rw-r--r--  playbooks/libvirt/openshift-cluster/cluster_hosts.yml  16
-rw-r--r--  playbooks/libvirt/openshift-cluster/config.yml  2
-rw-r--r--  playbooks/libvirt/openshift-cluster/list.yml  14
-rw-r--r--  playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml  5
-rw-r--r--  playbooks/libvirt/openshift-cluster/terminate.yml  1
-rw-r--r--  playbooks/libvirt/openshift-cluster/vars.yml  20
-rw-r--r--  playbooks/openstack/README.md  4
-rw-r--r--  playbooks/openstack/openshift-cluster/cluster_hosts.yml  16
-rw-r--r--  playbooks/openstack/openshift-cluster/config.yml  2
-rw-r--r--  playbooks/openstack/openshift-cluster/launch.yml  50
-rw-r--r--  playbooks/openstack/openshift-cluster/list.yml  14
-rw-r--r--  playbooks/openstack/openshift-cluster/terminate.yml  1
-rw-r--r--  playbooks/openstack/openshift-cluster/vars.yml  3
114 files changed, 1303 insertions(+), 1731 deletions(-)
diff --git a/playbooks/README.md b/playbooks/README.md
new file mode 100644
index 000000000..5857a9f59
--- /dev/null
+++ b/playbooks/README.md
@@ -0,0 +1,19 @@
+# openshift-ansible playbooks
+
+In summary:
+
+- [`byo`](byo) (_Bring Your Own_ hosts) has the most actively maintained
+ playbooks for installing, upgrading and performing others tasks on OpenShift
+ clusters.
+- [`common`](common) has a set of playbooks that are included by playbooks in
+ `byo` and others.
+
+And:
+
+- [`adhoc`](adhoc) is a generic home for playbooks and tasks that are community
+ supported and not officially maintained.
+- [`aws`](aws), [`gce`](gce), [`libvirt`](libvirt) and [`openstack`](openstack)
+ are related to the [`bin/cluster`](../bin) tool and its usage is deprecated.
+
+Refer to the `README.md` file in each playbook directory for more information
+about them.
diff --git a/playbooks/adhoc/README.md b/playbooks/adhoc/README.md
new file mode 100644
index 000000000..69b9d3135
--- /dev/null
+++ b/playbooks/adhoc/README.md
@@ -0,0 +1,5 @@
+# _Ad hoc_ playbooks
+
+This directory holds playbooks and tasks that really don't have a better home.
+Existing playbooks living here are community supported and not officially
+maintained.
diff --git a/playbooks/adhoc/atomic_openshift_tutorial_reset.yml b/playbooks/adhoc/atomic_openshift_tutorial_reset.yml
index 5a5a00ea4..3c157bbf3 100644
--- a/playbooks/adhoc/atomic_openshift_tutorial_reset.yml
+++ b/playbooks/adhoc/atomic_openshift_tutorial_reset.yml
@@ -19,7 +19,7 @@
changed_when: False
failed_when: False
- - shell: docker images -q |xargs docker rmi
+ - shell: docker images -q |xargs docker rmi
changed_when: False
failed_when: False
diff --git a/playbooks/adhoc/bootstrap-fedora.yml b/playbooks/adhoc/bootstrap-fedora.yml
index b370d7fba..f12885b3a 100644
--- a/playbooks/adhoc/bootstrap-fedora.yml
+++ b/playbooks/adhoc/bootstrap-fedora.yml
@@ -1,3 +1,4 @@
+---
- hosts: OSEv3
gather_facts: false
tasks:
diff --git a/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml b/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml
index 4d32fc40b..f638fab83 100644
--- a/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml
+++ b/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml
@@ -56,7 +56,7 @@
- name: fail if we don't detect loopback
fail:
- msg: loopback not detected! Please investigate manually.
+ msg: loopback not detected! Please investigate manually.
when: loop_device_check.rc == 1
- name: stop zagg client monitoring container
@@ -139,4 +139,3 @@
register: dockerstart
- debug: var=dockerstart
-
diff --git a/playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml b/playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml
index 1438fd7d5..d988a28b0 100755
--- a/playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml
+++ b/playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml
@@ -43,7 +43,7 @@
- name: fail if we don't detect loopback
fail:
- msg: loopback not detected! Please investigate manually.
+ msg: loopback not detected! Please investigate manually.
when: loop_device_check.rc == 1
- name: stop zagg client monitoring container
diff --git a/playbooks/adhoc/grow_docker_vg/filter_plugins/oo_filters.py b/playbooks/adhoc/grow_docker_vg/filter_plugins/grow_docker_vg_filters.py
index d0264cde9..daff68fbe 100644
--- a/playbooks/adhoc/grow_docker_vg/filter_plugins/oo_filters.py
+++ b/playbooks/adhoc/grow_docker_vg/filter_plugins/grow_docker_vg_filters.py
@@ -5,22 +5,11 @@
Custom filters for use in openshift-ansible
'''
-import pdb
-
class FilterModule(object):
''' Custom ansible filters '''
@staticmethod
- def oo_pdb(arg):
- ''' This pops you into a pdb instance where arg is the data passed in
- from the filter.
- Ex: "{{ hostvars | oo_pdb }}"
- '''
- pdb.set_trace()
- return arg
-
- @staticmethod
def translate_volume_name(volumes, target_volume):
'''
This filter matches a device string /dev/sdX to /dev/xvdX
@@ -33,7 +22,6 @@ class FilterModule(object):
return None
-
def filters(self):
''' returns a mapping of filters to methods '''
return {
diff --git a/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml b/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml
index d24e9cafa..598f1966d 100644
--- a/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml
+++ b/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml
@@ -37,7 +37,7 @@
vars:
cli_volume_type: gp2
cli_volume_size: 200
-# cli_volume_iops: "{{ 30 * cli_volume_size }}"
+ #cli_volume_iops: "{{ 30 * cli_volume_size }}"
pre_tasks:
- fail:
@@ -65,7 +65,7 @@
- name: fail if we don't detect devicemapper
fail:
- msg: The "Storage Driver" in "docker info" is not set to "devicemapper"! Please investigate manually.
+ msg: The "Storage Driver" in "docker info" is not set to "devicemapper"! Please investigate manually.
when: device_mapper_check.rc == 1
# docker-storage-setup creates a docker-pool as the lvm. I am using docker-pool lvm to test
@@ -80,7 +80,7 @@
- name: fail if we don't find a docker volume group
fail:
- msg: Unable to find docker volume group. Please investigate manually.
+ msg: Unable to find docker volume group. Please investigate manually.
when: docker_vg_name.stdout_lines|length != 1
# docker-storage-setup creates a docker-pool as the lvm. I am using docker-pool lvm to test
@@ -95,7 +95,7 @@
- name: fail if we don't find a docker physical volume
fail:
- msg: Unable to find docker physical volume. Please investigate manually.
+ msg: Unable to find docker physical volume. Please investigate manually.
when: docker_pv_name.stdout_lines|length != 1
diff --git a/playbooks/adhoc/noc/create_host.yml b/playbooks/adhoc/noc/create_host.yml
deleted file mode 100644
index 2d2cae2b5..000000000
--- a/playbooks/adhoc/noc/create_host.yml
+++ /dev/null
@@ -1,59 +0,0 @@
----
-- name: 'Create a host object in zabbix'
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- roles:
- - os_zabbix
- post_tasks:
-
- - zbxapi:
- server: https://noc2.ops.rhcloud.com/zabbix/api_jsonrpc.php
- zbx_class: Template
- state: list
- params:
- host: ctr_test_kwoodson
- filter:
- host:
- - ctr_kwoodson_test_tmpl
-
- register: tmpl_results
-
- - debug: var=tmpl_results
-
-#ansible-playbook -e 'oo_desc=kwoodson test' -e 'oo_name=kwoodson test name' -e 'oo_start=1435715357' -e 'oo_stop=1435718985' -e 'oo_hostids=11549' create_maintenance.yml
-- name: 'Create a host object in zabbix'
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- roles:
- - os_zabbix
- post_tasks:
-
- - zbxapi:
- server: https://noc2.ops.rhcloud.com/zabbix/api_jsonrpc.php
- zbx_class: Host
- state: absent
- params:
- host: ctr_test_kwoodson
- interfaces:
- - type: 1
- main: 1
- useip: 1
- ip: 127.0.0.1
- dns: ""
- port: 10050
- groups:
- - groupid: 1
- templates: "{{ tmpl_results.results | oo_collect('templateid') | oo_build_zabbix_list_dict('templateid') }}"
- output: extend
- filter:
- host:
- - ctr_test_kwoodson
-
- register: host_results
-
- - debug: var=host_results
-
diff --git a/playbooks/adhoc/noc/create_maintenance.yml b/playbooks/adhoc/noc/create_maintenance.yml
deleted file mode 100644
index 8ad5fa0e2..000000000
--- a/playbooks/adhoc/noc/create_maintenance.yml
+++ /dev/null
@@ -1,38 +0,0 @@
----
-#ansible-playbook -e 'oo_desc=kwoodson test' -e 'oo_name=kwoodson test name' -e 'oo_start=1435715357' -e 'oo_stop=1435718985' -e 'oo_hostids=11549' create_maintenance.yml
-- name: 'Create a maintenace object in zabbix'
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- roles:
- - os_zabbix
- vars:
- oo_hostids: ''
- oo_groupids: ''
- post_tasks:
- - assert:
- that: oo_desc is defined
-
- - zbxapi:
- server: https://noc2.ops.rhcloud.com/zabbix/api_jsonrpc.php
- zbx_class: Maintenance
- state: present
- params:
- name: "{{ oo_name }}"
- description: "{{ oo_desc }}"
- active_since: "{{ oo_start }}"
- active_till: "{{ oo_stop }}"
- maintenance_type: "0"
- output: extend
- hostids: "{{ oo_hostids.split(',') | default([]) }}"
-#groupids: "{{ oo_groupids.split(',') | default([]) }}"
- timeperiods:
- - start_time: "{{ oo_start }}"
- period: "{{ oo_stop }}"
- selectTimeperiods: extend
-
- register: maintenance
-
- - debug: var=maintenance
-
diff --git a/playbooks/adhoc/noc/get_zabbix_problems.yml b/playbooks/adhoc/noc/get_zabbix_problems.yml
deleted file mode 100644
index 32fc7ce68..000000000
--- a/playbooks/adhoc/noc/get_zabbix_problems.yml
+++ /dev/null
@@ -1,43 +0,0 @@
----
-- name: 'Get current hosts who have triggers that are alerting by trigger description'
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- roles:
- - os_zabbix
- post_tasks:
- - assert:
- that: oo_desc is defined
-
- - zbxapi:
- server: https://noc2.ops.rhcloud.com/zabbix/api_jsonrpc.php
- zbx_class: Trigger
- state: list
- params:
- only_true: true
- output: extend
- selectHosts: extend
- searchWildCardsEnabled: 1
- search:
- description: "{{ oo_desc }}"
- register: problems
-
- - debug: var=problems
-
- - set_fact:
- problem_hosts: "{{ problems.results | oo_collect(attribute='hosts') | oo_flatten | oo_collect(attribute='host') | difference(['aggregates']) }}"
-
- - debug: var=problem_hosts
-
- - add_host:
- name: "{{ item }}"
- groups: problem_hosts_group
- with_items: "{{ problem_hosts }}"
-
-- name: "Run on problem hosts"
- hosts: problem_hosts_group
- gather_facts: no
- tasks:
- - command: "{{ oo_cmd }}"
- when: oo_cmd is defined
diff --git a/playbooks/adhoc/openshift_hosted_logging_efk.yaml b/playbooks/adhoc/openshift_hosted_logging_efk.yaml
index a3121d046..def1d24e0 100644
--- a/playbooks/adhoc/openshift_hosted_logging_efk.yaml
+++ b/playbooks/adhoc/openshift_hosted_logging_efk.yaml
@@ -2,5 +2,4 @@
- hosts: masters[0]
roles:
- role: openshift_hosted_logging
- openshift_hosted_logging_cleanup: no
-
+ openshift_hosted_logging_cleanup: no
diff --git a/playbooks/adhoc/s3_registry/s3_registry.yml b/playbooks/adhoc/s3_registry/s3_registry.yml
index daf84e242..2c79a1b4d 100644
--- a/playbooks/adhoc/s3_registry/s3_registry.yml
+++ b/playbooks/adhoc/s3_registry/s3_registry.yml
@@ -22,7 +22,7 @@
tasks:
- name: Check for AWS creds
- fail:
+ fail:
msg: "Couldn't find {{ item }} creds in ENV"
when: "{{ item }} == ''"
with_items:
diff --git a/playbooks/adhoc/sdn_restart/oo-sdn-restart.yml b/playbooks/adhoc/sdn_restart/oo-sdn-restart.yml
index 08e8f8968..ae7d01730 100755
--- a/playbooks/adhoc/sdn_restart/oo-sdn-restart.yml
+++ b/playbooks/adhoc/sdn_restart/oo-sdn-restart.yml
@@ -7,7 +7,7 @@
- name: Check vars
hosts: localhost
gather_facts: false
-
+
pre_tasks:
- fail:
msg: "Playbook requires host to be set"
diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml
index 789f66b14..f0cfa7f55 100644
--- a/playbooks/adhoc/uninstall.yml
+++ b/playbooks/adhoc/uninstall.yml
@@ -75,6 +75,10 @@
- hosts: nodes
become: yes
+ vars:
+ node_dirs:
+ - "/etc/origin"
+ - "/var/lib/origin"
tasks:
- name: unmask services
command: systemctl unmask "{{ item }}"
@@ -83,59 +87,66 @@
with_items:
- firewalld
- - name: Remove packages
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=absent"
- when: not is_atomic | bool
- with_items:
- - atomic-enterprise
- - atomic-enterprise-node
- - atomic-enterprise-sdn-ovs
- - atomic-openshift
- - atomic-openshift-clients
- - atomic-openshift-node
- - atomic-openshift-sdn-ovs
- - cockpit-bridge
- - cockpit-docker
- - cockpit-shell
- - cockpit-ws
- - kubernetes-client
- - openshift
- - openshift-node
- - openshift-sdn
- - openshift-sdn-ovs
- - openvswitch
- - origin
- - origin-clients
- - origin-node
- - origin-sdn-ovs
- - tuned-profiles-atomic-enterprise-node
- - tuned-profiles-atomic-openshift-node
- - tuned-profiles-openshift-node
- - tuned-profiles-origin-node
-
- - name: Remove flannel package
- action: "{{ ansible_pkg_mgr }} name=flannel state=absent"
- when: openshift_use_flannel | default(false) | bool and not is_atomic | bool
-
- - shell: systemctl reset-failed
- changed_when: False
-
- - shell: systemctl daemon-reload
- changed_when: False
-
- - name: Remove br0 interface
- shell: ovs-vsctl del-br br0
- changed_when: False
- failed_when: False
-
- - name: Remove linux interfaces
- shell: ip link del "{{ item }}"
- changed_when: False
- failed_when: False
- with_items:
- - lbr0
- - vlinuxbr
- - vovsbr
+ - block:
+ - block:
+ - name: Remove packages
+ package: name={{ item }} state=absent
+ with_items:
+ - atomic-enterprise
+ - atomic-enterprise-node
+ - atomic-enterprise-sdn-ovs
+ - atomic-openshift
+ - atomic-openshift-clients
+ - atomic-openshift-excluder
+ - atomic-openshift-docker-excluder
+ - atomic-openshift-node
+ - atomic-openshift-sdn-ovs
+ - cockpit-bridge
+ - cockpit-docker
+ - cockpit-shell
+ - cockpit-ws
+ - kubernetes-client
+ - openshift
+ - openshift-node
+ - openshift-sdn
+ - openshift-sdn-ovs
+ - openvswitch
+ - origin
+ - origin-excluder
+ - origin-docker-excluder
+ - origin-clients
+ - origin-node
+ - origin-sdn-ovs
+ - tuned-profiles-atomic-enterprise-node
+ - tuned-profiles-atomic-openshift-node
+ - tuned-profiles-openshift-node
+ - tuned-profiles-origin-node
+
+ - name: Remove flannel package
+ package: name=flannel state=absent
+ when: openshift_use_flannel | default(false) | bool
+ when: "{{ not is_atomic | bool }}"
+
+ - shell: systemctl reset-failed
+ changed_when: False
+
+ - shell: systemctl daemon-reload
+ changed_when: False
+
+ - name: Remove br0 interface
+ shell: ovs-vsctl del-br br0
+ changed_when: False
+ failed_when: False
+
+ - name: Remove linux interfaces
+ shell: ip link del "{{ item }}"
+ changed_when: False
+ failed_when: False
+ with_items:
+ - lbr0
+ - vlinuxbr
+ - vovsbr
+ when: "{{ openshift_remove_all | default(true) | bool }}"
- shell: find /var/lib/origin/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true
changed_when: False
@@ -172,28 +183,57 @@
failed_when: False
with_items: "{{ exited_containers_to_delete.results }}"
- - shell: docker images | egrep {{ item }} | awk '{ print $3 }'
- changed_when: False
- failed_when: False
- register: images_to_delete
+ - block:
+ - block:
+ - shell: docker images | egrep {{ item }} | awk '{ print $3 }'
+ changed_when: False
+ failed_when: False
+ register: images_to_delete
+ with_items:
+ - registry\.access\..*redhat\.com/openshift3
+ - registry\.access\..*redhat\.com/aep3
+ - registry\.qe\.openshift\.com/.*
+ - registry\.access\..*redhat\.com/rhel7/etcd
+ - docker.io/openshift
+
+ - shell: "docker rmi -f {{ item.stdout_lines | join(' ') }}"
+ changed_when: False
+ failed_when: False
+ with_items: "{{ images_to_delete.results }}"
+ when: "{{ openshift_uninstall_images | default(True) | bool }}"
+
+ - name: remove sdn drop files
+ file:
+ path: /run/openshift-sdn
+ state: absent
+
+ - name: Remove files owned by RPMs
+ file: path={{ item }} state=absent
+ with_items:
+ - /etc/sysconfig/openshift-node
+ - /etc/sysconfig/openvswitch
+ - /run/openshift-sdn
+ when: "{{ openshift_remove_all | default(True) | bool }}"
+
+ - find: path={{ item }} file_type=file
+ register: files
with_items:
- - registry\.access\..*redhat\.com/openshift3
- - registry\.access\..*redhat\.com/aep3
- - registry\.qe\.openshift\.com/.*
- - registry\.access\..*redhat\.com/rhel7/etcd
- - docker.io/openshift
- when: openshift_uninstall_images | default(True) | bool
-
- - shell: "docker rmi -f {{ item.stdout_lines | join(' ') }}"
- changed_when: False
- failed_when: False
- with_items: "{{ images_to_delete.results }}"
- when: openshift_uninstall_images | default(True) | bool
+ - "{{ node_dirs }}"
+
+ - find: path={{ item }} file_type=directory
+ register: directories
+ with_items:
+ - "{{ node_dirs }}"
+
+ - file: path={{ item.1.path }} state=absent
+ with_subelements:
+ - "{{ files.results | default([]) }}"
+ - files
- - name: Remove sdn drop files
- file:
- path: /run/openshift-sdn
- state: absent
+ - file: path={{ item.1.path }} state=absent
+ with_subelements:
+ - "{{ directories.results | default([]) }}"
+ - files
- name: Remove remaining files
file: path={{ item }} state=absent
@@ -205,13 +245,10 @@
- /etc/NetworkManager/dispatcher.d/99-origin-dns.sh
- /etc/openshift
- /etc/openshift-sdn
- - /etc/origin
- /etc/sysconfig/atomic-enterprise-node
- /etc/sysconfig/atomic-openshift-node
- /etc/sysconfig/atomic-openshift-node-dep
- - /etc/sysconfig/openshift-node
- /etc/sysconfig/openshift-node-dep
- - /etc/sysconfig/openvswitch
- /etc/sysconfig/origin-node
- /etc/sysconfig/origin-node
- /etc/sysconfig/origin-node-dep
@@ -223,10 +260,8 @@
- /etc/systemd/system/origin-node-dep.service
- /etc/systemd/system/origin-node.service
- /etc/systemd/system/origin-node.service.wants
- - /run/openshift-sdn
- /var/lib/atomic-enterprise
- /var/lib/openshift
- - /var/lib/origin
- name: restart docker
service: name=docker state=restarted
@@ -234,9 +269,12 @@
- name: restart NetworkManager
service: name=NetworkManager state=restarted
-
- hosts: masters
become: yes
+ vars:
+ master_dirs:
+ - "/etc/origin"
+ - "/var/lib/origin"
tasks:
- name: unmask services
command: systemctl unmask "{{ item }}"
@@ -247,13 +285,15 @@
- atomic-openshift-master
- name: Remove packages
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=absent"
- when: not is_atomic | bool
+ package: name={{ item }} state=absent
+ when: not is_atomic | bool and openshift_remove_all | default(True) | bool
with_items:
- atomic-enterprise
- atomic-enterprise-master
- atomic-openshift
- atomic-openshift-clients
+ - atomic-openshift-excluder
+ - atomic-openshift-docker-excluder
- atomic-openshift-master
- cockpit-bridge
- cockpit-docker
@@ -265,6 +305,8 @@
- openshift-master
- origin
- origin-clients
+ - origin-excluder
+ - origin-docker-excluder
- origin-master
- pacemaker
- pcs
@@ -275,6 +317,33 @@
- shell: systemctl daemon-reload
changed_when: False
+ - name: Remove files owned by RPMs
+ file: path={{ item }} state=absent
+ when: openshift_remove_all | default(True) | bool
+ with_items:
+ - /etc/sysconfig/atomic-openshift-master
+ - /etc/sysconfig/openvswitch
+
+ - find: path={{ item }} file_type=file
+ register: files
+ with_items:
+ - "{{ master_dirs }}"
+
+ - find: path={{ item }} file_type=directory
+ register: directories
+ with_items:
+ - "{{ master_dirs }}"
+
+ - file: path={{ item.1.path }} state=absent
+ with_subelements:
+ - "{{ files.results | default([]) }}"
+ - files
+
+ - file: path={{ item.1.path }} state=absent
+ with_subelements:
+ - "{{ directories.results | default([]) }}"
+ - files
+
- name: Remove remaining files
file: path={{ item }} state=absent
with_items:
@@ -284,7 +353,6 @@
- /etc/corosync
- /etc/openshift
- /etc/openshift-sdn
- - /etc/origin
- /etc/systemd/system/atomic-openshift-master.service
- /etc/systemd/system/atomic-openshift-master-api.service
- /etc/systemd/system/atomic-openshift-master-controllers.service
@@ -295,14 +363,12 @@
- /etc/sysconfig/atomic-enterprise-master
- /etc/sysconfig/atomic-enterprise-master-api
- /etc/sysconfig/atomic-enterprise-master-controllers
- - /etc/sysconfig/atomic-openshift-master
- /etc/sysconfig/atomic-openshift-master-api
- /etc/sysconfig/atomic-openshift-master-controllers
- /etc/sysconfig/origin-master
- /etc/sysconfig/origin-master-api
- /etc/sysconfig/origin-master-controllers
- /etc/sysconfig/openshift-master
- - /etc/sysconfig/openvswitch
- /etc/sysconfig/origin-master
- /etc/sysconfig/origin-master-api
- /etc/sysconfig/origin-master-controllers
@@ -310,7 +376,6 @@
- /usr/share/openshift/examples
- /var/lib/atomic-enterprise
- /var/lib/openshift
- - /var/lib/origin
- /var/lib/pacemaker
- /var/lib/pcsd
- /usr/lib/systemd/system/atomic-openshift-master-api.service
@@ -331,6 +396,10 @@
- hosts: etcd
become: yes
+ vars:
+ etcd_dirs:
+ - "/etc/etcd"
+ - "/var/lib/etcd"
tasks:
- name: unmask services
command: systemctl unmask "{{ item }}"
@@ -338,6 +407,7 @@
failed_when: False
with_items:
- etcd
+ - etcd3
- firewalld
- name: Stop additional atomic services
@@ -348,10 +418,11 @@
failed_when: false
- name: Remove packages
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=absent"
- when: not is_atomic | bool
+ package: name={{ item }} state=absent
+ when: not is_atomic | bool and openshift_remove_all | default(True) | bool
with_items:
- etcd
+ - etcd3
- shell: systemctl reset-failed
changed_when: False
@@ -359,12 +430,25 @@
- shell: systemctl daemon-reload
changed_when: False
- - name: Remove remaining files
- file: path={{ item }} state=absent
+ - find: path={{ item }} file_type=file
+ register: files
with_items:
- - /etc/ansible/facts.d/openshift.fact
- - /etc/etcd
- - /etc/systemd/system/etcd_container.service
+ - "{{ etcd_dirs }}"
+
+ - find: path={{ item }} file_type=directory
+ register: directories
+ with_items:
+ - "{{ etcd_dirs }}"
+
+ - file: path={{ item.1.path }} state=absent
+ with_subelements:
+ - "{{ files.results | default([]) }}"
+ - files
+
+ - file: path={{ item.1.path }} state=absent
+ with_subelements:
+ - "{{ directories.results | default([]) }}"
+ - files
# Intenationally using rm command over file module because if someone had mounted a filesystem
# at /var/lib/etcd then the contents was not removed correctly
@@ -374,6 +458,13 @@
warn: no
failed_when: false
+ - name: Remove remaining files
+ file: path={{ item }} state=absent
+ with_items:
+ - /etc/ansible/facts.d/openshift.fact
+ - /etc/systemd/system/etcd_container.service
+ - /etc/profile.d/etcdctl.sh
+
- hosts: lb
become: yes
tasks:
@@ -385,8 +476,8 @@
- firewalld
- name: Remove packages
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=absent"
- when: not is_atomic | bool
+ package: name={{ item }} state=absent
+ when: not is_atomic | bool and openshift_remove_all | default(True) | bool
with_items:
- haproxy
@@ -400,4 +491,4 @@
file: path={{ item }} state=absent
with_items:
- /etc/ansible/facts.d/openshift.fact
- - /var/lib/haproxy
+ - /var/lib/haproxy/stats
diff --git a/playbooks/adhoc/zabbix_setup/clean_zabbix.yml b/playbooks/adhoc/zabbix_setup/clean_zabbix.yml
deleted file mode 100644
index 09f7c76cc..000000000
--- a/playbooks/adhoc/zabbix_setup/clean_zabbix.yml
+++ /dev/null
@@ -1,60 +0,0 @@
----
-- hosts: localhost
- gather_facts: no
- connection: local
- become: no
- vars:
- g_server: http://localhost:8080/zabbix/api_jsonrpc.php
- g_user: ''
- g_password: ''
-
- roles:
- - lib_zabbix
-
- post_tasks:
- - name: CLEAN List template for heartbeat
- zbx_template:
- zbx_server: "{{ g_server }}"
- zbx_user: "{{ g_user }}"
- zbx_password: "{{ g_password }}"
- state: list
- name: 'Template Heartbeat'
- register: templ_heartbeat
-
- - name: CLEAN List template app zabbix server
- zbx_template:
- zbx_server: "{{ g_server }}"
- zbx_user: "{{ g_user }}"
- zbx_password: "{{ g_password }}"
- state: list
- name: 'Template App Zabbix Server'
- register: templ_zabbix_server
-
- - name: CLEAN List template app zabbix server
- zbx_template:
- zbx_server: "{{ g_server }}"
- zbx_user: "{{ g_user }}"
- zbx_password: "{{ g_password }}"
- state: list
- name: 'Template App Zabbix Agent'
- register: templ_zabbix_agent
-
- - name: CLEAN List all templates
- zbx_template:
- zbx_server: "{{ g_server }}"
- zbx_user: "{{ g_user }}"
- zbx_password: "{{ g_password }}"
- state: list
- register: templates
-
- - debug: var=templ_heartbeat.results
-
- - name: Remove templates if heartbeat template is missing
- zbx_template:
- zbx_server: "{{ g_server }}"
- zbx_user: "{{ g_user }}"
- zbx_password: "{{ g_password }}"
- name: "{{ item }}"
- state: absent
- with_items: "{{ templates.results | difference(templ_zabbix_agent.results) | difference(templ_zabbix_server.results) | oo_collect('host') }}"
- when: templ_heartbeat.results | length == 0
diff --git a/playbooks/adhoc/zabbix_setup/filter_plugins b/playbooks/adhoc/zabbix_setup/filter_plugins
deleted file mode 120000
index b0b7a3414..000000000
--- a/playbooks/adhoc/zabbix_setup/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../filter_plugins/
\ No newline at end of file
diff --git a/playbooks/adhoc/zabbix_setup/oo-clean-zaio.yml b/playbooks/adhoc/zabbix_setup/oo-clean-zaio.yml
deleted file mode 100755
index 0fe65b338..000000000
--- a/playbooks/adhoc/zabbix_setup/oo-clean-zaio.yml
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/usr/bin/env ansible-playbook
----
-- include: clean_zabbix.yml
- vars:
- g_server: http://localhost/zabbix/api_jsonrpc.php
- g_user: Admin
- g_password: zabbix
diff --git a/playbooks/adhoc/zabbix_setup/oo-config-zaio.yml b/playbooks/adhoc/zabbix_setup/oo-config-zaio.yml
deleted file mode 100755
index 2f1d003ff..000000000
--- a/playbooks/adhoc/zabbix_setup/oo-config-zaio.yml
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/usr/bin/ansible-playbook
----
-- hosts: localhost
- gather_facts: no
- connection: local
- become: no
- vars:
- g_server: http://localhost/zabbix/api_jsonrpc.php
- g_user: Admin
- g_password: zabbix
- g_zbx_scriptrunner_user: scriptrunner
- g_zbx_scriptrunner_bastion_host: specialhost.example.com
- roles:
- - role: os_zabbix
- ozb_server: "{{ g_server }}"
- ozb_user: "{{ g_user }}"
- ozb_password: "{{ g_password }}"
- ozb_scriptrunner_user: "{{ g_zbx_scriptrunner_user }}"
- ozb_scriptrunner_bastion_host: "{{ g_zbx_scriptrunner_bastion_host }}"
diff --git a/playbooks/adhoc/zabbix_setup/roles b/playbooks/adhoc/zabbix_setup/roles
deleted file mode 120000
index 20c4c58cf..000000000
--- a/playbooks/adhoc/zabbix_setup/roles
+++ /dev/null
@@ -1 +0,0 @@
-../../../roles
\ No newline at end of file
diff --git a/playbooks/aws/README.md b/playbooks/aws/README.md
new file mode 100644
index 000000000..99698b4d0
--- /dev/null
+++ b/playbooks/aws/README.md
@@ -0,0 +1,4 @@
+# AWS playbooks
+
+This playbook directory is meant to be driven by [`bin/cluster`](../../bin),
+which is community supported and most use is considered deprecated.
diff --git a/playbooks/aws/openshift-cluster/cluster_hosts.yml b/playbooks/aws/openshift-cluster/cluster_hosts.yml
index 119b376aa..fbaf81dec 100644
--- a/playbooks/aws/openshift-cluster/cluster_hosts.yml
+++ b/playbooks/aws/openshift-cluster/cluster_hosts.yml
@@ -1,21 +1,21 @@
---
-g_all_hosts: "{{ groups['tag_clusterid_' ~ cluster_id] | default([])
- | intersect(groups['tag_environment_' ~ cluster_env] | default([])) }}"
+g_all_hosts: "{{ groups['tag_clusterid_' ~ cluster_id] | default([])
+ | intersect(groups['tag_environment_' ~ cluster_env] | default([])) }}"
-g_etcd_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_etcd'] | default([])) }}"
+g_etcd_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_etcd'] | default([])) }}"
-g_lb_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_lb'] | default([])) }}"
+g_lb_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_lb'] | default([])) }}"
-g_nfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_nfs'] | default([])) }}"
+g_nfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_nfs'] | default([])) }}"
-g_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_master'] | default([])) }}"
+g_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_master'] | default([])) }}"
g_new_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_new_master'] | default([])) }}"
-g_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_node'] | default([])) }}"
+g_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_node'] | default([])) }}"
g_new_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_new_node'] | default([])) }}"
-g_infra_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type_infra'] | default([])) }}"
+g_infra_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type_infra'] | default([])) }}"
g_compute_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type_compute'] | default([])) }}"
diff --git a/playbooks/aws/openshift-cluster/config.yml b/playbooks/aws/openshift-cluster/config.yml
index 05cfe7d6e..d60b68885 100644
--- a/playbooks/aws/openshift-cluster/config.yml
+++ b/playbooks/aws/openshift-cluster/config.yml
@@ -1,6 +1,4 @@
---
-- include: ../../common/openshift-cluster/verify_ansible_version.yml
-
- hosts: localhost
gather_facts: no
tasks:
@@ -19,8 +17,8 @@
- include: ../../common/openshift-cluster/config.yml
vars:
- g_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- g_sudo: "{{ deployment_vars[deployment_type].become }}"
+ g_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ g_sudo: "{{ deployment_vars[deployment_type].become }}"
g_nodeonmaster: true
openshift_cluster_id: "{{ cluster_id }}"
openshift_debug_level: "{{ debug_level }}"
diff --git a/playbooks/aws/openshift-cluster/library/ec2_ami_find.py b/playbooks/aws/openshift-cluster/library/ec2_ami_find.py
index 2b1db62d8..99d0f44f0 100644
--- a/playbooks/aws/openshift-cluster/library/ec2_ami_find.py
+++ b/playbooks/aws/openshift-cluster/library/ec2_ami_find.py
@@ -1,5 +1,6 @@
#!/usr/bin/python
#pylint: skip-file
+# flake8: noqa
#
# This file is part of Ansible
#
diff --git a/playbooks/aws/openshift-cluster/list.yml b/playbooks/aws/openshift-cluster/list.yml
index 4934ae6d0..ed8aac398 100644
--- a/playbooks/aws/openshift-cluster/list.yml
+++ b/playbooks/aws/openshift-cluster/list.yml
@@ -16,11 +16,8 @@
groups: oo_list_hosts
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
ansible_become: "{{ deployment_vars[deployment_type].become }}"
+ oo_public_ipv4: "{{ hostvars[item].ec2_ip_address }}"
+ oo_private_ipv4: "{{ hostvars[item].ec2_private_ip_address }}"
with_items: "{{ groups[scratch_group] | default([]) | difference(['localhost']) }}"
-
-- name: List Hosts
- hosts: oo_list_hosts
- gather_facts: no
- tasks:
- debug:
- msg: "public ip:{{ hostvars[inventory_hostname].ec2_ip_address }} private ip:{{ hostvars[inventory_hostname].ec2_private_ip_address }}"
+ msg: "{{ hostvars | oo_select_keys(groups[scratch_group] | default([])) | oo_pretty_print_cluster }}"
diff --git a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml
index 4d76d3bfe..608512b79 100644
--- a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml
+++ b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml
@@ -125,21 +125,21 @@
- set_fact:
logrotate:
- - name: syslog
- path: |
- /var/log/cron
- /var/log/maillog
- /var/log/messages
- /var/log/secure
- /var/log/spooler"
- options:
- - daily
- - rotate 7
- - compress
- - sharedscripts
- - missingok
- scripts:
- postrotate: "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true"
+ - name: syslog
+ path: |
+ /var/log/cron
+ /var/log/maillog
+ /var/log/messages
+ /var/log/secure
+ /var/log/spooler"
+ options:
+ - daily
+ - rotate 7
+ - compress
+ - sharedscripts
+ - missingok
+ scripts:
+ postrotate: "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true"
- name: Add new instances groups and variables
add_host:
diff --git a/playbooks/aws/openshift-cluster/terminate.yml b/playbooks/aws/openshift-cluster/terminate.yml
index 7a8375d0e..1f15aa4bf 100644
--- a/playbooks/aws/openshift-cluster/terminate.yml
+++ b/playbooks/aws/openshift-cluster/terminate.yml
@@ -29,49 +29,49 @@
become: no
gather_facts: no
tasks:
- - name: Remove tags from instances
- ec2_tag:
- resource: "{{ hostvars[item]['ec2_id'] }}"
- region: "{{ hostvars[item]['ec2_region'] }}"
- state: absent
- tags:
- environment: "{{ hostvars[item]['ec2_tag_environment'] }}"
- clusterid: "{{ hostvars[item]['ec2_tag_clusterid'] }}"
- host-type: "{{ hostvars[item]['ec2_tag_host-type'] }}"
- sub_host_type: "{{ hostvars[item]['ec2_tag_sub-host-type'] }}"
- with_items: "{{ groups.oo_hosts_to_terminate }}"
- when: "'oo_hosts_to_terminate' in groups"
+ - name: Remove tags from instances
+ ec2_tag:
+ resource: "{{ hostvars[item]['ec2_id'] }}"
+ region: "{{ hostvars[item]['ec2_region'] }}"
+ state: absent
+ tags:
+ environment: "{{ hostvars[item]['ec2_tag_environment'] }}"
+ clusterid: "{{ hostvars[item]['ec2_tag_clusterid'] }}"
+ host-type: "{{ hostvars[item]['ec2_tag_host-type'] }}"
+ sub_host_type: "{{ hostvars[item]['ec2_tag_sub-host-type'] }}"
+ with_items: "{{ groups.oo_hosts_to_terminate }}"
+ when: "'oo_hosts_to_terminate' in groups"
- - name: Terminate instances
- ec2:
- state: absent
- instance_ids: ["{{ hostvars[item].ec2_id }}"]
- region: "{{ hostvars[item].ec2_region }}"
- ignore_errors: yes
- register: ec2_term
- with_items: "{{ groups.oo_hosts_to_terminate }}"
- when: "'oo_hosts_to_terminate' in groups"
+ - name: Terminate instances
+ ec2:
+ state: absent
+ instance_ids: ["{{ hostvars[item].ec2_id }}"]
+ region: "{{ hostvars[item].ec2_region }}"
+ ignore_errors: yes
+ register: ec2_term
+ with_items: "{{ groups.oo_hosts_to_terminate }}"
+ when: "'oo_hosts_to_terminate' in groups"
- # Fail if any of the instances failed to terminate with an error other
- # than 403 Forbidden
- - fail:
- msg: "Terminating instance {{ item.ec2_id }} failed with message {{ item.msg }}"
- when: "'oo_hosts_to_terminate' in groups and item.has_key('failed') and item.failed"
- with_items: "{{ ec2_term.results }}"
+ # Fail if any of the instances failed to terminate with an error other
+ # than 403 Forbidden
+ - fail:
+ msg: "Terminating instance {{ item.ec2_id }} failed with message {{ item.msg }}"
+ when: "'oo_hosts_to_terminate' in groups and item.has_key('failed') and item.failed"
+ with_items: "{{ ec2_term.results }}"
- - name: Stop instance if termination failed
- ec2:
- state: stopped
- instance_ids: ["{{ item.item.ec2_id }}"]
- region: "{{ item.item.ec2_region }}"
- register: ec2_stop
- when: "'oo_hosts_to_terminate' in groups and item.has_key('failed') and item.failed"
- with_items: "{{ ec2_term.results }}"
+ - name: Stop instance if termination failed
+ ec2:
+ state: stopped
+ instance_ids: ["{{ item.item.ec2_id }}"]
+ region: "{{ item.item.ec2_region }}"
+ register: ec2_stop
+ when: "'oo_hosts_to_terminate' in groups and item.has_key('failed') and item.failed"
+ with_items: "{{ ec2_term.results }}"
- - name: Rename stopped instances
- ec2_tag: resource={{ item.item.item.ec2_id }} region={{ item.item.item.ec2_region }} state=present
- args:
- tags:
- Name: "{{ item.item.item.ec2_tag_Name }}-terminate"
- with_items: "{{ ec2_stop.results }}"
- when: ec2_stop | changed
+ - name: Rename stopped instances
+ ec2_tag: resource={{ item.item.item.ec2_id }} region={{ item.item.item.ec2_region }} state=present
+ args:
+ tags:
+ Name: "{{ item.item.item.ec2_tag_Name }}-terminate"
+ with_items: "{{ ec2_stop.results }}"
+ when: ec2_stop | changed
diff --git a/playbooks/byo/README.md b/playbooks/byo/README.md
new file mode 100644
index 000000000..460fd7cf6
--- /dev/null
+++ b/playbooks/byo/README.md
@@ -0,0 +1,11 @@
+# Bring Your Own hosts playbooks
+
+This directory has the most actively used, maintained and supported set of
+playbooks for installing, upgrading and performing others tasks on OpenShift
+clusters.
+
+Usage is documented in the official OpenShift documentation pages, under the
+Advanced Installation topic:
+
+- [OpenShift Origin: Advanced Installation](https://docs.openshift.org/latest/install_config/install/advanced_install.html)
+- [OpenShift Container Platform: Advanced Installation](https://docs.openshift.com/container-platform/latest/install_config/install/advanced_install.html)
diff --git a/playbooks/byo/openshift-cluster/cluster_hosts.yml b/playbooks/byo/openshift-cluster/cluster_hosts.yml
index 658204c17..cb464cf0d 100644
--- a/playbooks/byo/openshift-cluster/cluster_hosts.yml
+++ b/playbooks/byo/openshift-cluster/cluster_hosts.yml
@@ -1,19 +1,19 @@
---
-g_etcd_hosts: "{{ groups.etcd | default([]) }}"
+g_etcd_hosts: "{{ groups.etcd | default([]) }}"
-g_lb_hosts: "{{ groups.lb | default([]) }}"
+g_lb_hosts: "{{ groups.lb | default([]) }}"
g_master_hosts: "{{ groups.masters | default([]) }}"
g_new_master_hosts: "{{ groups.new_masters | default([]) }}"
-g_node_hosts: "{{ groups.nodes | default([]) }}"
+g_node_hosts: "{{ groups.nodes | default([]) }}"
g_new_node_hosts: "{{ groups.new_nodes | default([]) }}"
-g_nfs_hosts: "{{ groups.nfs | default([]) }}"
+g_nfs_hosts: "{{ groups.nfs | default([]) }}"
-g_all_hosts: "{{ g_master_hosts | union(g_node_hosts) | union(g_etcd_hosts)
- | union(g_lb_hosts) | union(g_nfs_hosts)
- | union(g_new_node_hosts)| union(g_new_master_hosts)
- | default([]) }}"
+g_all_hosts: "{{ g_master_hosts | union(g_node_hosts) | union(g_etcd_hosts)
+ | union(g_lb_hosts) | union(g_nfs_hosts)
+ | union(g_new_node_hosts)| union(g_new_master_hosts)
+ | default([]) }}"
diff --git a/playbooks/byo/openshift-cluster/config.yml b/playbooks/byo/openshift-cluster/config.yml
index fccb03982..5d90da28a 100644
--- a/playbooks/byo/openshift-cluster/config.yml
+++ b/playbooks/byo/openshift-cluster/config.yml
@@ -1,6 +1,4 @@
---
-- include: ../../common/openshift-cluster/verify_ansible_version.yml
-
- name: Create initial host groups for localhost
hosts: localhost
connection: local
@@ -14,6 +12,7 @@
name: "{{ item }}"
groups: l_oo_all_hosts
with_items: "{{ g_all_hosts | default([]) }}"
+ changed_when: no
- name: Create initial host groups for all hosts
hosts: l_oo_all_hosts
diff --git a/playbooks/byo/openshift-cluster/enable_dnsmasq.yml b/playbooks/byo/openshift-cluster/enable_dnsmasq.yml
index 0ba11a21b..fab3e111f 100644
--- a/playbooks/byo/openshift-cluster/enable_dnsmasq.yml
+++ b/playbooks/byo/openshift-cluster/enable_dnsmasq.yml
@@ -1,6 +1,4 @@
---
-- include: ../../common/openshift-cluster/verify_ansible_version.yml
-
- hosts: localhost
connection: local
become: no
@@ -16,5 +14,5 @@
gather_facts: no
tasks:
- include_vars: ../../byo/openshift-cluster/cluster_hosts.yml
-
+
- include: ../../common/openshift-cluster/enable_dnsmasq.yml
diff --git a/playbooks/byo/openshift-cluster/redeploy-certificates.yml b/playbooks/byo/openshift-cluster/redeploy-certificates.yml
index 6d1247e0f..73d9baadb 100644
--- a/playbooks/byo/openshift-cluster/redeploy-certificates.yml
+++ b/playbooks/byo/openshift-cluster/redeploy-certificates.yml
@@ -1,6 +1,4 @@
---
-- include: ../../common/openshift-cluster/verify_ansible_version.yml
-
- hosts: localhost
connection: local
become: no
@@ -16,7 +14,7 @@
gather_facts: no
tasks:
- include_vars: ../../byo/openshift-cluster/cluster_hosts.yml
-
+
- include: ../../common/openshift-cluster/redeploy-certificates.yml
vars:
openshift_deployment_type: "{{ deployment_type }}"
diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml
index 834461e14..dc0bf73a2 100644
--- a/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml
@@ -1,4 +1,4 @@
-
+---
- name: Check for appropriate Docker versions
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
roles:
@@ -18,20 +18,20 @@
# If a node fails, halt everything, the admin will need to clean up and we
# don't want to carry on, potentially taking out every node. The playbook can safely be re-run
# and will not take any action on a node already running the requested docker version.
-- name: Evacuate and upgrade nodes
+- name: Drain and upgrade nodes
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
serial: 1
any_errors_fatal: true
tasks:
- - name: Prepare for Node evacuation
+ - name: Prepare for Node draining
command: >
{{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename }} --schedulable=false
delegate_to: "{{ groups.oo_first_master.0 }}"
when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade
- - name: Evacuate Node for Kubelet upgrade
+ - name: Drain Node for Kubelet upgrade
command: >
- {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename }} --evacuate --force
+ {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename }} {{ openshift.common.evacuate_or_drain }} --force
delegate_to: "{{ groups.oo_first_master.0 }}"
when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade
@@ -42,6 +42,4 @@
command: >
{{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename }} --schedulable=true
delegate_to: "{{ groups.oo_first_master.0 }}"
- when: openshift.node.schedulable | bool
when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade and openshift.node.schedulable | bool
-
diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml
index 47a161d47..d337b6f75 100644
--- a/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml
@@ -1,3 +1,4 @@
+---
# Playbook to upgrade Docker to the max allowable version for an OpenShift cluster.
- hosts: localhost
connection: local
diff --git a/playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml b/playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml
new file mode 100644
index 000000000..a365ae994
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml
@@ -0,0 +1,24 @@
+---
+- name: Create initial host groups for localhost
+ hosts: localhost
+ connection: local
+ become: no
+ gather_facts: no
+ tags:
+ - always
+ tasks:
+ - include_vars: ../cluster_hosts.yml
+ - add_host:
+ name: "{{ item }}"
+ groups: l_oo_all_hosts
+ with_items: "{{ g_all_hosts | default([]) }}"
+
+- name: Create initial host groups for all hosts
+ hosts: l_oo_all_hosts
+ gather_facts: no
+ tags:
+ - always
+ tasks:
+ - include_vars: ../cluster_hosts.yml
+
+- include: ../../../common/openshift-cluster/upgrades/etcd/main.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_2/README.md b/playbooks/byo/openshift-cluster/upgrades/v3_2/README.md
deleted file mode 100644
index 30603463a..000000000
--- a/playbooks/byo/openshift-cluster/upgrades/v3_2/README.md
+++ /dev/null
@@ -1,18 +0,0 @@
-# v3.2 Major and Minor Upgrade Playbook
-
-## Overview
-This playbook currently performs the
-following steps.
-
- * Upgrade and restart master services
- * Unschedule node.
- * Upgrade and restart docker
- * Upgrade and restart node services
- * Modifies the subset of the configuration necessary
- * Applies the latest cluster policies
- * Updates the default router if one exists
- * Updates the default registry if one exists
- * Updates image streams and quickstarts
-
-## Usage
-ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_2/upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_2/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_2/upgrade.yml
deleted file mode 100644
index d92761e48..000000000
--- a/playbooks/byo/openshift-cluster/upgrades/v3_2/upgrade.yml
+++ /dev/null
@@ -1,65 +0,0 @@
----
-- include: ../../../../common/openshift-cluster/verify_ansible_version.yml
-
-- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tasks:
- - include_vars: ../../../../byo/openshift-cluster/cluster_hosts.yml
- - add_host:
- name: "{{ item }}"
- groups: l_oo_all_hosts
- with_items: "{{ g_all_hosts | default([]) }}"
-
-- hosts: l_oo_all_hosts
- gather_facts: no
- tasks:
- - include_vars: ../../../../byo/openshift-cluster/cluster_hosts.yml
-
-- include: ../../../../common/openshift-cluster/evaluate_groups.yml
- vars:
- # Do not allow adding hosts during upgrade.
- g_new_master_hosts: []
- g_new_node_hosts: []
- openshift_cluster_id: "{{ cluster_id | default('default') }}"
- openshift_deployment_type: "{{ deployment_type }}"
-
-- name: Set oo_options
- hosts: oo_all_hosts
- tasks:
- - set_fact:
- openshift_docker_additional_registries: "{{ lookup('oo_option', 'docker_additional_registries') }}"
- when: openshift_docker_additional_registries is not defined
- - set_fact:
- openshift_docker_insecure_registries: "{{ lookup('oo_option', 'docker_insecure_registries') }}"
- when: openshift_docker_insecure_registries is not defined
- - set_fact:
- openshift_docker_blocked_registries: "{{ lookup('oo_option', 'docker_blocked_registries') }}"
- when: openshift_docker_blocked_registries is not defined
- - set_fact:
- openshift_docker_options: "{{ lookup('oo_option', 'docker_options') }}"
- when: openshift_docker_options is not defined
- - set_fact:
- openshift_docker_log_driver: "{{ lookup('oo_option', 'docker_log_driver') }}"
- when: openshift_docker_log_driver is not defined
- - set_fact:
- openshift_docker_log_options: "{{ lookup('oo_option', 'docker_log_options') }}"
- when: openshift_docker_log_options is not defined
-
-
-# Configure the upgrade target for the common upgrade tasks:
-- hosts: l_oo_all_hosts
- tasks:
- - set_fact:
- openshift_upgrade_target: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}"
- openshift_upgrade_min: "{{ '1.1' if deployment_type == 'origin' else '3.1' }}"
-
-- include: ../../../../common/openshift-cluster/upgrades/pre.yml
- vars:
- openshift_deployment_type: "{{ deployment_type }}"
-- include: ../../../../common/openshift-cluster/upgrades/upgrade.yml
- vars:
- openshift_deployment_type: "{{ deployment_type }}"
-- include: ../../../openshift-master/restart.yml
-- include: ../../../../common/openshift-cluster/upgrades/post.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml
index 9a5d84751..4ce815271 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml
@@ -92,9 +92,8 @@
vars:
master_config_hook: "v3_3/master_config_upgrade.yml"
-- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml
-
- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml
vars:
node_config_hook: "v3_3/node_config_upgrade.yml"
+- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
index c9338a960..d6af71827 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
@@ -98,4 +98,3 @@
master_config_hook: "v3_3/master_config_upgrade.yml"
- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml
-
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml
index 4f8a80ee8..d6115e7a5 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml
@@ -89,8 +89,9 @@
- include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml
- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml
+ vars:
+ master_config_hook: "v3_4/master_config_upgrade.yml"
- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml
+- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml
diff --git a/playbooks/byo/openshift-node/network_manager.yml b/playbooks/byo/openshift-node/network_manager.yml
new file mode 100644
index 000000000..344b22240
--- /dev/null
+++ b/playbooks/byo/openshift-node/network_manager.yml
@@ -0,0 +1,36 @@
+---
+- hosts: localhost
+ connection: local
+ become: no
+ gather_facts: no
+ tasks:
+ - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml
+ - add_host:
+ name: "{{ item }}"
+ groups: l_oo_all_hosts
+ with_items: "{{ g_all_hosts }}"
+
+- hosts: l_oo_all_hosts
+ become: yes
+ tasks:
+ - name: install NetworkManager
+ package:
+ name: 'NetworkManager'
+ state: present
+
+ - name: configure NetworkManager
+ lineinfile:
+ dest: "/etc/sysconfig/network-scripts/ifcfg-{{ ansible_default_ipv4['interface'] }}"
+ regexp: '^{{ item }}='
+ line: '{{ item }}=yes'
+ state: present
+ create: yes
+ with_items:
+ - 'USE_PEERDNS'
+ - 'NM_CONTROLLED'
+
+ - name: enable and start NetworkManager
+ service:
+ name: 'NetworkManager'
+ state: started
+ enabled: yes
diff --git a/playbooks/byo/openshift-node/scaleup.yml b/playbooks/byo/openshift-node/scaleup.yml
index 902221931..d8556c94d 100644
--- a/playbooks/byo/openshift-node/scaleup.yml
+++ b/playbooks/byo/openshift-node/scaleup.yml
@@ -20,3 +20,5 @@
openshift_cluster_id: "{{ cluster_id | default('default') }}"
openshift_debug_level: "{{ debug_level | default(2) }}"
openshift_deployment_type: "{{ deployment_type }}"
+ openshift_master_etcd_hosts: "{{ groups.etcd | default([]) }}"
+ openshift_master_etcd_port: 2379
diff --git a/playbooks/byo/openshift-preflight/README.md b/playbooks/byo/openshift-preflight/README.md
new file mode 100644
index 000000000..b50292eac
--- /dev/null
+++ b/playbooks/byo/openshift-preflight/README.md
@@ -0,0 +1,43 @@
+# OpenShift preflight checks
+
+Here we provide an Ansible playbook for detecting potential roadblocks prior to
+an install or upgrade.
+
+Ansible's default operation mode is to fail fast, on the first error. However,
+when performing checks, it is useful to gather as much information about
+problems as possible in a single run.
+
+The `check.yml` playbook runs a battery of checks against the inventory hosts
+and tells Ansible to ignore intermediate errors, thus giving a more complete
+diagnostic of the state of each host. Still, if any check fails, the playbook
+run will be marked as failed.
+
+To facilitate understanding the problems that were encountered, we provide a
+custom callback plugin to summarize execution errors at the end of a playbook
+run.
+
+---
+
+*Note that currently the `check.yml` playbook is only useful for RPM-based
+installations. Containerized installs are excluded from checks for now, but
+might be included in the future if there is demand for that.*
+
+---
+
+## Running
+
+With Ansible 2.2 or greater installed, run the playbook directly against your
+inventory file. Here are the steps:
+
+1. If you haven't done it yet, clone this repository:
+
+ ```console
+ $ git clone https://github.com/openshift/openshift-ansible
+ $ cd openshift-ansible
+ ```
+
+2. Run the playbook:
+
+ ```console
+ $ ansible-playbook -i <inventory file> playbooks/byo/openshift-preflight/check.yml
+ ```
diff --git a/playbooks/byo/openshift-preflight/check.yml b/playbooks/byo/openshift-preflight/check.yml
new file mode 100644
index 000000000..32673d01d
--- /dev/null
+++ b/playbooks/byo/openshift-preflight/check.yml
@@ -0,0 +1,31 @@
+---
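+# Each group of checks below runs with ignore_errors so that every check is
+# executed; the final verify_status play fails the run if any check reported
+# a problem.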
+- hosts: OSEv3
+ roles:
+ - openshift_preflight/init
+
+- hosts: OSEv3
+ name: checks that apply to all hosts
+ gather_facts: no
+ ignore_errors: yes
+ roles:
+ - openshift_preflight/common
+
+- hosts: masters
+ name: checks that apply to masters
+ gather_facts: no
+ ignore_errors: yes
+ roles:
+ - openshift_preflight/masters
+
+- hosts: nodes
+ name: checks that apply to nodes
+ gather_facts: no
+ ignore_errors: yes
+ roles:
+ - openshift_preflight/nodes
+
+- hosts: OSEv3
+ name: verify check results
+ gather_facts: no
+ roles:
+ - openshift_preflight/verify_status
diff --git a/playbooks/byo/openshift_facts.yml b/playbooks/byo/openshift_facts.yml
index 8c0708df0..d1acf6175 100644
--- a/playbooks/byo/openshift_facts.yml
+++ b/playbooks/byo/openshift_facts.yml
@@ -1,6 +1,4 @@
---
-- include: ../common/openshift-cluster/verify_ansible_version.yml
-
- hosts: localhost
connection: local
become: no
diff --git a/playbooks/byo/rhel_subscribe.yml b/playbooks/byo/rhel_subscribe.yml
index f36caeb36..6eeba09d9 100644
--- a/playbooks/byo/rhel_subscribe.yml
+++ b/playbooks/byo/rhel_subscribe.yml
@@ -14,9 +14,9 @@
gather_facts: no
tasks:
- include_vars: openshift-cluster/cluster_hosts.yml
-
-- include: ../common/openshift-cluster/evaluate_groups.yml
-
+
+- include: ../common/openshift-cluster/evaluate_groups.yml
+
- hosts: l_oo_all_hosts
vars:
openshift_deployment_type: "{{ deployment_type }}"
diff --git a/playbooks/common/README.md b/playbooks/common/README.md
new file mode 100644
index 000000000..0b5e26989
--- /dev/null
+++ b/playbooks/common/README.md
@@ -0,0 +1,9 @@
+# Common playbooks
+
+This directory has a generic set of playbooks that are included by playbooks in
+[`byo`](../byo), as well as other playbooks related to the
+[`bin/cluster`](../../bin) tool.
+
+Note: playbooks in this directory use generic group names that do not line up
+with the groups used by the `byo` playbooks or `bin/cluster` derived playbooks,
+requiring an explicit remapping of groups.
diff --git a/playbooks/common/openshift-cluster/additional_config.yml b/playbooks/common/openshift-cluster/additional_config.yml
index 825f46415..c0ea93d2c 100644
--- a/playbooks/common/openshift-cluster/additional_config.yml
+++ b/playbooks/common/openshift-cluster/additional_config.yml
@@ -1,3 +1,4 @@
+---
- name: Additional master configuration
hosts: oo_first_master
vars:
diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml
index 801c8065d..0f226f5f9 100644
--- a/playbooks/common/openshift-cluster/config.yml
+++ b/playbooks/common/openshift-cluster/config.yml
@@ -12,6 +12,8 @@
- node
- include: initialize_openshift_version.yml
+ tags:
+ - always
- name: Set oo_option facts
hosts: oo_all_hosts
diff --git a/playbooks/common/openshift-cluster/enable_dnsmasq.yml b/playbooks/common/openshift-cluster/enable_dnsmasq.yml
index 4cfe8617e..ca5177852 100644
--- a/playbooks/common/openshift-cluster/enable_dnsmasq.yml
+++ b/playbooks/common/openshift-cluster/enable_dnsmasq.yml
@@ -59,7 +59,7 @@
vars:
openshift_deployment_type: "{{ deployment_type }}"
roles:
- - openshift_node_dnsmasq
+ - openshift_node_dnsmasq
post_tasks:
- modify_yaml:
dest: "{{ openshift.common.config_base }}/node/node-config.yaml"
diff --git a/playbooks/common/openshift-cluster/evaluate_groups.yml b/playbooks/common/openshift-cluster/evaluate_groups.yml
index b3e02fb97..45a4875a3 100644
--- a/playbooks/common/openshift-cluster/evaluate_groups.yml
+++ b/playbooks/common/openshift-cluster/evaluate_groups.yml
@@ -7,27 +7,27 @@
tasks:
- fail:
msg: This playbook requires g_etcd_hosts to be set
- when: g_etcd_hosts is not defined
+ when: "{{ g_etcd_hosts is not defined }}"
- fail:
msg: This playbook requires g_master_hosts or g_new_master_hosts to be set
- when: g_master_hosts is not defined and g_new_master_hosts is not defined
+ when: "{{ g_master_hosts is not defined and g_new_master_hosts is not defined }}"
- fail:
msg: This playbook requires g_node_hosts or g_new_node_hosts to be set
- when: g_node_hosts is not defined and g_new_node_hosts is not defined
+ when: "{{ g_node_hosts is not defined and g_new_node_hosts is not defined }}"
- fail:
msg: This playbook requires g_lb_hosts to be set
- when: g_lb_hosts is not defined
+ when: "{{ g_lb_hosts is not defined }}"
- fail:
msg: This playbook requires g_nfs_hosts to be set
- when: g_nfs_hosts is not defined
+ when: "{{ g_nfs_hosts is not defined }}"
- fail:
msg: The nfs group must be limited to one host
- when: (groups[g_nfs_hosts] | default([])) | length > 1
+ when: "{{ (groups[g_nfs_hosts] | default([])) | length > 1 }}"
- name: Evaluate oo_all_hosts
add_host:
@@ -36,6 +36,7 @@
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
ansible_become: "{{ g_sudo | default(omit) }}"
with_items: "{{ g_all_hosts | default([]) }}"
+ changed_when: no
- name: Evaluate oo_masters
add_host:
@@ -44,6 +45,7 @@
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
ansible_become: "{{ g_sudo | default(omit) }}"
with_items: "{{ g_master_hosts | union(g_new_master_hosts) | default([]) }}"
+ changed_when: no
- name: Evaluate oo_etcd_to_config
add_host:
@@ -52,6 +54,7 @@
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
ansible_become: "{{ g_sudo | default(omit) }}"
with_items: "{{ g_etcd_hosts | default([]) }}"
+ changed_when: no
- name: Evaluate oo_masters_to_config
add_host:
@@ -60,6 +63,7 @@
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
ansible_become: "{{ g_sudo | default(omit) }}"
with_items: "{{ g_new_master_hosts | default(g_master_hosts | default([], true), true) }}"
+ changed_when: no
- name: Evaluate oo_nodes_to_config
add_host:
@@ -68,23 +72,26 @@
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
ansible_become: "{{ g_sudo | default(omit) }}"
with_items: "{{ g_new_node_hosts | default(g_node_hosts | default([], true), true) }}"
+ changed_when: no
  # Skip adding the master to oo_nodes_to_config when g_new_node_hosts is set.
- - name: Evaluate oo_nodes_to_config
+ - name: Add master to oo_nodes_to_config
add_host:
name: "{{ item }}"
groups: oo_nodes_to_config
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
ansible_become: "{{ g_sudo | default(omit) }}"
with_items: "{{ g_master_hosts | default([]) }}"
- when: g_nodeonmaster | default(false) | bool and not g_new_node_hosts | default(false) | bool
+ when: "{{ g_nodeonmaster | default(false) | bool and not g_new_node_hosts | default(false) | bool }}"
+ changed_when: no
- name: Evaluate oo_first_etcd
add_host:
name: "{{ g_etcd_hosts[0] }}"
groups: oo_first_etcd
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- when: g_etcd_hosts|length > 0
+ when: "{{ g_etcd_hosts|length > 0 }}"
+ changed_when: no
- name: Evaluate oo_first_master
add_host:
@@ -92,7 +99,8 @@
groups: oo_first_master
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
ansible_become: "{{ g_sudo | default(omit) }}"
- when: g_master_hosts|length > 0
+ when: "{{ g_master_hosts|length > 0 }}"
+ changed_when: no
- name: Evaluate oo_lb_to_config
add_host:
@@ -101,6 +109,7 @@
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
ansible_become: "{{ g_sudo | default(omit) }}"
with_items: "{{ g_lb_hosts | default([]) }}"
+ changed_when: no
- name: Evaluate oo_nfs_to_config
add_host:
@@ -109,3 +118,4 @@
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
ansible_become: "{{ g_sudo | default(omit) }}"
with_items: "{{ g_nfs_hosts | default([]) }}"
+ changed_when: no
diff --git a/playbooks/common/openshift-cluster/initialize_facts.yml b/playbooks/common/openshift-cluster/initialize_facts.yml
index 6d83d2527..18f99728c 100644
--- a/playbooks/common/openshift-cluster/initialize_facts.yml
+++ b/playbooks/common/openshift-cluster/initialize_facts.yml
@@ -1,7 +1,11 @@
---
+- name: Ensure that all non-node hosts are accessible
+ hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config:oo_nfs_to_config
+ any_errors_fatal: true
+ tasks:
+
- name: Initialize host facts
hosts: oo_all_hosts
- any_errors_fatal: true
roles:
- openshift_facts
tasks:
diff --git a/playbooks/common/openshift-cluster/initialize_openshift_version.yml b/playbooks/common/openshift-cluster/initialize_openshift_version.yml
index 7112a6084..a1bd1bd92 100644
--- a/playbooks/common/openshift-cluster/initialize_openshift_version.yml
+++ b/playbooks/common/openshift-cluster/initialize_openshift_version.yml
@@ -1,5 +1,22 @@
---
# NOTE: requires openshift_facts be run
+- hosts: l_oo_all_hosts
+ gather_facts: no
+ tasks:
+ # See:
+ # https://bugzilla.redhat.com/show_bug.cgi?id=1395047
+ # https://bugzilla.redhat.com/show_bug.cgi?id=1282961
+ # https://github.com/openshift/openshift-ansible/issues/1138
+ - name: Check for bad combinations of yum and subscription-manager
+ command: >
+ {{ repoquery_cmd }} --installed --qf '%{version}' "yum"
+ register: yum_ver_test
+ changed_when: false
+ when: not openshift.common.is_atomic | bool
+ - fail:
+ msg: Incompatible versions of yum and subscription-manager found. You may need to update yum and yum-utils.
+ when: "not openshift.common.is_atomic | bool and 'Plugin \"search-disabled-repos\" requires API 2.7. Supported API is 2.6.' in yum_ver_test.stdout"
+
- name: Determine openshift_version to configure on first master
hosts: oo_first_master
roles:
diff --git a/playbooks/common/openshift-cluster/openshift_hosted.yml b/playbooks/common/openshift-cluster/openshift_hosted.yml
index ccbba54b4..ec5b18389 100644
--- a/playbooks/common/openshift-cluster/openshift_hosted.yml
+++ b/playbooks/common/openshift-cluster/openshift_hosted.yml
@@ -20,35 +20,14 @@
openshift_hosted_registry_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}"
when: "'master' in hostvars[groups.oo_first_master.0].openshift and 'registry_url' in hostvars[groups.oo_first_master.0].openshift.master"
- set_fact:
- logging_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ (openshift.master.default_subdomain | default('router.default.svc.cluster.local', true))) }}"
- logging_ops_hostname: "{{ openshift_hosted_logging_ops_hostname | default('kibana-ops.' ~ (openshift.master.default_subdomain | default('router.default.svc.cluster.local', true))) }}"
+ logging_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}"
+ logging_ops_hostname: "{{ openshift_hosted_logging_ops_hostname | default('kibana-ops.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}"
logging_master_public_url: "{{ openshift_hosted_logging_master_public_url | default(openshift.master.public_api_url) }}"
logging_elasticsearch_cluster_size: "{{ openshift_hosted_logging_elasticsearch_cluster_size | default(1) }}"
logging_elasticsearch_ops_cluster_size: "{{ openshift_hosted_logging_elasticsearch_ops_cluster_size | default(1) }}"
roles:
- - role: openshift_cli
- - role: openshift_hosted_facts
- - role: openshift_projects
- # TODO: Move standard project definitions to openshift_hosted/vars/main.yml
- # Vars are not accessible in meta/main.yml in ansible-1.9.x
- openshift_projects: "{{ openshift_additional_projects | default({}) | oo_merge_dicts({'default':{'default_node_selector':''},'openshift-infra':{'default_node_selector':''},'logging':{'default_node_selector':''}}) }}"
- - role: openshift_serviceaccounts
- openshift_serviceaccounts_names:
- - router
- openshift_serviceaccounts_namespace: default
- openshift_serviceaccounts_sccs:
- - hostnetwork
- when: openshift.common.version_gte_3_2_or_1_2
- - role: openshift_serviceaccounts
- openshift_serviceaccounts_names:
- - router
- - registry
- openshift_serviceaccounts_namespace: default
- openshift_serviceaccounts_sccs:
- - privileged
- when: not openshift.common.version_gte_3_2_or_1_2
- role: openshift_hosted
- - role: openshift_metrics
+ - role: openshift_hosted_metrics
when: openshift_hosted_metrics_deploy | default(false) | bool
- role: openshift_hosted_logging
when: openshift_hosted_logging_deploy | default(false) | bool
diff --git a/playbooks/common/openshift-cluster/openshift_metrics.yml b/playbooks/common/openshift-cluster/openshift_metrics.yml
new file mode 100644
index 000000000..9f38ceea6
--- /dev/null
+++ b/playbooks/common/openshift-cluster/openshift_metrics.yml
@@ -0,0 +1,5 @@
+---
+- name: OpenShift Metrics
+ hosts: oo_first_master
+ roles:
+ - openshift_metrics
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates.yml b/playbooks/common/openshift-cluster/redeploy-certificates.yml
index 5f008a045..6e3e04a6b 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates.yml
@@ -204,7 +204,7 @@
cp {{ openshift.common.config_base }}/master//admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
changed_when: False
-- name: Serially evacuate all nodes to trigger redeployments
+- name: Serially drain all nodes to trigger redeployments
hosts: oo_nodes_to_config
serial: 1
any_errors_fatal: true
@@ -222,7 +222,7 @@
was_schedulable: "{{ 'unschedulable' not in (node_output.stdout | from_json).spec }}"
when: openshift_certificates_redeploy_ca | default(false) | bool
- - name: Prepare for node evacuation
+ - name: Prepare for node draining
command: >
{{ openshift.common.client_binary }} adm --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig
manage-node {{ openshift.node.nodename }}
@@ -230,11 +230,11 @@
delegate_to: "{{ groups.oo_first_master.0 }}"
when: openshift_certificates_redeploy_ca | default(false) | bool and was_schedulable | bool
- - name: Evacuate node
+ - name: Drain node
command: >
{{ openshift.common.client_binary }} adm --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig
manage-node {{ openshift.node.nodename }}
- --evacuate --force
+ {{ openshift.common.evacuate_or_drain }} --force
delegate_to: "{{ groups.oo_first_master.0 }}"
when: openshift_certificates_redeploy_ca | default(false) | bool and was_schedulable | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml
index 439df5ffd..9f7961614 100644
--- a/playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml
@@ -1,9 +1,14 @@
+---
+# This is a hack to allow us to use systemd_units.yml, but skip the handlers which
+# restart services. We will unconditionally restart all containerized services
+# because we have to unconditionally restart Docker:
+- set_fact:
+ skip_node_svc_handlers: True
+
- name: Update systemd units
include: ../../../../roles/openshift_node/tasks/systemd_units.yml openshift_version={{ openshift_image_tag }}
-- name: Verifying the correct version was configured
- shell: grep {{ verify_upgrade_version }} {{ item }}
- with_items:
- - /etc/sysconfig/openvswitch
- - /etc/sysconfig/{{ openshift.common.service_type }}*
- when: verify_upgrade_version is defined
+# This is a no-op because of skip_node_svc_handlers, but it lets us flush the
+# handlers now rather than at the end of the play, when the node has already been
+# marked schedulable again (which would look strange in the logs).
+- meta: flush_handlers
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/restart.yml b/playbooks/common/openshift-cluster/upgrades/docker/restart.yml
new file mode 100644
index 000000000..1b418920f
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/docker/restart.yml
@@ -0,0 +1,27 @@
+---
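+# Restart Docker, bring the containerized OpenShift services back up, and wait
+# for the master API to respond before continuing.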
+- name: Restart docker
+ service: name=docker state=restarted
+
+- name: Update docker facts
+ openshift_facts:
+ role: docker
+
+- name: Restart containerized services
+ service: name={{ item }} state=started
+ with_items:
+ - etcd_container
+ - openvswitch
+ - "{{ openshift.common.service_type }}-master"
+ - "{{ openshift.common.service_type }}-master-api"
+ - "{{ openshift.common.service_type }}-master-controllers"
+ - "{{ openshift.common.service_type }}-node"
+ failed_when: false
+ when: openshift.common.is_containerized | bool
+
+- name: Wait for master API to come back online
+ wait_for:
+ host: "{{ openshift.common.hostname }}"
+ state: started
+ delay: 10
+ port: "{{ openshift.master.api_port }}"
+ when: inventory_hostname in groups.oo_masters_to_config
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml
index 417096dd0..17f8fc6e9 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml
@@ -20,7 +20,7 @@
- debug: var=docker_image_count.stdout
- name: Remove all containers and images
- script: nuke_images.sh docker
+ script: nuke_images.sh
register: nuke_images_result
when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
@@ -35,32 +35,7 @@
- service: name=docker state=stopped
- name: Upgrade Docker
- action: "{{ ansible_pkg_mgr }} name=docker{{ '-' + docker_version }} state=present"
+ package: name=docker{{ '-' + docker_version }} state=present
-- service: name=docker state=started
-
-- name: Update docker facts
- openshift_facts:
- role: docker
-
-- name: Restart containerized services
- service: name={{ item }} state=started
- with_items:
- - etcd_container
- - openvswitch
- - "{{ openshift.common.service_type }}-master"
- - "{{ openshift.common.service_type }}-master-api"
- - "{{ openshift.common.service_type }}-master-controllers"
- - "{{ openshift.common.service_type }}-node"
- failed_when: false
- when: openshift.common.is_containerized | bool
-
-- name: Wait for master API to come back online
- become: no
- local_action:
- module: wait_for
- host="{{ inventory_hostname }}"
- state=started
- delay=10
- port="{{ openshift.master.api_port }}"
- when: inventory_hostname in groups.oo_masters_to_config
+- include: restart.yml
+ when: not skip_docker_restart | default(False) | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml b/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml
index caf80b358..b2a2eac9a 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml
@@ -9,6 +9,8 @@
- name: Check if Docker is installed
command: rpm -q docker
+ args:
+ warn: no
register: pkg_check
failed_when: pkg_check.rc > 1
changed_when: no
@@ -48,5 +50,5 @@
- name: Flag to delete all images prior to upgrade if crossing Docker 1.10 boundary
set_fact:
- docker_upgrade_nuke_images: True
+ docker_upgrade_nuke_images: True
when: l_docker_upgrade | bool and docker_upgrade_nuke_images is not defined and curr_docker_version.stdout | version_compare('1.10','<') and docker_version | version_compare('1.10','>=')
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml
new file mode 100644
index 000000000..d0eadf1fc
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml
@@ -0,0 +1,94 @@
+---
+- name: Backup etcd
+ hosts: etcd_hosts_to_backup
+ vars:
+ embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
+ timestamp: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
+ etcdctl_command: "{{ 'etcdctl' if not openshift.common.is_containerized or embedded_etcd else 'docker exec etcd_container etcdctl' }}"
+ roles:
+ - openshift_facts
+ tasks:
+ # Ensure we persist the etcd role for this host in openshift_facts
+ - openshift_facts:
+ role: etcd
+ local_facts: {}
+ when: "'etcd' not in openshift"
+
+ - stat: path=/var/lib/openshift
+ register: var_lib_openshift
+
+ - stat: path=/var/lib/origin
+ register: var_lib_origin
+
+ - name: Create origin symlink if necessary
+ file: src=/var/lib/openshift/ dest=/var/lib/origin state=link
+ when: var_lib_openshift.stat.exists == True and var_lib_origin.stat.exists == False
+
+ # TODO: replace shell module with command and update later checks
+  # We assume the data dir is used for all backups.
+ - name: Check available disk space for etcd backup
+ shell: df --output=avail -k {{ openshift.common.data_dir }} | tail -n 1
+ register: avail_disk
+
+ # TODO: replace shell module with command and update later checks
+ - name: Check current embedded etcd disk usage
+ shell: du -k {{ openshift.etcd.etcd_data_dir }} | tail -n 1 | cut -f1
+ register: etcd_disk_usage
+ when: embedded_etcd | bool
+
+ - name: Abort if insufficient disk space for etcd backup
+ fail:
+ msg: >
+ {{ etcd_disk_usage.stdout }} Kb disk space required for etcd backup,
+ {{ avail_disk.stdout }} Kb available.
+ when: (embedded_etcd | bool) and (etcd_disk_usage.stdout|int > avail_disk.stdout|int)
+
+  # For non-containerized, non-embedded etcd we should already have the correct
+  # version of etcd installed, so don't do anything.
+  #
+  # For embedded or containerized etcd we need to use the latest version,
+  # because OCP 3.3 uses a version of etcd that can only be backed up with
+  # etcd 3.x, and if it's containerized the etcd version may be newer than the
+  # one on the host, so upgrade it.
+  #
+  # On Atomic Host we have neither yum nor dnf, so Ansible throws a
+  # hard-to-debug error if you use the package module there, like this:
+  # "Could not find a module for unknown."
+  # See https://bugzilla.redhat.com/show_bug.cgi?id=1408668
+  #
+  # TODO - We should refactor all containerized backups to use the containerized
+  # version of etcd to perform the backup rather than relying on the host's
+  # binaries. Until we do that we'll continue to have problems backing up etcd
+  # when Atomic Host has an older etcd than the one running in the container,
+  # whether embedded or not.
+ - name: Install latest etcd for containerized or embedded
+ package:
+ name: etcd
+ state: latest
+ when: ( embedded_etcd | bool or openshift.common.is_containerized ) and not openshift.common.is_atomic
+
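+  # The backup lands under openshift.common.data_dir, tagged with an optional
+  # backup_tag (e.g. "pre-upgrade-") and a timestamp.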
+ - name: Generate etcd backup
+ command: >
+ {{ etcdctl_command }} backup --data-dir={{ openshift.etcd.etcd_data_dir }}
+ --backup-dir={{ openshift.common.data_dir }}/etcd-backup-{{ backup_tag | default('') }}{{ timestamp }}
+
+ - set_fact:
+ etcd_backup_complete: True
+
+ - name: Display location of etcd backup
+ debug:
+ msg: "Etcd backup created in {{ openshift.common.data_dir }}/etcd-backup-{{ backup_tag | default('') }}{{ timestamp }}"
+
+- name: Gate on etcd backup
+ hosts: localhost
+ connection: local
+ become: no
+ tasks:
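+  # Any backup host that did not set etcd_backup_complete ends up in
+  # etcd_backup_failed and aborts the upgrade.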
+ - set_fact:
+ etcd_backup_completed: "{{ hostvars
+ | oo_select_keys(groups.etcd_hosts_to_backup)
+ | oo_collect('inventory_hostname', {'etcd_backup_complete': true}) }}"
+ - set_fact:
+ etcd_backup_failed: "{{ groups.etcd_hosts_to_backup | difference(etcd_backup_completed) }}"
+ - fail:
+ msg: "Upgrade cannot continue. The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}"
+ when: etcd_backup_failed | length > 0
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/containerized_tasks.yml b/playbooks/common/openshift-cluster/upgrades/etcd/containerized_tasks.yml
new file mode 100644
index 000000000..5f8b59e17
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/containerized_tasks.yml
@@ -0,0 +1,46 @@
+---
+- name: Verify cluster is healthy pre-upgrade
+ command: "etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://{{ openshift.common.hostname }}:2379 cluster-health"
+
+- name: Get current image
+ shell: grep 'ExecStart=' /etc/systemd/system/etcd_container.service | awk '{print $NF}'
+ register: current_image
+
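+# Derive the new image by replacing everything after '/etcd' in the current
+# image reference with ':{{ upgrade_version }}'.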
+- name: Set new_etcd_image
+ set_fact:
+ new_etcd_image: "{{ current_image.stdout | regex_replace('/etcd.*$','/etcd:' ~ upgrade_version ) }}"
+
+- name: Pull new etcd image
+ command: "docker pull {{ new_etcd_image }}"
+
+- name: Update to latest etcd image
+ replace:
+ dest: /etc/systemd/system/etcd_container.service
+ regexp: "{{ current_image.stdout }}$"
+ replace: "{{ new_etcd_image }}"
+
+- name: Restart etcd_container
+ systemd:
+ name: etcd_container
+ daemon_reload: yes
+ state: restarted
+
+## TODO: probably should just move this into the backup playbooks; also, this
+## will fail on Atomic Host. We need to revisit how to do etcd backups there, as
+## the container may be newer than etcdctl on the host. Assumes etcd3 obsoletes etcd (7.3.1).
+- name: Upgrade etcd for etcdctl when not atomic
+ package: name=etcd state=latest
+ when: not openshift.common.is_atomic | bool
+
+- name: Verify cluster is healthy
+ command: "etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://{{ openshift.common.hostname }}:2379 cluster-health"
+ register: etcdctl
+ until: etcdctl.rc == 0
+ retries: 3
+ delay: 10
+
+- name: Store new etcd_image
+ openshift_facts:
+ role: etcd
+ local_facts:
+ etcd_image: "{{ new_etcd_image }}"
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/fedora_tasks.yml b/playbooks/common/openshift-cluster/upgrades/etcd/fedora_tasks.yml
new file mode 100644
index 000000000..30232110e
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/fedora_tasks.yml
@@ -0,0 +1,23 @@
+---
+# F23 GA'd with etcd 2.0, currently has 2.2 in updates
+# F24 GA'd with etcd-2.2, currently has 2.2 in updates
+# F25 Beta currently has etcd 3.0
+- name: Verify cluster is healthy pre-upgrade
+ command: "etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://{{ openshift.common.hostname }}:2379 cluster-health"
+
+- name: Update etcd
+ package:
+ name: "etcd"
+ state: "latest"
+
+- name: Restart etcd
+ service:
+ name: etcd
+ state: restarted
+
+- name: Verify cluster is healthy
+ command: "etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://{{ openshift.common.hostname }}:2379 cluster-health"
+ register: etcdctl
+ until: etcdctl.rc == 0
+ retries: 3
+ delay: 10
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/files/etcdctl.sh b/playbooks/common/openshift-cluster/upgrades/etcd/files/etcdctl.sh
new file mode 120000
index 000000000..641e04e44
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/files/etcdctl.sh
@@ -0,0 +1 @@
+../roles/etcd/files/etcdctl.sh \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/filter_plugins b/playbooks/common/openshift-cluster/upgrades/etcd/filter_plugins
new file mode 120000
index 000000000..27ddaa18b
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/filter_plugins
@@ -0,0 +1 @@
+../../../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/lookup_plugins b/playbooks/common/openshift-cluster/upgrades/etcd/lookup_plugins
new file mode 120000
index 000000000..cf407f69b
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/lookup_plugins
@@ -0,0 +1 @@
+../../../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/main.yml b/playbooks/common/openshift-cluster/upgrades/etcd/main.yml
new file mode 100644
index 000000000..8268adc2e
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/main.yml
@@ -0,0 +1,44 @@
+---
+# For 1.4/3.4 we want to upgrade everyone to etcd-3.0. The etcd docs say to
+# upgrade from 2.0.x to 2.1.x to 2.2.x to 2.3.x to 3.0.x. While this is a tedious
+# task for RHEL and CentOS, it's simply not possible in Fedora unless you've
+# mirrored the packages yourself, because only the GA and latest versions are
+# available in the repos. So for Fedora we'll simply skip this, sorry.
+
+- include: ../../evaluate_groups.yml
+ tags:
+ - always
+
+# We use two groups: one for the hosts we're upgrading, which doesn't include the
+# embedded etcd host, and one for backing up, which does include it. There's no
+# need to upgrade embedded etcd separately; that happens when the master is updated.
+- name: Evaluate additional groups for etcd
+ hosts: localhost
+ connection: local
+ become: no
+ tasks:
+ - name: Evaluate etcd_hosts_to_upgrade
+ add_host:
+ name: "{{ item }}"
+ groups: etcd_hosts_to_upgrade
+ with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else [] }}"
+ - name: Evaluate etcd_hosts_to_backup
+ add_host:
+ name: "{{ item }}"
+ groups: etcd_hosts_to_backup
+ with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master }}"
+
+- name: Backup etcd before upgrading anything
+ include: backup.yml
+ vars:
+ backup_tag: "pre-upgrade-"
+ when: openshift_etcd_backup | default(true) | bool
+
+- name: Drop etcdctl profiles
+ hosts: etcd_hosts_to_upgrade
+ tasks:
+ - include: roles/etcd/tasks/etcdctl.yml
+
+- name: Perform etcd upgrade
+ include: ./upgrade.yml
+ when: openshift_etcd_upgrade | default(true) | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/rhel_tasks.yml b/playbooks/common/openshift-cluster/upgrades/etcd/rhel_tasks.yml
new file mode 100644
index 000000000..3a972e8ab
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/rhel_tasks.yml
@@ -0,0 +1,20 @@
+---
+- name: Verify cluster is healthy pre-upgrade
+ command: "etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://{{ openshift.common.hostname }}:2379 cluster-health"
+
+- name: Update etcd RPM
+ package:
+ name: etcd-{{ upgrade_version }}*
+ state: latest
+
+- name: Restart etcd
+ service:
+ name: etcd
+ state: restarted
+
+- name: Verify cluster is healthy
+ command: "etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://{{ openshift.common.hostname }}:2379 cluster-health"
+ register: etcdctl
+ until: etcdctl.rc == 0
+ retries: 3
+ delay: 10
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/roles b/playbooks/common/openshift-cluster/upgrades/etcd/roles
new file mode 120000
index 000000000..6bc1a7aef
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/roles
@@ -0,0 +1 @@
+../../../../../roles \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml
new file mode 100644
index 000000000..0f8d94737
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml
@@ -0,0 +1,94 @@
+---
+- name: Determine etcd version
+ hosts: etcd_hosts_to_upgrade
+ tasks:
+ - name: Record RPM based etcd version
+ command: rpm -qa --qf '%{version}' etcd\*
+ args:
+ warn: no
+ register: etcd_rpm_version
+ failed_when: false
+ when: not openshift.common.is_containerized | bool
+ - name: Record containerized etcd version
+ command: docker exec etcd_container rpm -qa --qf '%{version}' etcd\*
+ register: etcd_container_version
+ failed_when: false
+ when: openshift.common.is_containerized | bool
+
+# I really dislike this copy/paste, but I wasn't able to find a way to loop
+# through hosts and then loop through tasks only when appropriate.
+- name: Upgrade to 2.1
+ hosts: etcd_hosts_to_upgrade
+ serial: 1
+ vars:
+ upgrade_version: '2.1'
+ tasks:
+ - include: rhel_tasks.yml
+ when: etcd_rpm_version.stdout | default('99') | version_compare('2.1','<') and ansible_distribution == 'RedHat' and not openshift.common.is_containerized | bool
+
+- name: Upgrade RPM hosts to 2.2
+ hosts: etcd_hosts_to_upgrade
+ serial: 1
+ vars:
+ upgrade_version: '2.2'
+ tasks:
+ - include: rhel_tasks.yml
+ when: etcd_rpm_version.stdout | default('99') | version_compare('2.2','<') and ansible_distribution == 'RedHat' and not openshift.common.is_containerized | bool
+
+- name: Upgrade containerized hosts to 2.2.5
+ hosts: etcd_hosts_to_upgrade
+ serial: 1
+ vars:
+ upgrade_version: 2.2.5
+ tasks:
+ - include: containerized_tasks.yml
+ when: etcd_container_version.stdout | default('99') | version_compare('2.2','<') and openshift.common.is_containerized | bool
+
+- name: Upgrade RPM hosts to 2.3
+ hosts: etcd_hosts_to_upgrade
+ serial: 1
+ vars:
+ upgrade_version: '2.3'
+ tasks:
+ - include: rhel_tasks.yml
+ when: etcd_rpm_version.stdout | default('99') | version_compare('2.3','<') and ansible_distribution == 'RedHat' and not openshift.common.is_containerized | bool
+
+- name: Upgrade containerized hosts to 2.3.7
+ hosts: etcd_hosts_to_upgrade
+ serial: 1
+ vars:
+ upgrade_version: 2.3.7
+ tasks:
+ - include: containerized_tasks.yml
+ when: etcd_container_version.stdout | default('99') | version_compare('2.3','<') and openshift.common.is_containerized | bool
+
+- name: Upgrade RPM hosts to 3.0
+ hosts: etcd_hosts_to_upgrade
+ serial: 1
+ vars:
+ upgrade_version: '3.0'
+ tasks:
+ - include: rhel_tasks.yml
+ when: etcd_rpm_version.stdout | default('99') | version_compare('3.0','<') and ansible_distribution == 'RedHat' and not openshift.common.is_containerized | bool
+
+- name: Upgrade containerized hosts to etcd3 image
+ hosts: etcd_hosts_to_upgrade
+ serial: 1
+ vars:
+ upgrade_version: 3.0.15
+ tasks:
+ - include: containerized_tasks.yml
+ when: etcd_container_version.stdout | default('99') | version_compare('3.0','<') and openshift.common.is_containerized | bool
+
+- name: Upgrade fedora to latest
+ hosts: etcd_hosts_to_upgrade
+ serial: 1
+ tasks:
+ - include: fedora_tasks.yml
+ when: ansible_distribution == 'Fedora' and not openshift.common.is_containerized | bool
+
+- name: Backup etcd
+ include: backup.yml
+ vars:
+ backup_tag: "post-3.0-"
+ when: openshift_etcd_backup | default(true) | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/files/pre-upgrade-check b/playbooks/common/openshift-cluster/upgrades/files/pre-upgrade-check
deleted file mode 100644
index e5c958ebb..000000000
--- a/playbooks/common/openshift-cluster/upgrades/files/pre-upgrade-check
+++ /dev/null
@@ -1,193 +0,0 @@
-#!/usr/bin/env python
-"""
-Pre-upgrade checks that must be run on a master before proceeding with upgrade.
-"""
-# This is a script not a python module:
-# pylint: disable=invalid-name
-
-# NOTE: This script should not require any python libs other than what is
-# in the standard library.
-
-__license__ = "ASL 2.0"
-
-import json
-import os
-import subprocess
-import re
-
-# The maximum length of container.ports.name
-ALLOWED_LENGTH = 15
-# The valid structure of container.ports.name
-ALLOWED_CHARS = re.compile('^[a-z0-9][a-z0-9\\-]*[a-z0-9]$')
-AT_LEAST_ONE_LETTER = re.compile('[a-z]')
-# look at OS_PATH for the full path. Default ot 'oc'
-OC_PATH = os.getenv('OC_PATH', 'oc')
-
-
-def validate(value):
- """
- validate verifies that value matches required conventions
-
- Rules of container.ports.name validation:
-
- * must be less that 16 chars
- * at least one letter
- * only a-z0-9-
- * hyphens can not be leading or trailing or next to each other
-
- :Parameters:
- - `value`: Value to validate
- """
- if len(value) > ALLOWED_LENGTH:
- return False
-
- if '--' in value:
- return False
-
- # We search since it can be anywhere
- if not AT_LEAST_ONE_LETTER.search(value):
- return False
-
- # We match because it must start at the beginning
- if not ALLOWED_CHARS.match(value):
- return False
- return True
-
-
-def list_items(kind):
- """
- list_items returns a list of items from the api
-
- :Parameters:
- - `kind`: Kind of item to access
- """
- response = subprocess.check_output([OC_PATH, 'get', '--all-namespaces', '-o', 'json', kind])
- items = json.loads(response)
- return items.get("items", [])
-
-
-def get(obj, *paths):
- """
- Gets an object
-
- :Parameters:
- - `obj`: A dictionary structure
- - `path`: All other non-keyword arguments
- """
- ret_obj = obj
- for path in paths:
- if ret_obj.get(path, None) is None:
- return []
- ret_obj = ret_obj[path]
- return ret_obj
-
-
-# pylint: disable=too-many-arguments
-def pretty_print_errors(namespace, kind, item_name, container_name, invalid_label, port_name, valid):
- """
- Prints out results in human friendly way.
-
- :Parameters:
- - `namespace`: Namespace of the resource
- - `kind`: Kind of the resource
- - `item_name`: Name of the resource
- - `container_name`: Name of the container. May be "" when kind=Service.
- - `port_name`: Name of the port
- - `invalid_label`: The label of the invalid port. Port.name/targetPort
- - `valid`: True if the port is valid
- """
- if not valid:
- if len(container_name) > 0:
- print('%s/%s -n %s (Container="%s" %s="%s")' % (
- kind, item_name, namespace, container_name, invalid_label, port_name))
- else:
- print('%s/%s -n %s (%s="%s")' % (
- kind, item_name, namespace, invalid_label, port_name))
-
-
-def print_validation_header():
- """
- Prints the error header. Should run on the first error to avoid
- overwhelming the user.
- """
- print """\
-At least one port name is invalid and must be corrected before upgrading.
-Please update or remove any resources with invalid port names.
-
- Valid port names must:
-
- * be less that 16 characters
- * have at least one letter
- * contain only a-z0-9-
- * not start or end with -
- * not contain dashes next to each other ('--')
-"""
-
-
-def main():
- """
- main is the main entry point to this script
- """
- try:
- # the comma at the end suppresses the newline
- print "Checking for oc ...",
- subprocess.check_output([OC_PATH, 'whoami'])
- print "found"
- except:
- print(
- 'Unable to run "%s whoami"\n'
- 'Please ensure OpenShift is running, and "oc" is on your system '
- 'path.\n'
- 'You can override the path with the OC_PATH environment variable.'
- % OC_PATH)
- raise SystemExit(1)
-
- # Where the magic happens
- first_error = True
- for kind, path in [
- ('deploymentconfigs', ("spec", "template", "spec", "containers")),
- ('replicationcontrollers', ("spec", "template", "spec", "containers")),
- ('pods', ("spec", "containers"))]:
- for item in list_items(kind):
- namespace = item["metadata"]["namespace"]
- item_name = item["metadata"]["name"]
- for container in get(item, *path):
- container_name = container["name"]
- for port in get(container, "ports"):
- port_name = port.get("name", None)
- if not port_name:
- # Unnamed ports are OK
- continue
- valid = validate(port_name)
- if not valid and first_error:
- first_error = False
- print_validation_header()
- pretty_print_errors(
- namespace, kind, item_name,
- container_name, "Port.name", port_name, valid)
-
- # Services follow a different flow
- for item in list_items('services'):
- namespace = item["metadata"]["namespace"]
- item_name = item["metadata"]["name"]
- for port in get(item, "spec", "ports"):
- port_name = port.get("targetPort", None)
- if isinstance(port_name, int) or port_name is None:
- # Integer only or unnamed ports are OK
- continue
- valid = validate(port_name)
- if not valid and first_error:
- first_error = False
- print_validation_header()
- pretty_print_errors(
- namespace, "services", item_name, "",
- "targetPort", port_name, valid)
-
- # If we had at least 1 error then exit with 1
- if not first_error:
- raise SystemExit(1)
-
-
-if __name__ == '__main__':
- main()
-
diff --git a/playbooks/common/openshift-cluster/upgrades/files/rpm_versions.sh b/playbooks/common/openshift-cluster/upgrades/files/rpm_versions.sh
deleted file mode 100644
index 7bf249742..000000000
--- a/playbooks/common/openshift-cluster/upgrades/files/rpm_versions.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/bin/bash
-if [ `which dnf 2> /dev/null` ]; then
- installed=$(dnf repoquery --installed --latest-limit 1 -d 0 --qf '%{version}-%{release}' "${@}" 2> /dev/null)
- available=$(dnf repoquery --available --latest-limit 1 -d 0 --qf '%{version}-%{release}' "${@}" 2> /dev/null)
-else
- installed=$(repoquery --plugins --pkgnarrow=installed --qf '%{version}-%{release}' "${@}" 2> /dev/null)
- available=$(repoquery --plugins --pkgnarrow=available --qf '%{version}-%{release}' "${@}" 2> /dev/null)
-fi
-
-echo "---"
-echo "curr_version: ${installed}"
-echo "avail_version: ${available}"
diff --git a/playbooks/common/openshift-cluster/upgrades/init.yml b/playbooks/common/openshift-cluster/upgrades/init.yml
index fbdb7900a..8cac2fb3b 100644
--- a/playbooks/common/openshift-cluster/upgrades/init.yml
+++ b/playbooks/common/openshift-cluster/upgrades/init.yml
@@ -1,6 +1,4 @@
---
-- include: ../verify_ansible_version.yml
-
- hosts: localhost
connection: local
become: no
diff --git a/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py b/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py
index 9a065fd1c..673f11889 100755
--- a/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py
+++ b/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py
@@ -17,6 +17,7 @@ requirements: [ ]
EXAMPLES = '''
'''
+
def modify_api_levels(level_list, remove, ensure, msg_prepend='',
msg_append=''):
""" modify_api_levels """
@@ -62,7 +63,6 @@ def upgrade_master_3_0_to_3_1(ansible_module, config_base, backup):
config = yaml.safe_load(master_cfg_file.read())
master_cfg_file.close()
-
# Remove unsupported api versions and ensure supported api versions from
# master config
unsupported_levels = ['v1beta1', 'v1beta2', 'v1beta3']
@@ -118,7 +118,7 @@ def main():
# redefined-outer-name
global module
- module = AnsibleModule(
+ module = AnsibleModule( # noqa: F405
argument_spec=dict(
config_base=dict(required=True),
from_version=dict(required=True, choices=['3.0']),
@@ -146,13 +146,14 @@ def main():
# ignore broad-except error to avoid stack trace to ansible user
# pylint: disable=broad-except
- except Exception, e:
+ except Exception as e:
return module.fail_json(msg=str(e))
+
# ignore pylint errors related to the module_utils import
-# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
+# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, wrong-import-position
# import module snippets
-from ansible.module_utils.basic import *
+from ansible.module_utils.basic import * # noqa: E402,F403
if __name__ == '__main__':
main()
diff --git a/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml
index cd1139b29..df2b664d4 100644
--- a/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml
@@ -1,11 +1,8 @@
+---
# We verified latest rpm available is suitable, so just yum update.
- name: Upgrade packages
- action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}-{{ component }}{{ openshift_pkg_version }} state=present"
+ package: "name={{ openshift.common.service_type }}-{{ component }}{{ openshift_pkg_version }} state=present"
- name: Ensure python-yaml present for config upgrade
- action: "{{ ansible_pkg_mgr }} name=PyYAML state=present"
+ package: name=PyYAML state=present
when: not openshift.common.is_atomic | bool
-
-- name: Restart node service
- service: name="{{ openshift.common.service_type }}-node" state=restarted
- when: component == "node"
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index 6b567e2e2..6950b6166 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -24,88 +24,14 @@
- openshift_facts:
role: master
local_facts:
- embedded_etcd: "{{ groups.oo_etcd_to_config | length == 0 }}"
+ embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
+ debug_level: "{{ openshift_master_debug_level | default(openshift.common.debug_level | default(2)) }}"
-- name: Backup etcd
- hosts: etcd_hosts_to_backup
- vars:
- embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
- timestamp: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
- roles:
- - openshift_facts
- tasks:
- # Ensure we persist the etcd role for this host in openshift_facts
- - openshift_facts:
- role: etcd
- local_facts: {}
- when: "'etcd' not in openshift"
-
- - stat: path=/var/lib/openshift
- register: var_lib_openshift
-
- - stat: path=/var/lib/origin
- register: var_lib_origin
-
- - name: Create origin symlink if necessary
- file: src=/var/lib/openshift/ dest=/var/lib/origin state=link
- when: var_lib_openshift.stat.exists == True and var_lib_origin.stat.exists == False
-
- # TODO: replace shell module with command and update later checks
- # We assume to be using the data dir for all backups.
- - name: Check available disk space for etcd backup
- shell: df --output=avail -k {{ openshift.common.data_dir }} | tail -n 1
- register: avail_disk
-
- # TODO: replace shell module with command and update later checks
- - name: Check current embedded etcd disk usage
- shell: du -k {{ openshift.etcd.etcd_data_dir }} | tail -n 1 | cut -f1
- register: etcd_disk_usage
- when: embedded_etcd | bool
-
- - name: Abort if insufficient disk space for etcd backup
- fail:
- msg: >
- {{ etcd_disk_usage.stdout }} Kb disk space required for etcd backup,
- {{ avail_disk.stdout }} Kb available.
- when: (embedded_etcd | bool) and (etcd_disk_usage.stdout|int > avail_disk.stdout|int)
-
- - name: Install etcd (for etcdctl)
- action: "{{ ansible_pkg_mgr }} name=etcd state=installed"
- when: not openshift.common.is_atomic | bool
-
- - name: Generate etcd backup
- command: >
- etcdctl backup --data-dir={{ openshift.etcd.etcd_data_dir }}
- --backup-dir={{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}
-
- - set_fact:
- etcd_backup_complete: True
-
- - name: Display location of etcd backup
- debug:
- msg: "Etcd backup created in {{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}"
-
-
-- name: Gate on etcd backup
- hosts: localhost
- connection: local
- become: no
- tasks:
- - set_fact:
- etcd_backup_completed: "{{ hostvars
- | oo_select_keys(groups.etcd_hosts_to_backup)
- | oo_collect('inventory_hostname', {'etcd_backup_complete': true}) }}"
- - set_fact:
- etcd_backup_failed: "{{ groups.etcd_hosts_to_backup | difference(etcd_backup_completed) }}"
- - fail:
- msg: "Upgrade cannot continue. The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}"
- when: etcd_backup_failed | length > 0
+- name: Upgrade and backup etcd
+ include: ./etcd/main.yml
- name: Upgrade master packages
hosts: oo_masters_to_config
- handlers:
- - include: ../../../../roles/openshift_master/handlers/main.yml
- static: yes
roles:
- openshift_facts
tasks:
@@ -125,6 +51,14 @@
- include: create_service_signer_cert.yml
+# Set openshift_master_facts separately. In order to reconcile
+# admission_config settings, we currently must run openshift_master_facts and
+# then run openshift_facts.
+- name: Set OpenShift master facts
+ hosts: oo_masters_to_config
+ roles:
+ - openshift_master_facts
+
- name: Upgrade master config and systemd units
hosts: oo_masters_to_config
handlers:
@@ -132,7 +66,11 @@
static: yes
roles:
- openshift_facts
- tasks:
+ post_tasks:
+ - include_vars: ../../../../roles/openshift_master_facts/vars/main.yml
+
+ - include: upgrade_scheduler.yml
+
- include: "{{ master_config_hook }}"
when: master_config_hook is defined
@@ -228,6 +166,12 @@
when: origin_reconcile_bindings | bool or ent_reconcile_bindings | bool
run_once: true
+ - name: Reconcile Jenkins Pipeline Role Bindings
+ command: >
+ {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig policy reconcile-cluster-role-bindings system:build-strategy-jenkinspipeline --confirm
+ run_once: true
+ when: openshift.common.version_gte_3_4_or_1_4 | bool
+
- name: Reconcile Security Context Constraints
command: >
{{ openshift.common.client_binary }} adm policy reconcile-sccs --confirm --additive-only=true
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
index 1f314c854..86b344d7a 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
@@ -1,5 +1,5 @@
---
-- name: Evacuate and upgrade nodes
+- name: Drain and upgrade nodes
hosts: oo_nodes_to_upgrade
# This var must be set with -e on invocation, as it is not a per-host inventory var
# and is evaluated early. Values such as "20%" can also be used.
@@ -17,7 +17,7 @@
# we merge upgrade functionality into the base roles and a normal config.yml playbook run.
- name: Determine if node is currently scheduleable
command: >
- {{ openshift.common.client_binary }} get node {{ openshift.node.nodename | lower }} -o json
+ {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} get node {{ openshift.node.nodename | lower }} -o json
register: node_output
delegate_to: "{{ groups.oo_first_master.0 }}"
changed_when: false
@@ -29,7 +29,7 @@
- name: Mark unschedulable if host is a node
command: >
- {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --schedulable=false
+ {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --schedulable=false
delegate_to: "{{ groups.oo_first_master.0 }}"
when: inventory_hostname in groups.oo_nodes_to_upgrade
# NOTE: There is a transient "object has been modified" error here, allow a couple
@@ -39,13 +39,18 @@
retries: 3
delay: 1
- - name: Evacuate Node for Kubelet upgrade
+ - name: Drain Node for Kubelet upgrade
command: >
- {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --evacuate --force
+ {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} {{ openshift.common.evacuate_or_drain }} --force
delegate_to: "{{ groups.oo_first_master.0 }}"
when: inventory_hostname in groups.oo_nodes_to_upgrade
+
tasks:
+
- include: docker/upgrade.yml
+ vars:
+ # We will restart Docker ourselves after everything is ready:
+ skip_docker_restart: True
when: l_docker_upgrade is defined and l_docker_upgrade | bool and not openshift.common.is_atomic | bool
- include: "{{ node_config_hook }}"
@@ -53,23 +58,41 @@
- include: rpm_upgrade.yml
vars:
- component: "node"
- openshift_version: "{{ openshift_pkg_version | default('') }}"
+ component: "node"
+ openshift_version: "{{ openshift_pkg_version | default('') }}"
when: inventory_hostname in groups.oo_nodes_to_upgrade and not openshift.common.is_containerized | bool
+ - name: Remove obsolete docker-sdn-ovs.conf
+ file: path=/etc/systemd/system/docker.service.d/docker-sdn-ovs.conf state=absent
+ when: (deployment_type == 'openshift-enterprise' and openshift_release | version_compare('3.4', '>=')) or (deployment_type == 'origin' and openshift_release | version_compare('1.4', '>='))
+
- include: containerized_node_upgrade.yml
when: inventory_hostname in groups.oo_nodes_to_upgrade and openshift.common.is_containerized | bool
- - meta: flush_handlers
+ - name: Ensure containerized services stopped before Docker restart
+ service: name={{ item }} state=stopped
+ with_items:
+ - etcd_container
+ - openvswitch
+ - "{{ openshift.common.service_type }}-master"
+ - "{{ openshift.common.service_type }}-master-api"
+ - "{{ openshift.common.service_type }}-master-controllers"
+ - "{{ openshift.common.service_type }}-node"
+ failed_when: false
+ when: openshift.common.is_containerized | bool
+ # Mandatory Docker restart, ensure all containerized services are running:
+ - include: docker/restart.yml
+
+ - name: Restart rpm node service
+ service: name="{{ openshift.common.service_type }}-node" state=restarted
+ when: inventory_hostname in groups.oo_nodes_to_upgrade and not openshift.common.is_containerized | bool
- name: Set node schedulability
command: >
- {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --schedulable=true
+ {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --schedulable=true
delegate_to: "{{ groups.oo_first_master.0 }}"
when: inventory_hostname in groups.oo_nodes_to_upgrade and was_schedulable | bool
register: node_sched
until: node_sched.rc == 0
retries: 3
delay: 1
-
-
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml
new file mode 100644
index 000000000..88f2ddc78
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml
@@ -0,0 +1,166 @@
+---
+# Upgrade predicates
+- vars:
+ prev_predicates: "{{ lookup('openshift_master_facts_default_predicates', short_version=openshift_upgrade_min, deployment_type=openshift_deployment_type) }}"
+ prev_predicates_no_region: "{{ lookup('openshift_master_facts_default_predicates', short_version=openshift_upgrade_min, deployment_type=openshift_deployment_type, regions_enabled=False) }}"
+ default_predicates_no_region: "{{ lookup('openshift_master_facts_default_predicates', regions_enabled=False) }}"
+ # older_predicates are the set of predicates that have previously been
+ # hard-coded into openshift_facts
+ older_predicates:
+ - - name: MatchNodeSelector
+ - name: PodFitsResources
+ - name: PodFitsPorts
+ - name: NoDiskConflict
+ - name: NoVolumeZoneConflict
+ - name: MaxEBSVolumeCount
+ - name: MaxGCEPDVolumeCount
+ - name: Region
+ argument:
+ serviceAffinity:
+ labels:
+ - region
+ - - name: MatchNodeSelector
+ - name: PodFitsResources
+ - name: PodFitsPorts
+ - name: NoDiskConflict
+ - name: NoVolumeZoneConflict
+ - name: Region
+ argument:
+ serviceAffinity:
+ labels:
+ - region
+ - - name: MatchNodeSelector
+ - name: PodFitsResources
+ - name: PodFitsPorts
+ - name: NoDiskConflict
+ - name: Region
+ argument:
+ serviceAffinity:
+ labels:
+ - region
+ # older_predicates_no_region are the set of predicates that have previously
+ # been hard-coded into openshift_facts, with the Region predicate removed
+ older_predicates_no_region:
+ - - name: MatchNodeSelector
+ - name: PodFitsResources
+ - name: PodFitsPorts
+ - name: NoDiskConflict
+ - name: NoVolumeZoneConflict
+ - name: MaxEBSVolumeCount
+ - name: MaxGCEPDVolumeCount
+ - - name: MatchNodeSelector
+ - name: PodFitsResources
+ - name: PodFitsPorts
+ - name: NoDiskConflict
+ - name: NoVolumeZoneConflict
+ - - name: MatchNodeSelector
+ - name: PodFitsResources
+ - name: PodFitsPorts
+ - name: NoDiskConflict
+ block:
+
+ # Handle case where openshift_master_predicates is defined
+ - block:
+ - debug:
+ msg: "WARNING: openshift_master_scheduler_predicates is set to defaults from an earlier release of OpenShift current defaults are: {{ openshift_master_scheduler_default_predicates }}"
+ when: "{{ openshift_master_scheduler_predicates in older_predicates + older_predicates_no_region + [prev_predicates] + [prev_predicates_no_region] }}"
+
+ - debug:
+ msg: "WARNING: openshift_master_scheduler_predicates does not match current defaults of: {{ openshift_master_scheduler_default_predicates }}"
+ when: "{{ openshift_master_scheduler_predicates != openshift_master_scheduler_default_predicates }}"
+ when: "{{ openshift_master_scheduler_predicates | default(none) is not none }}"
+
+ # Handle cases where openshift_master_predicates is not defined
+ - block:
+ - debug:
+ msg: "WARNING: existing scheduler config does not match previous known defaults automated upgrade of scheduler config is disabled.\nexisting scheduler predicates: {{ openshift_master_scheduler_current_predicates }}\ncurrent scheduler default predicates are: {{ openshift_master_scheduler_default_predicates }}"
+ when: "{{ openshift_master_scheduler_current_predicates != openshift_master_scheduler_default_predicates and
+ openshift_master_scheduler_current_predicates not in older_predicates + [prev_predicates] }}"
+
+ - set_fact:
+ openshift_upgrade_scheduler_predicates: "{{ openshift_master_scheduler_default_predicates }}"
+ when: "{{ openshift_master_scheduler_current_predicates != openshift_master_scheduler_default_predicates and
+ openshift_master_scheduler_current_predicates in older_predicates + [prev_predicates] }}"
+
+ - set_fact:
+ openshift_upgrade_scheduler_predicates: "{{ default_predicates_no_region }}"
+ when: "{{ openshift_master_scheduler_current_predicates != default_predicates_no_region and
+ openshift_master_scheduler_current_predicates in older_predicates_no_region + [prev_predicates_no_region] }}"
+
+ when: "{{ openshift_master_scheduler_predicates | default(none) is none }}"
+
+
+# Upgrade priorities
+- vars:
+ prev_priorities: "{{ lookup('openshift_master_facts_default_priorities', short_version=openshift_upgrade_min, deployment_type=openshift_deployment_type) }}"
+ prev_priorities_no_zone: "{{ lookup('openshift_master_facts_default_priorities', short_version=openshift_upgrade_min, deployment_type=openshift_deployment_type, zones_enabled=False) }}"
+ default_priorities_no_zone: "{{ lookup('openshift_master_facts_default_priorities', zones_enabled=False) }}"
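+ # prev_priorities / prev_priorities_no_zone are the defaults of the release
+ # being upgraded from, resolved via the openshift_master_facts_default_priorities
+ # lookup plugin.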
+ # older_priorities are the set of priorities that have previously been
+ # hard-coded into openshift_facts
+ older_priorities:
+ - - name: LeastRequestedPriority
+ weight: 1
+ - name: SelectorSpreadPriority
+ weight: 1
+ - name: Zone
+ weight: 2
+ argument:
+ serviceAntiAffinity:
+ label: zone
+ # older_priorities_no_zone are the set of priorities that have previously
+ # been hard-coded into openshift_facts, with the Zone priority removed
+ older_priorities_no_zone:
+ - - name: LeastRequestedPriority
+ weight: 1
+ - name: SelectorSpreadPriority
+ weight: 1
+ block:
+
+ # Handle case where openshift_master_priorities is defined
+ - block:
+ - debug:
+ msg: "WARNING: openshift_master_scheduler_priorities is set to defaults from an earlier release of OpenShift; current defaults are: {{ openshift_master_scheduler_default_priorities }}"
+ when: "{{ openshift_master_scheduler_priorities in older_priorities + older_priorities_no_zone + [prev_priorities] + [prev_priorities_no_zone] }}"
+
+ - debug:
+ msg: "WARNING: openshift_master_scheduler_priorities does not match current defaults of: {{ openshift_master_scheduler_default_priorities }}"
+ when: "{{ openshift_master_scheduler_priorities != openshift_master_scheduler_default_priorities }}"
+ when: "{{ openshift_master_scheduler_priorities | default(none) is not none }}"
+
+ # Handle cases where openshift_master_priorities is not defined
+ - block:
+ - debug:
+ msg: "WARNING: existing scheduler config does not match previously known defaults; automated upgrade of scheduler config is disabled.\nexisting scheduler priorities: {{ openshift_master_scheduler_current_priorities }}\ncurrent scheduler default priorities are: {{ openshift_master_scheduler_default_priorities }}"
+ when: "{{ openshift_master_scheduler_current_priorities != openshift_master_scheduler_default_priorities and
+ openshift_master_scheduler_current_priorities not in older_priorities + [prev_priorities] }}"
+
+ - set_fact:
+ openshift_upgrade_scheduler_priorities: "{{ openshift_master_scheduler_default_priorities }}"
+ when: "{{ openshift_master_scheduler_current_priorities != openshift_master_scheduler_default_priorities and
+ openshift_master_scheduler_current_priorities in older_priorities + [prev_priorities] }}"
+
+ - set_fact:
+ openshift_upgrade_scheduler_priorities: "{{ default_priorities_no_zone }}"
+ when: "{{ openshift_master_scheduler_current_priorities != default_priorities_no_zone and
+ openshift_master_scheduler_current_priorities in older_priorities_no_zone + [prev_priorities_no_zone] }}"
+
+ when: "{{ openshift_master_scheduler_priorities | default(none) is none }}"
+
+
+# Update scheduler
+- vars:
+ scheduler_config:
+ kind: Policy
+ apiVersion: v1
+ predicates: "{{ openshift_upgrade_scheduler_predicates
+ | default(openshift_master_scheduler_current_predicates) }}"
+ priorities: "{{ openshift_upgrade_scheduler_priorities
+ | default(openshift_master_scheduler_current_priorities) }}"
+ block:
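+ # Only rewrite the policy file when an upgraded predicate or priority set was
+ # computed above; the existing config is preserved as a backup copy.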
+ - name: Update scheduler config
+ copy:
+ content: "{{ scheduler_config | to_nice_json }}"
+ dest: "{{ openshift_master_scheduler_conf }}"
+ backup: true
+ when: "{{ openshift_upgrade_scheduler_predicates is defined or
+ openshift_upgrade_scheduler_priorities is defined }}"
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/nuke_images.sh b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/nuke_images.sh
deleted file mode 120000
index 49a51bba9..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/nuke_images.sh
+++ /dev/null
@@ -1 +0,0 @@
-../files/nuke_images.sh \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml
index 684eea343..68c71a132 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml
@@ -48,3 +48,19 @@
dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
yaml_key: 'controllerConfig.servicesServingCert.signer.keyFile'
yaml_value: service-signer.key
+
+- modify_yaml:
+ dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+ yaml_key: 'admissionConfig.pluginConfig'
+ yaml_value: "{{ openshift.master.admission_plugin_config }}"
+ when: "{{ 'admission_plugin_config' in openshift.master }}"
+
+- modify_yaml:
+ dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+ yaml_key: 'admissionConfig.pluginOrderOverride'
+ yaml_value:
+
+- modify_yaml:
+ dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+ yaml_key: 'kubernetesMasterConfig.admissionConfig'
+ yaml_value:
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/node_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/node_config_upgrade.yml
index 8f64636ae..89b524f14 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_3/node_config_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_3/node_config_upgrade.yml
@@ -18,4 +18,3 @@
dest: "{{ openshift.common.config_base}}/node/node-config.yaml"
yaml_key: 'masterClientConnectionOverrides.qps'
yaml_value: 20
-
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml
new file mode 100644
index 000000000..43c2ffcd4
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml
@@ -0,0 +1,16 @@
+---
+- modify_yaml:
+ dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+ yaml_key: 'admissionConfig.pluginConfig'
+ yaml_value: "{{ openshift.master.admission_plugin_config }}"
+ when: "{{ 'admission_plugin_config' in openshift.master }}"
+
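+# Blanking pluginOrderOverride and kubernetesMasterConfig.admissionConfig leaves
+# admissionConfig.pluginConfig (set above) as the single place admission plugins
+# are configured.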
+- modify_yaml:
+ dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+ yaml_key: 'admissionConfig.pluginOrderOverride'
+ yaml_value:
+
+- modify_yaml:
+ dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+ yaml_key: 'kubernetesMasterConfig.admissionConfig'
+ yaml_value:
diff --git a/playbooks/common/openshift-cluster/validate_hostnames.yml b/playbooks/common/openshift-cluster/validate_hostnames.yml
index 50e25984f..48cc03b19 100644
--- a/playbooks/common/openshift-cluster/validate_hostnames.yml
+++ b/playbooks/common/openshift-cluster/validate_hostnames.yml
@@ -11,6 +11,6 @@
failed_when: false
- name: Warn user about bad openshift_hostname values
pause:
- prompt: "The hostname \"{{ openshift.common.hostname }}\" for \"{{ ansible_nodename }}\" doesn't resolve to an ip address owned by this host. Please set openshift_hostname variable to a hostname that when resolved on the host in question resolves to an IP address matching an interface on this host. This host will fail liveness checks for pods utilizing hostPorts, press ENTER to continue or CTRL-C to abort."
- seconds: "{{ 10 if openshift_override_hostname_check | default(false) | bool else omit }}"
+ prompt: "The hostname \"{{ openshift.common.hostname }}\" for \"{{ ansible_nodename }}\" doesn't resolve to an ip address owned by this host. Please set openshift_hostname variable to a hostname that when resolved on the host in question resolves to an IP address matching an interface on this host. This host will fail liveness checks for pods utilizing hostPorts, press ENTER to continue or CTRL-C to abort."
+ seconds: "{{ 10 if openshift_override_hostname_check | default(false) | bool else omit }}"
when: lookupip.stdout not in ansible_all_ipv4_addresses
diff --git a/playbooks/common/openshift-cluster/verify_ansible_version.yml b/playbooks/common/openshift-cluster/verify_ansible_version.yml
deleted file mode 100644
index d75b23bf7..000000000
--- a/playbooks/common/openshift-cluster/verify_ansible_version.yml
+++ /dev/null
@@ -1,11 +0,0 @@
----
-- name: Verify Ansible version is greater than or equal to 2.1.0.0
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tasks:
- - name: Verify Ansible version is greater than or equal to 2.1.0.0
- fail:
- msg: "Unsupported ansible version: {{ ansible_version.full }} found"
- when: not ansible_version.full | version_compare('2.1.0.0', 'ge')
diff --git a/playbooks/common/openshift-etcd/service.yml b/playbooks/common/openshift-etcd/service.yml
index f460612ba..a039d30b8 100644
--- a/playbooks/common/openshift-etcd/service.yml
+++ b/playbooks/common/openshift-etcd/service.yml
@@ -17,4 +17,4 @@
connection: ssh
gather_facts: no
tasks:
- - service: name=etcd state="{{ new_cluster_state }}"
+ - service: name=etcd state="{{ new_cluster_state }}"
diff --git a/playbooks/common/openshift-loadbalancer/service.yml b/playbooks/common/openshift-loadbalancer/service.yml
index efc80edf9..e413c2b3a 100644
--- a/playbooks/common/openshift-loadbalancer/service.yml
+++ b/playbooks/common/openshift-loadbalancer/service.yml
@@ -17,4 +17,4 @@
connection: ssh
gather_facts: no
tasks:
- - service: name=haproxy state="{{ new_cluster_state }}"
+ - service: name=haproxy state="{{ new_cluster_state }}"
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
index a53c55c14..39d64a126 100644
--- a/playbooks/common/openshift-master/config.yml
+++ b/playbooks/common/openshift-master/config.yml
@@ -53,7 +53,7 @@
when: openshift_hosted_metrics_deployer_prefix is not defined
- set_fact:
openshift_hosted_metrics_deployer_version: "{{ lookup('oo_option', 'openshift_hosted_metrics_deployer_version') | default('latest') }}"
- when: openshift_hosted_metrics_deployer_prefix is not defined
+ when: openshift_hosted_metrics_deployer_version is not defined
roles:
- openshift_facts
post_tasks:
@@ -74,11 +74,6 @@
public_console_url: "{{ openshift_master_public_console_url | default(None) }}"
ha: "{{ openshift_master_ha | default(groups.oo_masters | length > 1) }}"
master_count: "{{ openshift_master_count | default(groups.oo_masters | length) }}"
- - openshift_facts:
- role: hosted
- openshift_env:
- openshift_hosted_registry_storage_kind: 'nfs'
- when: openshift_hosted_registry_storage_kind is not defined and groups.oo_nfs_to_config is defined and groups.oo_nfs_to_config | length > 0
- name: Create temp directory for syncing certs
hosts: localhost
@@ -99,8 +94,8 @@
- openshift_facts:
role: master
local_facts:
- session_auth_secrets: "{{ openshift_master_session_auth_secrets | default(openshift.master.session_auth_secrets | default(None)) }}"
- session_encryption_secrets: "{{ openshift_master_session_encryption_secrets | default(openshift.master.session_encryption_secrets | default(None)) }}"
+ session_auth_secrets: "{{ openshift_master_session_auth_secrets | default(openshift.master.session_auth_secrets | default(None)) }}"
+ session_encryption_secrets: "{{ openshift_master_session_encryption_secrets | default(openshift.master.session_encryption_secrets | default(None)) }}"
- name: Generate master session secrets
hosts: oo_first_master
@@ -133,9 +128,7 @@
| oo_collect('openshift.common.hostname') | default([]) | join (',')
}}"
roles:
- - role: openshift_master_facts
- - role: openshift_hosted_facts
- - role: openshift_master_certificates
+ - role: openshift_master
openshift_ca_host: "{{ groups.oo_first_master.0 }}"
openshift_master_etcd_hosts: "{{ hostvars
| oo_select_keys(groups['oo_etcd_to_config'] | default([]))
@@ -145,42 +138,12 @@
| oo_select_keys(groups['oo_masters_to_config'] | default([]))
| oo_collect('openshift.common.all_hostnames')
| oo_flatten | unique }}"
- - role: openshift_etcd_client_certificates
+ openshift_master_hosts: "{{ groups.oo_masters_to_config }}"
etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
etcd_cert_subdir: "openshift-master-{{ openshift.common.hostname }}"
etcd_cert_config_dir: "{{ openshift.common.config_base }}/master"
etcd_cert_prefix: "master.etcd-"
- when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config
- - role: openshift_clock
- - role: openshift_cloud_provider
- - role: openshift_builddefaults
- - role: os_firewall
- os_firewall_allow:
- - service: etcd embedded
- port: 4001/tcp
- - service: api server https
- port: "{{ openshift.master.api_port }}/tcp"
- - service: api controllers https
- port: "{{ openshift.master.controllers_port }}/tcp"
- - service: skydns tcp
- port: "{{ openshift.master.dns_port }}/tcp"
- - service: skydns udp
- port: "{{ openshift.master.dns_port }}/udp"
- - service: Fluentd td-agent tcp
- port: 24224/tcp
- - service: Fluentd td-agent udp
- port: 24224/udp
- - service: pcsd
- port: 2224/tcp
- - service: Corosync UDP
- port: 5404/udp
- - service: Corosync UDP
- port: 5405/udp
- - role: openshift_master
- openshift_master_hosts: "{{ groups.oo_masters_to_config }}"
- - role: nickhammond.logrotate
- - role: nuage_master
- when: openshift.common.use_nuage | bool
+
post_tasks:
- name: Create group for deployment type
group_by: key=oo_masters_deployment_type_{{ openshift.common.deployment_type }}
diff --git a/playbooks/common/openshift-master/restart.yml b/playbooks/common/openshift-master/restart.yml
index 5769ef5cd..7b340887a 100644
--- a/playbooks/common/openshift-master/restart.yml
+++ b/playbooks/common/openshift-master/restart.yml
@@ -13,12 +13,12 @@
role: "{{ item.role }}"
local_facts: "{{ item.local_facts }}"
with_items:
- - role: common
- local_facts:
- rolling_restart_mode: "{{ openshift_rolling_restart_mode | default('services') }}"
- - role: master
- local_facts:
- cluster_method: "{{ openshift_master_cluster_method | default(None) }}"
+ - role: common
+ local_facts:
+ rolling_restart_mode: "{{ openshift_rolling_restart_mode | default('services') }}"
+ - role: master
+ local_facts:
+ cluster_method: "{{ openshift_master_cluster_method | default(None) }}"
# Creating a temp file on localhost, we then check each system that will
# be rebooted to see if that file exists, if so we know we're running
@@ -76,4 +76,3 @@
when: openshift.common.rolling_restart_mode == 'system'
- include: restart_services.yml
when: openshift.common.rolling_restart_mode == 'services'
-
diff --git a/playbooks/common/openshift-master/restart_hosts.yml b/playbooks/common/openshift-master/restart_hosts.yml
index b1c36718c..ffa23d26a 100644
--- a/playbooks/common/openshift-master/restart_hosts.yml
+++ b/playbooks/common/openshift-master/restart_hosts.yml
@@ -1,3 +1,4 @@
+---
- name: Restart master system
# https://github.com/ansible/ansible/issues/10616
shell: sleep 2 && shutdown -r now "OpenShift Ansible master rolling restart"
@@ -11,7 +12,7 @@
become: no
local_action:
module: wait_for
- host="{{ inventory_hostname }}"
+ host="{{ openshift.common.hostname }}"
state=started
delay=10
port="{{ openshift.master.api_port }}"
diff --git a/playbooks/common/openshift-master/restart_services.yml b/playbooks/common/openshift-master/restart_services.yml
index 5e539cd65..b40c32669 100644
--- a/playbooks/common/openshift-master/restart_services.yml
+++ b/playbooks/common/openshift-master/restart_services.yml
@@ -1,3 +1,4 @@
+---
- name: Restart master
service:
name: "{{ openshift.common.service_type }}-master"
@@ -9,13 +10,11 @@
state: restarted
when: openshift_master_ha | bool and openshift.master.cluster_method != 'pacemaker'
- name: Wait for master API to come back online
- become: no
- local_action:
- module: wait_for
- host="{{ inventory_hostname }}"
- state=started
- delay=10
- port="{{ openshift.master.api_port }}"
+ wait_for:
+ host: "{{ openshift.common.hostname }}"
+ state: started
+ delay: 10
+ port: "{{ openshift.master.api_port }}"
when: openshift_master_ha | bool and openshift.master.cluster_method != 'pacemaker'
- name: Restart master controllers
service:
diff --git a/playbooks/common/openshift-master/service.yml b/playbooks/common/openshift-master/service.yml
index 5e5198335..43ef8b6a1 100644
--- a/playbooks/common/openshift-master/service.yml
+++ b/playbooks/common/openshift-master/service.yml
@@ -17,4 +17,4 @@
connection: ssh
gather_facts: no
tasks:
- - service: name={{ openshift.common.service_type }}-master state="{{ new_cluster_state }}"
+ - service: name={{ openshift.common.service_type }}-master state="{{ new_cluster_state }}"
diff --git a/playbooks/common/openshift-nfs/service.yml b/playbooks/common/openshift-nfs/service.yml
index 8468014da..8c3f32403 100644
--- a/playbooks/common/openshift-nfs/service.yml
+++ b/playbooks/common/openshift-nfs/service.yml
@@ -15,4 +15,4 @@
connection: ssh
gather_facts: no
tasks:
- - service: name=nfs-server state="{{ new_cluster_state }}"
+ - service: name=nfs-server state="{{ new_cluster_state }}"
diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml
index 4824eeef3..b36c0eedf 100644
--- a/playbooks/common/openshift-node/config.yml
+++ b/playbooks/common/openshift-node/config.yml
@@ -60,30 +60,8 @@
when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
openshift_generate_no_proxy_hosts | default(True) | bool }}"
roles:
- - role: openshift_common
- - role: openshift_clock
- - role: openshift_docker
- - role: openshift_node_certificates
- openshift_ca_host: "{{ groups.oo_first_master.0 }}"
- - role: openshift_cloud_provider
- - role: openshift_node_dnsmasq
- when: openshift.common.use_dnsmasq | bool
- - role: os_firewall
- os_firewall_allow:
- - service: Kubernetes kubelet
- port: 10250/tcp
- - service: http
- port: 80/tcp
- - service: https
- port: 443/tcp
- - service: Openshift kubelet ReadOnlyPort
- port: 10255/tcp
- - service: Openshift kubelet ReadOnlyPort udp
- port: 10255/udp
- - service: OpenShift OVS sdn
- port: 4789/udp
- when: openshift.node.use_openshift_sdn | bool
- role: openshift_node
+ openshift_ca_host: "{{ groups.oo_first_master.0 }}"
- name: Configure nodes
hosts: oo_nodes_to_config:!oo_containerized_master_nodes
@@ -99,30 +77,8 @@
when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
openshift_generate_no_proxy_hosts | default(True) | bool }}"
roles:
- - role: openshift_common
- - role: openshift_clock
- - role: openshift_docker
- - role: openshift_node_certificates
- openshift_ca_host: "{{ groups.oo_first_master.0 }}"
- - role: openshift_cloud_provider
- - role: openshift_node_dnsmasq
- when: openshift.common.use_dnsmasq | bool
- - role: os_firewall
- os_firewall_allow:
- - service: Kubernetes kubelet
- port: 10250/tcp
- - service: http
- port: 80/tcp
- - service: https
- port: 443/tcp
- - service: Openshift kubelet ReadOnlyPort
- port: 10255/tcp
- - service: Openshift kubelet ReadOnlyPort udp
- port: 10255/udp
- - service: OpenShift OVS sdn
- port: 4789/udp
- when: openshift.node.use_openshift_sdn | bool
- role: openshift_node
+ openshift_ca_host: "{{ groups.oo_first_master.0 }}"
- name: Additional node config
hosts: oo_nodes_to_config
@@ -139,6 +95,8 @@
- role: nuage_node
when: openshift.common.use_nuage | bool
- role: nickhammond.logrotate
+ - role: openshift_manage_node
+ openshift_master_host: "{{ groups.oo_first_master.0 }}"
tasks:
- name: Create group for deployment type
group_by: key=oo_nodes_deployment_type_{{ openshift.common.deployment_type }}
@@ -152,35 +110,3 @@
tasks:
- file: name={{ mktemp.stdout }} state=absent
changed_when: False
-
-- name: Set node schedulability
- hosts: oo_first_master
- vars:
- openshift_nodes: "{{ groups.oo_nodes_to_config | default([]) }}"
- pre_tasks:
- # Necessary because when you're on a node that's also a master the master will be
- # restarted after the node restarts docker and it will take up to 60 seconds for
- # systemd to start the master again
- - name: Wait for master API to become available before proceeding
- # Using curl here since the uri module requires python-httplib2 and
- # wait_for port doesn't provide health information.
- command: >
- curl --silent --tlsv1.2
- {% if openshift.common.version_gte_3_2_or_1_2 | bool %}
- --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt
- {% else %}
- --cacert {{ openshift.common.config_base }}/master/ca.crt
- {% endif %}
- {{ openshift.master.api_url }}/healthz/ready
- args:
- # Disables the following warning:
- # Consider using get_url or uri module rather than running curl
- warn: no
- register: api_available_output
- until: api_available_output.stdout == 'ok'
- retries: 120
- delay: 1
- changed_when: false
- when: openshift.common.is_containerized | bool
- roles:
- - openshift_manage_node
diff --git a/playbooks/common/openshift-node/service.yml b/playbooks/common/openshift-node/service.yml
index 33095c9fb..2da68ceea 100644
--- a/playbooks/common/openshift-node/service.yml
+++ b/playbooks/common/openshift-node/service.yml
@@ -17,4 +17,4 @@
connection: ssh
gather_facts: no
tasks:
- - service: name={{ service_type }}-node state="{{ new_cluster_state }}"
+ - service: name={{ service_type }}-node state="{{ new_cluster_state }}"
diff --git a/playbooks/gce/README.md b/playbooks/gce/README.md
new file mode 100644
index 000000000..0514d6f50
--- /dev/null
+++ b/playbooks/gce/README.md
@@ -0,0 +1,4 @@
+# GCE playbooks
+
+This playbook directory is meant to be driven by [`bin/cluster`](../../bin),
+which is community supported and whose use is largely considered deprecated.
diff --git a/playbooks/gce/openshift-cluster/cluster_hosts.yml b/playbooks/gce/openshift-cluster/cluster_hosts.yml
index a7baea915..74e2420db 100644
--- a/playbooks/gce/openshift-cluster/cluster_hosts.yml
+++ b/playbooks/gce/openshift-cluster/cluster_hosts.yml
@@ -1,21 +1,21 @@
---
-g_all_hosts: "{{ groups['tag_clusterid-' ~ cluster_id] | default([])
- | intersect(groups['tag_environment-' ~ cluster_env] | default([])) }}"
+g_all_hosts: "{{ groups['tag_clusterid-' ~ cluster_id] | default([])
+ | intersect(groups['tag_environment-' ~ cluster_env] | default([])) }}"
-g_etcd_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-etcd'] | default([])) }}"
+g_etcd_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-etcd'] | default([])) }}"
-g_lb_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-lb'] | default([])) }}"
+g_lb_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-lb'] | default([])) }}"
-g_nfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-nfs'] | default([])) }}"
+g_nfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-nfs'] | default([])) }}"
-g_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-master'] | default([])) }}"
+g_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-master'] | default([])) }}"
g_new_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-new-master'] | default([])) }}"
-g_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-node'] | default([])) }}"
+g_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-node'] | default([])) }}"
g_new_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-new-node'] | default([])) }}"
-g_infra_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type-infra'] | default([])) }}"
+g_infra_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type-infra'] | default([])) }}"
g_compute_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type-compute'] | default([])) }}"
diff --git a/playbooks/gce/openshift-cluster/library/gce.py b/playbooks/gce/openshift-cluster/library/gce.py
deleted file mode 100644
index fcaa3b850..000000000
--- a/playbooks/gce/openshift-cluster/library/gce.py
+++ /dev/null
@@ -1,543 +0,0 @@
-#!/usr/bin/python
-# Copyright 2013 Google Inc.
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-DOCUMENTATION = '''
----
-module: gce
-version_added: "1.4"
-short_description: create or terminate GCE instances
-description:
- - Creates or terminates Google Compute Engine (GCE) instances. See
- U(https://cloud.google.com/products/compute-engine) for an overview.
- Full install/configuration instructions for the gce* modules can
- be found in the comments of ansible/test/gce_tests.py.
-options:
- image:
- description:
- - image string to use for the instance
- required: false
- default: "debian-7"
- instance_names:
- description:
- - a comma-separated list of instance names to create or destroy
- required: false
- default: null
- machine_type:
- description:
- - machine type to use for the instance, use 'n1-standard-1' by default
- required: false
- default: "n1-standard-1"
- metadata:
- description:
- - a hash/dictionary of custom data for the instance;
- '{"key":"value", ...}'
- required: false
- default: null
- service_account_email:
- version_added: "1.5.1"
- description:
- - service account email
- required: false
- default: null
- service_account_permissions:
- version_added: "2.0"
- description:
- - service account permissions (see
- U(https://cloud.google.com/sdk/gcloud/reference/compute/instances/create),
- --scopes section for detailed information)
- required: false
- default: null
- choices: [
- "bigquery", "cloud-platform", "compute-ro", "compute-rw",
- "computeaccounts-ro", "computeaccounts-rw", "datastore", "logging-write",
- "monitoring", "sql", "sql-admin", "storage-full", "storage-ro",
- "storage-rw", "taskqueue", "userinfo-email"
- ]
- pem_file:
- version_added: "1.5.1"
- description:
- - path to the pem file associated with the service account email
- required: false
- default: null
- project_id:
- version_added: "1.5.1"
- description:
- - your GCE project ID
- required: false
- default: null
- name:
- description:
- - identifier when working with a single instance
- required: false
- network:
- description:
- - name of the network, 'default' will be used if not specified
- required: false
- default: "default"
- persistent_boot_disk:
- description:
- - if set, create the instance with a persistent boot disk
- required: false
- default: "false"
- disks:
- description:
- - a list of persistent disks to attach to the instance; a string value
- gives the name of the disk; alternatively, a dictionary value can
- define 'name' and 'mode' ('READ_ONLY' or 'READ_WRITE'). The first entry
- will be the boot disk (which must be READ_WRITE).
- required: false
- default: null
- version_added: "1.7"
- state:
- description:
- - desired state of the resource
- required: false
- default: "present"
- choices: ["active", "present", "absent", "deleted"]
- tags:
- description:
- - a comma-separated list of tags to associate with the instance
- required: false
- default: null
- zone:
- description:
- - the GCE zone to use
- required: true
- default: "us-central1-a"
- ip_forward:
- version_added: "1.9"
- description:
- - set to true if the instance can forward ip packets (useful for
- gateways)
- required: false
- default: "false"
- external_ip:
- version_added: "1.9"
- description:
- - type of external ip, ephemeral by default
- required: false
- default: "ephemeral"
- disk_auto_delete:
- version_added: "1.9"
- description:
- - if set boot disk will be removed after instance destruction
- required: false
- default: "true"
-
-requirements:
- - "python >= 2.6"
- - "apache-libcloud >= 0.13.3"
-notes:
- - Either I(name) or I(instance_names) is required.
-author: "Eric Johnson (@erjohnso) <erjohnso@google.com>"
-'''
-
-EXAMPLES = '''
-# Basic provisioning example. Create a single Debian 7 instance in the
-# us-central1-a Zone of n1-standard-1 machine type.
-- local_action:
- module: gce
- name: test-instance
- zone: us-central1-a
- machine_type: n1-standard-1
- image: debian-7
-
-# Example using defaults and with metadata to create a single 'foo' instance
-- local_action:
- module: gce
- name: foo
- metadata: '{"db":"postgres", "group":"qa", "id":500}'
-
-
-# Launch instances from a control node, runs some tasks on the new instances,
-# and then terminate them
-- name: Create a sandbox instance
- hosts: localhost
- vars:
- names: foo,bar
- machine_type: n1-standard-1
- image: debian-6
- zone: us-central1-a
- service_account_email: unique-email@developer.gserviceaccount.com
- pem_file: /path/to/pem_file
- project_id: project-id
- tasks:
- - name: Launch instances
- local_action: gce instance_names={{names}} machine_type={{machine_type}}
- image={{image}} zone={{zone}}
- service_account_email={{ service_account_email }}
- pem_file={{ pem_file }} project_id={{ project_id }}
- register: gce
- - name: Wait for SSH to come up
- local_action: wait_for host={{item.public_ip}} port=22 delay=10
- timeout=60 state=started
- with_items: {{gce.instance_data}}
-
-- name: Configure instance(s)
- hosts: launched
- sudo: True
- roles:
- - my_awesome_role
- - my_awesome_tasks
-
-- name: Terminate instances
- hosts: localhost
- connection: local
- tasks:
- - name: Terminate instances that were previously launched
- local_action:
- module: gce
- state: 'absent'
- instance_names: {{gce.instance_names}}
-
-'''
-
-try:
- import libcloud
- from libcloud.compute.types import Provider
- from libcloud.compute.providers import get_driver
- from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
- ResourceExistsError, ResourceInUseError, ResourceNotFoundError
- _ = Provider.GCE
- HAS_LIBCLOUD = True
-except ImportError:
- HAS_LIBCLOUD = False
-
-try:
- from ast import literal_eval
- HAS_PYTHON26 = True
-except ImportError:
- HAS_PYTHON26 = False
-
-
-def get_instance_info(inst):
- """Retrieves instance information from an instance object and returns it
- as a dictionary.
-
- """
- metadata = {}
- if 'metadata' in inst.extra and 'items' in inst.extra['metadata']:
- for md in inst.extra['metadata']['items']:
- metadata[md['key']] = md['value']
-
- try:
- netname = inst.extra['networkInterfaces'][0]['network'].split('/')[-1]
- except:
- netname = None
- if 'disks' in inst.extra:
- disk_names = [disk_info['source'].split('/')[-1]
- for disk_info
- in sorted(inst.extra['disks'],
- key=lambda disk_info: disk_info['index'])]
- else:
- disk_names = []
-
- if len(inst.public_ips) == 0:
- public_ip = None
- else:
- public_ip = inst.public_ips[0]
-
- return({
- 'image': inst.image is not None and inst.image.split('/')[-1] or None,
- 'disks': disk_names,
- 'machine_type': inst.size,
- 'metadata': metadata,
- 'name': inst.name,
- 'network': netname,
- 'private_ip': inst.private_ips[0],
- 'public_ip': public_ip,
- 'status': ('status' in inst.extra) and inst.extra['status'] or None,
- 'tags': ('tags' in inst.extra) and inst.extra['tags'] or [],
- 'zone': ('zone' in inst.extra) and inst.extra['zone'].name or None,
- })
-
-
-def create_instances(module, gce, instance_names):
- """Creates new instances. Attributes other than instance_names are picked
- up from 'module'
-
- module : AnsibleModule object
- gce: authenticated GCE libcloud driver
- instance_names: python list of instance names to create
-
- Returns:
- A list of dictionaries with instance information
- about the instances that were launched.
-
- """
- image = module.params.get('image')
- machine_type = module.params.get('machine_type')
- metadata = module.params.get('metadata')
- network = module.params.get('network')
- persistent_boot_disk = module.params.get('persistent_boot_disk')
- disks = module.params.get('disks')
- state = module.params.get('state')
- tags = module.params.get('tags')
- zone = module.params.get('zone')
- ip_forward = module.params.get('ip_forward')
- external_ip = module.params.get('external_ip')
- disk_auto_delete = module.params.get('disk_auto_delete')
- service_account_permissions = module.params.get('service_account_permissions')
- service_account_email = module.params.get('service_account_email')
-
- if external_ip == "none":
- external_ip = None
-
- new_instances = []
- changed = False
-
- lc_image = gce.ex_get_image(image)
- lc_disks = []
- disk_modes = []
- for i, disk in enumerate(disks or []):
- if isinstance(disk, dict):
- lc_disks.append(gce.ex_get_volume(disk['name']))
- disk_modes.append(disk['mode'])
- else:
- lc_disks.append(gce.ex_get_volume(disk))
- # boot disk is implicitly READ_WRITE
- disk_modes.append('READ_ONLY' if i > 0 else 'READ_WRITE')
- lc_network = gce.ex_get_network(network)
- lc_machine_type = gce.ex_get_size(machine_type)
- lc_zone = gce.ex_get_zone(zone)
-
- # Try to convert the user's metadata value into the format expected
- # by GCE. First try to ensure user has proper quoting of a
- # dictionary-like syntax using 'literal_eval', then convert the python
- # dict into a python list of 'key' / 'value' dicts. Should end up
- # with:
- # [ {'key': key1, 'value': value1}, {'key': key2, 'value': value2}, ...]
- if metadata:
- if isinstance(metadata, dict):
- md = metadata
- else:
- try:
- md = literal_eval(str(metadata))
- if not isinstance(md, dict):
- raise ValueError('metadata must be a dict')
- except ValueError as e:
- module.fail_json(msg='bad metadata: %s' % str(e))
- except SyntaxError as e:
- module.fail_json(msg='bad metadata syntax')
-
- if hasattr(libcloud, '__version__') and libcloud.__version__ < '0.15':
- items = []
- for k, v in md.items():
- items.append({"key": k, "value": v})
- metadata = {'items': items}
- else:
- metadata = md
-
- ex_sa_perms = []
- bad_perms = []
- if service_account_permissions:
- for perm in service_account_permissions:
- if perm not in gce.SA_SCOPES_MAP.keys():
- bad_perms.append(perm)
- if len(bad_perms) > 0:
- module.fail_json(msg='bad permissions: %s' % str(bad_perms))
- if service_account_email:
- ex_sa_perms.append({'email': service_account_email})
- else:
- ex_sa_perms.append({'email': "default"})
- ex_sa_perms[0]['scopes'] = service_account_permissions
-
- # These variables all have default values but check just in case
- if not lc_image or not lc_network or not lc_machine_type or not lc_zone:
- module.fail_json(msg='Missing required create instance variable',
- changed=False)
-
- for name in instance_names:
- pd = None
- if lc_disks:
- pd = lc_disks[0]
- elif persistent_boot_disk:
- try:
- pd = gce.create_volume(None, "%s" % name, image=lc_image)
- except ResourceExistsError:
- pd = gce.ex_get_volume("%s" % name, lc_zone)
- inst = None
- try:
- inst = gce.create_node(
- name, lc_machine_type, lc_image, location=lc_zone,
- ex_network=network, ex_tags=tags, ex_metadata=metadata,
- ex_boot_disk=pd, ex_can_ip_forward=ip_forward,
- external_ip=external_ip, ex_disk_auto_delete=disk_auto_delete,
- ex_service_accounts=ex_sa_perms
- )
- changed = True
- except ResourceExistsError:
- inst = gce.ex_get_node(name, lc_zone)
- except GoogleBaseError as e:
- module.fail_json(msg='Unexpected error attempting to create ' +
- 'instance %s, error: %s' % (name, e.value))
-
- for i, lc_disk in enumerate(lc_disks):
- # Check whether the disk is already attached
- if (len(inst.extra['disks']) > i):
- attached_disk = inst.extra['disks'][i]
- if attached_disk['source'] != lc_disk.extra['selfLink']:
- module.fail_json(
- msg=("Disk at index %d does not match: requested=%s found=%s" % (
- i, lc_disk.extra['selfLink'], attached_disk['source'])))
- elif attached_disk['mode'] != disk_modes[i]:
- module.fail_json(
- msg=("Disk at index %d is in the wrong mode: requested=%s found=%s" % (
- i, disk_modes[i], attached_disk['mode'])))
- else:
- continue
- gce.attach_volume(inst, lc_disk, ex_mode=disk_modes[i])
- # Work around libcloud bug: attached volumes don't get added
- # to the instance metadata. get_instance_info() only cares about
- # source and index.
- if len(inst.extra['disks']) != i+1:
- inst.extra['disks'].append(
- {'source': lc_disk.extra['selfLink'], 'index': i})
-
- if inst:
- new_instances.append(inst)
-
- instance_names = []
- instance_json_data = []
- for inst in new_instances:
- d = get_instance_info(inst)
- instance_names.append(d['name'])
- instance_json_data.append(d)
-
- return (changed, instance_json_data, instance_names)
-
-
-def terminate_instances(module, gce, instance_names, zone_name):
- """Terminates a list of instances.
-
- module: Ansible module object
- gce: authenticated GCE connection object
- instance_names: a list of instance names to terminate
- zone_name: the zone where the instances reside prior to termination
-
- Returns a dictionary of instance names that were terminated.
-
- """
- changed = False
- terminated_instance_names = []
- for name in instance_names:
- inst = None
- try:
- inst = gce.ex_get_node(name, zone_name)
- except ResourceNotFoundError:
- pass
- except Exception as e:
- module.fail_json(msg=unexpected_error_msg(e), changed=False)
- if inst:
- gce.destroy_node(inst)
- terminated_instance_names.append(inst.name)
- changed = True
-
- return (changed, terminated_instance_names)
-
-
-def main():
- module = AnsibleModule(
- argument_spec=dict(
- image=dict(default='debian-7'),
- instance_names=dict(),
- machine_type=dict(default='n1-standard-1'),
- metadata=dict(),
- name=dict(),
- network=dict(default='default'),
- persistent_boot_disk=dict(type='bool', default=False),
- disks=dict(type='list'),
- state=dict(choices=['active', 'present', 'absent', 'deleted'],
- default='present'),
- tags=dict(type='list'),
- zone=dict(default='us-central1-a'),
- service_account_email=dict(),
- service_account_permissions=dict(type='list'),
- pem_file=dict(),
- project_id=dict(),
- ip_forward=dict(type='bool', default=False),
- external_ip=dict(choices=['ephemeral', 'none'],
- default='ephemeral'),
- disk_auto_delete=dict(type='bool', default=True),
- )
- )
-
- if not HAS_PYTHON26:
- module.fail_json(msg="GCE module requires python's 'ast' module, python v2.6+")
- if not HAS_LIBCLOUD:
- module.fail_json(msg='libcloud with GCE support (0.13.3+) required for this module')
-
- gce = gce_connect(module)
-
- image = module.params.get('image')
- instance_names = module.params.get('instance_names')
- machine_type = module.params.get('machine_type')
- metadata = module.params.get('metadata')
- name = module.params.get('name')
- network = module.params.get('network')
- persistent_boot_disk = module.params.get('persistent_boot_disk')
- state = module.params.get('state')
- tags = module.params.get('tags')
- zone = module.params.get('zone')
- ip_forward = module.params.get('ip_forward')
- changed = False
-
- inames = []
- if isinstance(instance_names, list):
- inames = instance_names
- elif isinstance(instance_names, str):
- inames = instance_names.split(',')
- if name:
- inames.append(name)
- if not inames:
- module.fail_json(msg='Must specify a "name" or "instance_names"',
- changed=False)
- if not zone:
- module.fail_json(msg='Must specify a "zone"', changed=False)
-
- json_output = {'zone': zone}
- if state in ['absent', 'deleted']:
- json_output['state'] = 'absent'
- (changed, terminated_instance_names) = terminate_instances(
- module, gce, inames, zone)
-
- # based on what user specified, return the same variable, although
- # value could be different if an instance could not be destroyed
- if instance_names:
- json_output['instance_names'] = terminated_instance_names
- elif name:
- json_output['name'] = name
-
- elif state in ['active', 'present']:
- json_output['state'] = 'present'
- (changed, instance_data, instance_name_list) = create_instances(
- module, gce, inames)
- json_output['instance_data'] = instance_data
- if instance_names:
- json_output['instance_names'] = instance_name_list
- elif name:
- json_output['name'] = name
-
- json_output['changed'] = changed
- module.exit_json(**json_output)
-
-# import module snippets
-from ansible.module_utils.basic import *
-from ansible.module_utils.gce import *
-if __name__ == '__main__':
- main()
diff --git a/playbooks/gce/openshift-cluster/list.yml b/playbooks/gce/openshift-cluster/list.yml
index 34dcd2496..34ab09533 100644
--- a/playbooks/gce/openshift-cluster/list.yml
+++ b/playbooks/gce/openshift-cluster/list.yml
@@ -16,18 +16,8 @@
groups: oo_list_hosts
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
ansible_become: "{{ deployment_vars[deployment_type].become }}"
+ oo_public_ipv4: "{{ hostvars[item].gce_public_ip }}"
+ oo_private_ipv4: "{{ hostvars[item].gce_private_ip }}"
with_items: "{{ groups[scratch_group] | default([], true) | difference(['localhost']) | difference(groups.status_terminated | default([], true)) }}"
-
-- name: List Hosts
- hosts: oo_list_hosts
-
-- name: List Hosts
- hosts: localhost
- become: no
- connection: local
- gather_facts: no
- vars_files:
- - vars.yml
- tasks:
- debug:
msg: "{{ hostvars | oo_select_keys(groups[scratch_group] | default([])) | oo_pretty_print_cluster }}"
diff --git a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml
index 7c8189224..65dd2b71e 100644
--- a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml
+++ b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml
@@ -9,10 +9,11 @@
project_id: "{{ lookup('env', 'gce_project_id') }}"
zone: "{{ lookup('env', 'zone') }}"
network: "{{ lookup('env', 'network') }}"
-# unsupported in 1.9.+
+ subnetwork: "{{ lookup('env', 'subnetwork') | default(omit, True) }}"
+ # unsupported in 1.9.+
#service_account_permissions: "datastore,logging-write"
tags:
- - created-by-{{ lookup('env', 'LOGNAME') |default(cluster, true) }}
+ - created-by-{{ lookup('env', 'LOGNAME') | regex_replace('[^a-z0-9]+', '') | default(cluster, true) }}
- environment-{{ cluster_env }}
- clusterid-{{ cluster_id }}
- host-type-{{ type }}
diff --git a/playbooks/gce/openshift-cluster/terminate.yml b/playbooks/gce/openshift-cluster/terminate.yml
index 68e60f9d4..afe269b7c 100644
--- a/playbooks/gce/openshift-cluster/terminate.yml
+++ b/playbooks/gce/openshift-cluster/terminate.yml
@@ -33,18 +33,17 @@
vars_files:
- vars.yml
tasks:
-
- - name: Terminate instances that were previously launched
- local_action:
- module: gce
- state: 'absent'
- name: "{{ item }}"
- service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
- pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
- project_id: "{{ lookup('env', 'gce_project_id') }}"
- zone: "{{ lookup('env', 'zone') }}"
- with_items: "{{ groups['oo_hosts_to_terminate'] | default([], true) }}"
- when: item is defined
+ - name: Terminate instances that were previously launched
+ local_action:
+ module: gce
+ state: 'absent'
+ name: "{{ item }}"
+ service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
+ pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
+ project_id: "{{ lookup('env', 'gce_project_id') }}"
+ zone: "{{ lookup('env', 'zone') }}"
+ with_items: "{{ groups['oo_hosts_to_terminate'] | default([], true) }}"
+ when: item is defined
#- include: ../openshift-node/terminate.yml
# vars:
diff --git a/playbooks/libvirt/README.md b/playbooks/libvirt/README.md
new file mode 100644
index 000000000..3ce46a76f
--- /dev/null
+++ b/playbooks/libvirt/README.md
@@ -0,0 +1,4 @@
+# libvirt playbooks
+
+This playbook directory is meant to be driven by [`bin/cluster`](../../bin),
+which is community supported and whose use is largely considered deprecated.
diff --git a/playbooks/libvirt/openshift-cluster/cluster_hosts.yml b/playbooks/libvirt/openshift-cluster/cluster_hosts.yml
index a7baea915..74e2420db 100644
--- a/playbooks/libvirt/openshift-cluster/cluster_hosts.yml
+++ b/playbooks/libvirt/openshift-cluster/cluster_hosts.yml
@@ -1,21 +1,21 @@
---
-g_all_hosts: "{{ groups['tag_clusterid-' ~ cluster_id] | default([])
- | intersect(groups['tag_environment-' ~ cluster_env] | default([])) }}"
+g_all_hosts: "{{ groups['tag_clusterid-' ~ cluster_id] | default([])
+ | intersect(groups['tag_environment-' ~ cluster_env] | default([])) }}"
-g_etcd_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-etcd'] | default([])) }}"
+g_etcd_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-etcd'] | default([])) }}"
-g_lb_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-lb'] | default([])) }}"
+g_lb_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-lb'] | default([])) }}"
-g_nfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-nfs'] | default([])) }}"
+g_nfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-nfs'] | default([])) }}"
-g_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-master'] | default([])) }}"
+g_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-master'] | default([])) }}"
g_new_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-new-master'] | default([])) }}"
-g_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-node'] | default([])) }}"
+g_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-node'] | default([])) }}"
g_new_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-new-node'] | default([])) }}"
-g_infra_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type-infra'] | default([])) }}"
+g_infra_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type-infra'] | default([])) }}"
g_compute_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type-compute'] | default([])) }}"
diff --git a/playbooks/libvirt/openshift-cluster/config.yml b/playbooks/libvirt/openshift-cluster/config.yml
index 299325fc4..44b0f5a3c 100644
--- a/playbooks/libvirt/openshift-cluster/config.yml
+++ b/playbooks/libvirt/openshift-cluster/config.yml
@@ -3,8 +3,6 @@
# is localhost, so no hostname value (or public_hostname) value is getting
# assigned
-- include: ../../common/openshift-cluster/verify_ansible_version.yml
-
- hosts: localhost
gather_facts: no
tasks:
diff --git a/playbooks/libvirt/openshift-cluster/list.yml b/playbooks/libvirt/openshift-cluster/list.yml
index 86d5d0aad..579cd7ac6 100644
--- a/playbooks/libvirt/openshift-cluster/list.yml
+++ b/playbooks/libvirt/openshift-cluster/list.yml
@@ -16,18 +16,8 @@
groups: oo_list_hosts
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
ansible_become: "{{ deployment_vars[deployment_type].become }}"
+ oo_public_ipv4: ""
+ oo_private_ipv4: "{{ hostvars[item].libvirt_ip_address }}"
with_items: "{{ groups[scratch_group] | default([]) | difference(['localhost']) }}"
-
-- name: List Hosts
- hosts: oo_list_hosts
-
-- name: List Hosts
- hosts: localhost
- become: no
- connection: local
- gather_facts: no
- vars_files:
- - vars.yml
- tasks:
- debug:
msg: "{{ hostvars | oo_select_keys(groups[scratch_group] | default([])) | oo_pretty_print_cluster }}"
diff --git a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
index e0afc43ba..78581fdfe 100644
--- a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
+++ b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
@@ -116,6 +116,7 @@
ansible_become: "{{ deployment_vars[deployment_type].become }}"
groups: "tag_environment-{{ cluster_env }}, tag_host-type-{{ type }}, tag_sub-host-type-{{ g_sub_host_type }}, tag_clusterid-{{ cluster_id }}"
openshift_node_labels: "{{ node_label }}"
+ libvirt_ip_address: "{{ item.1 }}"
with_together:
- '{{ instances }}'
- '{{ ips }}'
@@ -133,5 +134,5 @@
retries: 30
delay: 1
with_together:
- - '{{ instances }}'
- - '{{ ips }}'
+ - '{{ instances }}'
+ - '{{ ips }}'
diff --git a/playbooks/libvirt/openshift-cluster/terminate.yml b/playbooks/libvirt/openshift-cluster/terminate.yml
index 81e6d8f05..8a63d11a5 100644
--- a/playbooks/libvirt/openshift-cluster/terminate.yml
+++ b/playbooks/libvirt/openshift-cluster/terminate.yml
@@ -68,4 +68,3 @@
path: '{{ libvirt_storage_pool_path }}/{{ item }}_configdrive/'
state: absent
with_items: "{{ groups['oo_hosts_to_terminate'] }}"
-
diff --git a/playbooks/libvirt/openshift-cluster/vars.yml b/playbooks/libvirt/openshift-cluster/vars.yml
index 4daaf1c91..5156789e7 100644
--- a/playbooks/libvirt/openshift-cluster/vars.yml
+++ b/playbooks/libvirt/openshift-cluster/vars.yml
@@ -12,10 +12,10 @@ debug_level: 2
# The default value of image_url for enterprise and openshift-enterprise deployment types below won't work.
deployment_rhel7_ent_base:
image:
- url: "{{ lookup('oo_option', 'image_url') |
- default('https://access.cdn.redhat.com//content/origin/files/sha256/25/25f880767ec6bf71beb532e17f1c45231640bbfdfbbb1dffb79d2c1b328388e0/rhel-guest-image-7.2-20151102.0.x86_64.qcow2', True) }}"
- name: "{{ lookup('oo_option', 'image_name') |
- default('rhel-guest-image-7.2-20151102.0.x86_64.qcow2', True) }}"
+ url: "{{ lookup('oo_option', 'image_url') |
+ default('https://access.cdn.redhat.com//content/origin/files/sha256/25/25f880767ec6bf71beb532e17f1c45231640bbfdfbbb1dffb79d2c1b328388e0/rhel-guest-image-7.2-20151102.0.x86_64.qcow2', True) }}"
+ name: "{{ lookup('oo_option', 'image_name') |
+ default('rhel-guest-image-7.2-20151102.0.x86_64.qcow2', True) }}"
sha256: "{{ lookup('oo_option', 'image_sha256') |
default('25f880767ec6bf71beb532e17f1c45231640bbfdfbbb1dffb79d2c1b328388e0', True) }}"
compression: ""
@@ -25,12 +25,12 @@ deployment_rhel7_ent_base:
deployment_vars:
origin:
image:
- url: "{{ lookup('oo_option', 'image_url') |
- default('http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud-1602.qcow2.xz', True) }}"
- compression: "{{ lookup('oo_option', 'image_compression') |
- default('xz', True) }}"
- name: "{{ lookup('oo_option', 'image_name') |
- default('CentOS-7-x86_64-GenericCloud.qcow2', True) }}"
+ url: "{{ lookup('oo_option', 'image_url') |
+ default('http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud-1602.qcow2.xz', True) }}"
+ compression: "{{ lookup('oo_option', 'image_compression') |
+ default('xz', True) }}"
+ name: "{{ lookup('oo_option', 'image_name') |
+ default('CentOS-7-x86_64-GenericCloud.qcow2', True) }}"
sha256: "{{ lookup('oo_option', 'image_sha256') |
default('dd0f5e610e7c5ffacaca35ed7a78a19142a588f4543da77b61c1fb0d74400471', True) }}"
ssh_user: openshift
diff --git a/playbooks/openstack/README.md b/playbooks/openstack/README.md
new file mode 100644
index 000000000..a6d8d6995
--- /dev/null
+++ b/playbooks/openstack/README.md
@@ -0,0 +1,4 @@
+# OpenStack playbooks
+
+This playbook directory is meant to be driven by [`bin/cluster`](../../bin),
+which is community supported and whose use is largely considered deprecated.
diff --git a/playbooks/openstack/openshift-cluster/cluster_hosts.yml b/playbooks/openstack/openshift-cluster/cluster_hosts.yml
index 12c436eaf..98434439c 100644
--- a/playbooks/openstack/openshift-cluster/cluster_hosts.yml
+++ b/playbooks/openstack/openshift-cluster/cluster_hosts.yml
@@ -1,21 +1,21 @@
---
-g_all_hosts: "{{ groups['meta-clusterid_' ~ cluster_id] | default([])
- | intersect(groups['meta-environment_' ~ cluster_env] | default([])) }}"
+g_all_hosts: "{{ groups['meta-clusterid_' ~ cluster_id] | default([])
+ | intersect(groups['meta-environment_' ~ cluster_env] | default([])) }}"
-g_etcd_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_etcd'] | default([])) }}"
+g_etcd_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_etcd'] | default([])) }}"
-g_lb_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_lb'] | default([])) }}"
+g_lb_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_lb'] | default([])) }}"
-g_nfs_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_nfs'] | default([])) }}"
+g_nfs_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_nfs'] | default([])) }}"
-g_master_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_master'] | default([])) }}"
+g_master_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_master'] | default([])) }}"
g_new_master_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_new_master'] | default([])) }}"
-g_node_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_node'] | default([])) }}"
+g_node_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_node'] | default([])) }}"
g_new_node_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_new_node'] | default([])) }}"
-g_infra_hosts: "{{ g_node_hosts | intersect(groups['meta-sub-host-type_infra'] | default([])) }}"
+g_infra_hosts: "{{ g_node_hosts | intersect(groups['meta-sub-host-type_infra'] | default([])) }}"
g_compute_hosts: "{{ g_node_hosts | intersect(groups['meta-sub-host-type_compute'] | default([])) }}"
diff --git a/playbooks/openstack/openshift-cluster/config.yml b/playbooks/openstack/openshift-cluster/config.yml
index f6550b2c4..1366c83ca 100644
--- a/playbooks/openstack/openshift-cluster/config.yml
+++ b/playbooks/openstack/openshift-cluster/config.yml
@@ -1,6 +1,4 @@
---
-- include: ../../common/openshift-cluster/verify_ansible_version.yml
-
- hosts: localhost
gather_facts: no
tasks:
diff --git a/playbooks/openstack/openshift-cluster/launch.yml b/playbooks/openstack/openshift-cluster/launch.yml
index eb2c4269a..c0bc12f55 100644
--- a/playbooks/openstack/openshift-cluster/launch.yml
+++ b/playbooks/openstack/openshift-cluster/launch.yml
@@ -25,7 +25,7 @@
- name: Create or Update OpenStack Stack
command: 'heat {{ heat_stack_action }} -f {{ openstack_infra_heat_stack }}
- --timeout 3
+ --timeout {{ openstack_heat_timeout }}
-P cluster_env={{ cluster_env }}
-P cluster_id={{ cluster_id }}
-P subnet_24_prefix={{ openstack_subnet_24_prefix }}
@@ -107,10 +107,13 @@
groups: 'meta-environment_{{ cluster_env }}, meta-host-type_etcd, meta-sub-host-type_default, meta-clusterid_{{ cluster_id }}'
openshift_node_labels:
type: "etcd"
+ openstack:
+ public_v4: '{{ item[2] }}'
+ private_v4: '{{ item[1] }}'
with_together:
- - '{{ parsed_outputs.etcd_names }}'
- - '{{ parsed_outputs.etcd_ips }}'
- - '{{ parsed_outputs.etcd_floating_ips }}'
+ - '{{ parsed_outputs.etcd_names }}'
+ - '{{ parsed_outputs.etcd_ips }}'
+ - '{{ parsed_outputs.etcd_floating_ips }}'
- name: Add new master instances groups and variables
add_host:
@@ -121,10 +124,13 @@
groups: 'meta-environment_{{ cluster_env }}, meta-host-type_master, meta-sub-host-type_default, meta-clusterid_{{ cluster_id }}'
openshift_node_labels:
type: "master"
+ openstack:
+ public_v4: '{{ item[2] }}'
+ private_v4: '{{ item[1] }}'
with_together:
- - '{{ parsed_outputs.master_names }}'
- - '{{ parsed_outputs.master_ips }}'
- - '{{ parsed_outputs.master_floating_ips }}'
+ - '{{ parsed_outputs.master_names }}'
+ - '{{ parsed_outputs.master_ips }}'
+ - '{{ parsed_outputs.master_floating_ips }}'
- name: Add new node instances groups and variables
add_host:
@@ -135,10 +141,13 @@
groups: 'meta-environment_{{ cluster_env }}, meta-host-type_node, meta-sub-host-type_compute, meta-clusterid_{{ cluster_id }}'
openshift_node_labels:
type: "compute"
+ openstack:
+ public_v4: '{{ item[2] }}'
+ private_v4: '{{ item[1] }}'
with_together:
- - '{{ parsed_outputs.node_names }}'
- - '{{ parsed_outputs.node_ips }}'
- - '{{ parsed_outputs.node_floating_ips }}'
+ - '{{ parsed_outputs.node_names }}'
+ - '{{ parsed_outputs.node_ips }}'
+ - '{{ parsed_outputs.node_floating_ips }}'
- name: Add new infra instances groups and variables
add_host:
@@ -149,19 +158,22 @@
groups: 'meta-environment_{{ cluster_env }}, meta-host-type_node, meta-sub-host-type_infra, meta-clusterid_{{ cluster_id }}'
openshift_node_labels:
type: "infra"
+ openstack:
+ public_v4: '{{ item[2] }}'
+ private_v4: '{{ item[1] }}'
with_together:
- - '{{ parsed_outputs.infra_names }}'
- - '{{ parsed_outputs.infra_ips }}'
- - '{{ parsed_outputs.infra_floating_ips }}'
+ - '{{ parsed_outputs.infra_names }}'
+ - '{{ parsed_outputs.infra_ips }}'
+ - '{{ parsed_outputs.infra_floating_ips }}'
- name: Wait for ssh
wait_for:
host: '{{ item }}'
port: 22
with_flattened:
- - '{{ parsed_outputs.master_floating_ips }}'
- - '{{ parsed_outputs.node_floating_ips }}'
- - '{{ parsed_outputs.infra_floating_ips }}'
+ - '{{ parsed_outputs.master_floating_ips }}'
+ - '{{ parsed_outputs.node_floating_ips }}'
+ - '{{ parsed_outputs.infra_floating_ips }}'
- name: Wait for user setup
command: 'ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null {{ deployment_vars[deployment_type].ssh_user }}@{{ item }} echo {{ deployment_vars[deployment_type].ssh_user }} user is setup'
@@ -170,9 +182,9 @@
retries: 30
delay: 1
with_flattened:
- - '{{ parsed_outputs.master_floating_ips }}'
- - '{{ parsed_outputs.node_floating_ips }}'
- - '{{ parsed_outputs.infra_floating_ips }}'
+ - '{{ parsed_outputs.master_floating_ips }}'
+ - '{{ parsed_outputs.node_floating_ips }}'
+ - '{{ parsed_outputs.infra_floating_ips }}'
- include: update.yml
diff --git a/playbooks/openstack/openshift-cluster/list.yml b/playbooks/openstack/openshift-cluster/list.yml
index de68f5207..6c6f671be 100644
--- a/playbooks/openstack/openshift-cluster/list.yml
+++ b/playbooks/openstack/openshift-cluster/list.yml
@@ -17,18 +17,8 @@
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
ansible_ssh_host: "{{ hostvars[item].ansible_ssh_host | default(item) }}"
ansible_become: "{{ deployment_vars[deployment_type].become }}"
+ oo_public_ipv4: "{{ hostvars[item].openstack.public_v4 }}"
+ oo_private_ipv4: "{{ hostvars[item].openstack.private_v4 }}"
with_items: "{{ groups[scratch_group] | default([]) | difference(['localhost']) }}"
-
-- name: List Hosts
- hosts: oo_list_hosts
-
-- name: List Hosts
- hosts: localhost
- become: no
- connection: local
- gather_facts: no
- vars_files:
- - vars.yml
- tasks:
- debug:
msg: "{{ hostvars | oo_select_keys(groups[scratch_group] | default([])) | oo_pretty_print_cluster('meta-') }}"
diff --git a/playbooks/openstack/openshift-cluster/terminate.yml b/playbooks/openstack/openshift-cluster/terminate.yml
index 4527f4a28..affb57117 100644
--- a/playbooks/openstack/openshift-cluster/terminate.yml
+++ b/playbooks/openstack/openshift-cluster/terminate.yml
@@ -1,3 +1,4 @@
+---
- name: Terminate instance(s)
hosts: localhost
become: no
diff --git a/playbooks/openstack/openshift-cluster/vars.yml b/playbooks/openstack/openshift-cluster/vars.yml
index 62111dacf..ba2855b73 100644
--- a/playbooks/openstack/openshift-cluster/vars.yml
+++ b/playbooks/openstack/openshift-cluster/vars.yml
@@ -1,3 +1,4 @@
+# yamllint disable rule:colons
---
debug_level: 2
openstack_infra_heat_stack: "{{ lookup('oo_option', 'infra_heat_stack' ) |
@@ -14,6 +15,8 @@ openstack_ssh_access_from: "{{ lookup('oo_option', 'ssh_from') |
default('0.0.0.0/0', True) }}"
openstack_node_port_access_from: "{{ lookup('oo_option', 'node_port_from') |
default('0.0.0.0/0', True) }}"
+openstack_heat_timeout: "{{ lookup('oo_option', 'heat_timeout') |
+ default('3', True) }}"
openstack_flavor:
etcd: "{{ lookup('oo_option', 'etcd_flavor' ) | default('m1.small', True) }}"
master: "{{ lookup('oo_option', 'master_flavor' ) | default('m1.small', True) }}"