Diffstat (limited to 'playbooks')
-rw-r--r--  playbooks/adhoc/metrics_setup/README.md | 25
-rw-r--r--  playbooks/adhoc/metrics_setup/files/metrics-deployer-setup.yaml | 37
-rw-r--r--  playbooks/adhoc/metrics_setup/files/metrics.yaml | 116
-rw-r--r--  playbooks/adhoc/metrics_setup/playbooks/install.yml | 45
-rw-r--r--  playbooks/adhoc/metrics_setup/playbooks/master_config_facts.yml | 10
-rw-r--r--  playbooks/adhoc/metrics_setup/playbooks/uninstall.yml | 16
-rw-r--r--  playbooks/adhoc/noc/get_zabbix_problems.yml | 2
-rw-r--r--  playbooks/adhoc/uninstall.yml | 3
-rw-r--r--  playbooks/aws/openshift-cluster/list.yml | 11
-rw-r--r--  playbooks/aws/openshift-cluster/tasks/launch_instances.yml | 18
-rw-r--r--  playbooks/aws/openshift-cluster/terminate.yml | 12
-rw-r--r--  playbooks/aws/openshift-cluster/update.yml | 4
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/README.md | 4
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml | 6
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml | 2
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml | 26
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_2/upgrade.yml | 2
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml | 5
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml | 1
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_4/README.md | 18
l---------  playbooks/byo/openshift-cluster/upgrades/v3_4/roles | 1
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml | 96
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml | 98
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml | 100
-rw-r--r--  playbooks/byo/openshift-master/restart.yml | 2
-rw-r--r--  playbooks/byo/openshift-master/scaleup.yml | 2
-rw-r--r--  playbooks/byo/openshift-node/scaleup.yml | 2
-rw-r--r--  playbooks/byo/rhel_subscribe.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/additional_config.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/initialize_openshift_version.yml | 16
-rw-r--r--  playbooks/common/openshift-cluster/openshift_hosted.yml | 14
-rw-r--r--  playbooks/common/openshift-cluster/redeploy-certificates.yml | 6
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml | 10
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml | 8
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/etcd/backup.yml | 73
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/etcd/containerized_tasks.yml | 47
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/etcd/fedora_tasks.yml | 23
l---------  playbooks/common/openshift-cluster/upgrades/etcd/files/etcdctl.sh | 1
l---------  playbooks/common/openshift-cluster/upgrades/etcd/filter_plugins | 1
l---------  playbooks/common/openshift-cluster/upgrades/etcd/lookup_plugins | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/etcd/main.yml | 122
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/etcd/rhel_tasks.yml | 23
l---------  playbooks/common/openshift-cluster/upgrades/etcd/roles | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/init.yml | 2
l---------  playbooks/common/openshift-cluster/upgrades/openvswitch-avoid-oom.conf | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/post_control_plane.yml | 6
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml | 4
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml | 113
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml | 6
-rw-r--r--  playbooks/common/openshift-etcd/service.yml | 2
-rw-r--r--  playbooks/common/openshift-loadbalancer/service.yml | 2
-rw-r--r--  playbooks/common/openshift-master/config.yml | 2
-rw-r--r--  playbooks/common/openshift-master/restart.yml | 74
-rw-r--r--  playbooks/common/openshift-master/restart_hosts.yml | 26
-rw-r--r--  playbooks/common/openshift-master/restart_hosts_pacemaker.yml | 25
-rw-r--r--  playbooks/common/openshift-master/restart_services_pacemaker.yml | 10
-rw-r--r--  playbooks/common/openshift-master/scaleup.yml | 2
-rw-r--r--  playbooks/common/openshift-master/service.yml | 2
-rw-r--r--  playbooks/common/openshift-nfs/service.yml | 2
-rw-r--r--  playbooks/common/openshift-node/config.yml | 2
-rw-r--r--  playbooks/common/openshift-node/service.yml | 2
-rw-r--r--  playbooks/gce/openshift-cluster/library/gce.py | 543
-rw-r--r--  playbooks/gce/openshift-cluster/list.yml | 16
-rw-r--r--  playbooks/gce/openshift-cluster/tasks/launch_instances.yml | 7
-rw-r--r--  playbooks/gce/openshift-cluster/terminate.yml | 4
-rw-r--r--  playbooks/gce/openshift-cluster/update.yml | 4
-rw-r--r--  playbooks/libvirt/openshift-cluster/list.yml | 16
-rw-r--r--  playbooks/libvirt/openshift-cluster/terminate.yml | 2
-rw-r--r--  playbooks/openstack/openshift-cluster/files/heat_stack.yaml | 2
-rw-r--r--  playbooks/openstack/openshift-cluster/list.yml | 14
-rw-r--r--  playbooks/openstack/openshift-cluster/update.yml | 4
72 files changed, 780 insertions, 1131 deletions
diff --git a/playbooks/adhoc/metrics_setup/README.md b/playbooks/adhoc/metrics_setup/README.md
deleted file mode 100644
index 71aa1e109..000000000
--- a/playbooks/adhoc/metrics_setup/README.md
+++ /dev/null
@@ -1,25 +0,0 @@
-## Playbook for adding [Metrics](https://github.com/openshift/origin-metrics) to Openshift
-
-See OSE Ansible [readme](https://github.com/openshift/openshift-ansible/blob/master/README_OSE.md) for general install instructions. Playbook has been tested on OSE 3.1/RHEL7.2 cluster
-
-
-Add the following vars to `[OSEv3:vars]` section of your inventory file
-```
-[OSEv3:vars]
-# Enable cluster metrics
-use_cluster_metrics=true
-metrics_external_service=< external service name for metrics >
-metrics_image_prefix=rcm-img-docker01.build.eng.bos.redhat.com:5001/openshift3/
-metrics_image_version=3.1.0
-```
-
-Run playbook
-```
-ansible-playbook -i $INVENTORY_FILE playbooks/install.yml
-```
-
-## Contact
-Email: hawkular-dev@lists.jboss.org
-
-## Credits
-Playbook adapted from install shell scripts by Matt Mahoney
diff --git a/playbooks/adhoc/metrics_setup/files/metrics-deployer-setup.yaml b/playbooks/adhoc/metrics_setup/files/metrics-deployer-setup.yaml
deleted file mode 100644
index f70e0b18b..000000000
--- a/playbooks/adhoc/metrics_setup/files/metrics-deployer-setup.yaml
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/bin/bash
-#
-# Copyright 2014-2015 Red Hat, Inc. and/or its affiliates
-# and other contributors as indicated by the @author tags.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-apiVersion: "v1"
-kind: "List"
-metadata:
- name: metrics-deployer-setup
- annotations:
- description: "Required dependencies for the metrics deployer pod."
- tags: "infrastructure"
-labels:
- metrics-infra: deployer
- provider: openshift
- component: deployer
-items:
--
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: metrics-deployer
- secrets:
- - name: metrics-deployer
diff --git a/playbooks/adhoc/metrics_setup/files/metrics.yaml b/playbooks/adhoc/metrics_setup/files/metrics.yaml
deleted file mode 100644
index d823b2587..000000000
--- a/playbooks/adhoc/metrics_setup/files/metrics.yaml
+++ /dev/null
@@ -1,116 +0,0 @@
-#!/bin/bash
-#
-# Copyright 2014-2015 Red Hat, Inc. and/or its affiliates
-# and other contributors as indicated by the @author tags.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-apiVersion: "v1"
-kind: "Template"
-metadata:
- name: metrics-deployer-template
- annotations:
- description: "Template for deploying the required Metrics integration. Requires cluster-admin 'metrics-deployer' service account and 'metrics-deployer' secret."
- tags: "infrastructure"
-labels:
- metrics-infra: deployer
- provider: openshift
- component: deployer
-objects:
--
- apiVersion: v1
- kind: Pod
- metadata:
- generateName: metrics-deployer-
- spec:
- containers:
- - image: ${IMAGE_PREFIX}metrics-deployer:${IMAGE_VERSION}
- name: deployer
- volumeMounts:
- - name: secret
- mountPath: /secret
- readOnly: true
- - name: empty
- mountPath: /etc/deploy
- env:
- - name: PROJECT
- valueFrom:
- fieldRef:
- fieldPath: metadata.namespace
- - name: IMAGE_PREFIX
- value: ${IMAGE_PREFIX}
- - name: IMAGE_VERSION
- value: ${IMAGE_VERSION}
- - name: PUBLIC_MASTER_URL
- value: ${PUBLIC_MASTER_URL}
- - name: MASTER_URL
- value: ${MASTER_URL}
- - name: REDEPLOY
- value: ${REDEPLOY}
- - name: USE_PERSISTENT_STORAGE
- value: ${USE_PERSISTENT_STORAGE}
- - name: HAWKULAR_METRICS_HOSTNAME
- value: ${HAWKULAR_METRICS_HOSTNAME}
- - name: CASSANDRA_NODES
- value: ${CASSANDRA_NODES}
- - name: CASSANDRA_PV_SIZE
- value: ${CASSANDRA_PV_SIZE}
- - name: METRIC_DURATION
- value: ${METRIC_DURATION}
- dnsPolicy: ClusterFirst
- restartPolicy: Never
- serviceAccount: metrics-deployer
- volumes:
- - name: empty
- emptyDir: {}
- - name: secret
- secret:
- secretName: metrics-deployer
-parameters:
--
- description: 'Specify prefix for metrics components; e.g. for "openshift/origin-metrics-deployer:v1.1", set prefix "openshift/origin-"'
- name: IMAGE_PREFIX
- value: "hawkular/"
--
- description: 'Specify version for metrics components; e.g. for "openshift/origin-metrics-deployer:v1.1", set version "v1.1"'
- name: IMAGE_VERSION
- value: "0.7.0-SNAPSHOT"
--
- description: "Internal URL for the master, for authentication retrieval"
- name: MASTER_URL
- value: "https://kubernetes.default.svc:443"
--
- description: "External hostname where clients will reach Hawkular Metrics"
- name: HAWKULAR_METRICS_HOSTNAME
- required: true
--
- description: "If set to true the deployer will try and delete all the existing components before trying to redeploy."
- name: REDEPLOY
- value: "false"
--
- description: "Set to true for persistent storage, set to false to use non persistent storage"
- name: USE_PERSISTENT_STORAGE
- value: "true"
--
- description: "The number of Cassandra Nodes to deploy for the initial cluster"
- name: CASSANDRA_NODES
- value: "1"
--
- description: "The persistent volume size for each of the Cassandra nodes"
- name: CASSANDRA_PV_SIZE
- value: "1Gi"
--
- description: "How many days metrics should be stored for."
- name: METRIC_DURATION
- value: "7"
diff --git a/playbooks/adhoc/metrics_setup/playbooks/install.yml b/playbooks/adhoc/metrics_setup/playbooks/install.yml
deleted file mode 100644
index a9ec3c1ef..000000000
--- a/playbooks/adhoc/metrics_setup/playbooks/install.yml
+++ /dev/null
@@ -1,45 +0,0 @@
----
-- include: master_config_facts.yml
-- name: "Install metrics"
- hosts: masters
- vars:
- metrics_public_url: "https://{{ metrics_external_service }}/hawkular/metrics"
- tasks:
- - name: "Add metrics url to master config"
- lineinfile: "state=present dest=/etc/origin/master/master-config.yaml regexp='^\ \ metricsPublicURL' insertbefore='^\ \ publicURL' line='\ \ metricsPublicURL: {{ metrics_public_url }}'"
-
- - name: "Restart master service"
- service: name=atomic-openshift-master state=restarted
-
- - name: "Copy metrics-deployer yaml to remote"
- copy: "src=../files/metrics-deployer-setup.yaml dest=/tmp/metrics-deployer-setup.yaml force=yes"
-
- - name: "Add metrics-deployer"
- command: "{{item}}"
- run_once: true
- register: output
- failed_when: ('already exists' not in output.stderr) and (output.rc != 0)
- with_items:
- - oc project openshift-infra
- - oc create -f /tmp/metrics-deployer-setup.yaml
-
- - name: "Give metrics-deployer SA permissions"
- command: "oadm policy add-role-to-user edit system:serviceaccount:openshift-infra:metrics-deployer"
- run_once: true
-
- - name: "Give heapster SA permissions"
- command: "oadm policy add-cluster-role-to-user cluster-reader system:serviceaccount:openshift-infra:heapster"
- run_once: true
-
- - name: "Create metrics-deployer secret"
- command: "oc secrets new metrics-deployer nothing=/dev/null"
- register: output
- failed_when: ('already exists' not in output.stderr) and (output.rc != 0)
- run_once: true
-
- - name: "Copy metrics.yaml to remote"
- copy: "src=../files/metrics.yaml dest=/tmp/metrics.yaml force=yes"
-
- - name: "Process yml template"
- shell: "oc process -f /tmp/metrics.yaml -v MASTER_URL={{ masterPublicURL }},REDEPLOY=true,HAWKULAR_METRICS_HOSTNAME={{ metrics_external_service }},IMAGE_PREFIX={{ metrics_image_prefix }},IMAGE_VERSION={{ metrics_image_version }},USE_PERSISTENT_STORAGE=false | oc create -f -"
- run_once: true
\ No newline at end of file
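
The removed install.yml above tolerates re-runs by registering the `oc create` output and only failing when the error is something other than "already exists". A minimal sketch of that idempotency pattern (the object file path is hypothetical, not part of the diff):
```
# Minimal sketch: tolerate "already exists" so the task is safe to re-run
- name: Create object, tolerating "already exists"
  command: oc create -f /tmp/example-object.yaml   # hypothetical path
  register: create_out
  failed_when: create_out.rc != 0 and ('already exists' not in create_out.stderr)
  changed_when: create_out.rc == 0
```
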
diff --git a/playbooks/adhoc/metrics_setup/playbooks/master_config_facts.yml b/playbooks/adhoc/metrics_setup/playbooks/master_config_facts.yml
deleted file mode 100644
index 65de11bc4..000000000
--- a/playbooks/adhoc/metrics_setup/playbooks/master_config_facts.yml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-- name: "Load master config"
- hosts: masters
- vars:
- master_config_file: "/tmp/ansible-metrics-{{ ansible_hostname }}"
- tasks:
- - name: "Fetch master config from remote"
- fetch: "src=/etc/origin/master/master-config.yaml dest={{ master_config_file }} flat=yes"
- - name: "Load config"
- include_vars: "{{ master_config_file }}"
diff --git a/playbooks/adhoc/metrics_setup/playbooks/uninstall.yml b/playbooks/adhoc/metrics_setup/playbooks/uninstall.yml
deleted file mode 100644
index 06c4586ee..000000000
--- a/playbooks/adhoc/metrics_setup/playbooks/uninstall.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-- name: "Uninstall metrics"
- hosts: masters
- tasks:
- - name: "Remove metrics url from master config"
- lineinfile: "state=absent dest=/etc/origin/master/master-config.yaml regexp='^\ \ metricsPublicURL'"
-
- - name: "Delete metrics objects"
- command: "{{item}}"
- with_items:
- - oc delete all --selector=metrics-infra
- # - oc delete secrets --selector=metrics-infra
- # - oc delete sa --selector=metrics-infra
- - oc delete templates --selector=metrics-infra
- - oc delete sa metrics-deployer
- - oc delete secret metrics-deployer
diff --git a/playbooks/adhoc/noc/get_zabbix_problems.yml b/playbooks/adhoc/noc/get_zabbix_problems.yml
index 79cae24ab..32fc7ce68 100644
--- a/playbooks/adhoc/noc/get_zabbix_problems.yml
+++ b/playbooks/adhoc/noc/get_zabbix_problems.yml
@@ -33,7 +33,7 @@
- add_host:
name: "{{ item }}"
groups: problem_hosts_group
- with_items: problem_hosts
+ with_items: "{{ problem_hosts }}"
- name: "Run on problem hosts"
hosts: problem_hosts_group
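
This hunk, like most of the changes below, wraps a bare `with_items` variable in a quoted Jinja expression; bare variable names in loop directives are deprecated in newer Ansible releases and eventually stop being accepted. A minimal before/after sketch with a hypothetical variable name:
```
# Deprecated: bare variable reference (warns on newer Ansible)
- debug: msg="{{ item }}"
  with_items: my_hosts          # hypothetical variable

# Preferred: templated and quoted
- debug: msg="{{ item }}"
  with_items: "{{ my_hosts }}"
```
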
diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml
index 789f66b14..4ea639cbe 100644
--- a/playbooks/adhoc/uninstall.yml
+++ b/playbooks/adhoc/uninstall.yml
@@ -338,6 +338,7 @@
failed_when: False
with_items:
- etcd
+ - etcd3
- firewalld
- name: Stop additional atomic services
@@ -352,6 +353,7 @@
when: not is_atomic | bool
with_items:
- etcd
+ - etcd3
- shell: systemctl reset-failed
changed_when: False
@@ -365,6 +367,7 @@
- /etc/ansible/facts.d/openshift.fact
- /etc/etcd
- /etc/systemd/system/etcd_container.service
+ - /etc/profile.d/etcdctl.sh
# Intenationally using rm command over file module because if someone had mounted a filesystem
# at /var/lib/etcd then the contents was not removed correctly
diff --git a/playbooks/aws/openshift-cluster/list.yml b/playbooks/aws/openshift-cluster/list.yml
index a542b4ca3..ed8aac398 100644
--- a/playbooks/aws/openshift-cluster/list.yml
+++ b/playbooks/aws/openshift-cluster/list.yml
@@ -16,11 +16,8 @@
groups: oo_list_hosts
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
ansible_become: "{{ deployment_vars[deployment_type].become }}"
- with_items: groups[scratch_group] | default([]) | difference(['localhost'])
-
-- name: List Hosts
- hosts: oo_list_hosts
- gather_facts: no
- tasks:
+ oo_public_ipv4: "{{ hostvars[item].ec2_ip_address }}"
+ oo_private_ipv4: "{{ hostvars[item].ec2_private_ip_address }}"
+ with_items: "{{ groups[scratch_group] | default([]) | difference(['localhost']) }}"
- debug:
- msg: "public ip:{{ hostvars[inventory_hostname].ec2_ip_address }} private ip:{{ hostvars[inventory_hostname].ec2_private_ip_address }}"
+ msg: "{{ hostvars | oo_select_keys(groups[scratch_group] | default([])) | oo_pretty_print_cluster }}"
diff --git a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml
index d22c86cda..4d76d3bfe 100644
--- a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml
+++ b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml
@@ -99,8 +99,8 @@
- name: Add Name tag to instances
ec2_tag: resource={{ item.1.id }} region={{ deployment_vars[deployment_type].region }} state=present
with_together:
- - instances
- - ec2.instances
+ - "{{ instances }}"
+ - "{{ ec2.instances }}"
args:
tags:
Name: "{{ item.0 }}"
@@ -154,8 +154,8 @@
openshift_node_labels: "{{ node_label }}"
logrotate_scripts: "{{ logrotate }}"
with_together:
- - instances
- - ec2.instances
+ - "{{ instances }}"
+ - "{{ ec2.instances }}"
- name: Add new instances to nodes_to_add group if needed
add_host:
@@ -169,13 +169,13 @@
openshift_node_labels: "{{ node_label }}"
logrotate_scripts: "{{ logrotate }}"
with_together:
- - instances
- - ec2.instances
+ - "{{ instances }}"
+ - "{{ ec2.instances }}"
when: oo_extend_env is defined and oo_extend_env | bool
- name: Wait for ssh
wait_for: "port=22 host={{ item.dns_name }}"
- with_items: ec2.instances
+ with_items: "{{ ec2.instances }}"
- name: Wait for user setup
command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null {{ hostvars[item.0].ansible_ssh_user }}@{{ item.1.dns_name }} echo {{ hostvars[item.0].ansible_ssh_user }} user is setup"
@@ -184,5 +184,5 @@
retries: 20
delay: 10
with_together:
- - instances
- - ec2.instances
+ - "{{ instances }}"
+ - "{{ ec2.instances }}"
diff --git a/playbooks/aws/openshift-cluster/terminate.yml b/playbooks/aws/openshift-cluster/terminate.yml
index fb13e1839..7a8375d0e 100644
--- a/playbooks/aws/openshift-cluster/terminate.yml
+++ b/playbooks/aws/openshift-cluster/terminate.yml
@@ -12,7 +12,7 @@
groups: oo_hosts_to_terminate
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
ansible_become: "{{ deployment_vars[deployment_type].become }}"
- with_items: (groups['tag_clusterid_' ~ cluster_id] | default([])) | difference(['localhost'])
+ with_items: "{{ (groups['tag_clusterid_' ~ cluster_id] | default([])) | difference(['localhost']) }}"
- name: Unsubscribe VMs
hosts: oo_hosts_to_terminate
@@ -39,7 +39,7 @@
clusterid: "{{ hostvars[item]['ec2_tag_clusterid'] }}"
host-type: "{{ hostvars[item]['ec2_tag_host-type'] }}"
sub_host_type: "{{ hostvars[item]['ec2_tag_sub-host-type'] }}"
- with_items: groups.oo_hosts_to_terminate
+ with_items: "{{ groups.oo_hosts_to_terminate }}"
when: "'oo_hosts_to_terminate' in groups"
- name: Terminate instances
@@ -49,7 +49,7 @@
region: "{{ hostvars[item].ec2_region }}"
ignore_errors: yes
register: ec2_term
- with_items: groups.oo_hosts_to_terminate
+ with_items: "{{ groups.oo_hosts_to_terminate }}"
when: "'oo_hosts_to_terminate' in groups"
# Fail if any of the instances failed to terminate with an error other
@@ -57,7 +57,7 @@
- fail:
msg: "Terminating instance {{ item.ec2_id }} failed with message {{ item.msg }}"
when: "'oo_hosts_to_terminate' in groups and item.has_key('failed') and item.failed"
- with_items: ec2_term.results
+ with_items: "{{ ec2_term.results }}"
- name: Stop instance if termination failed
ec2:
@@ -66,12 +66,12 @@
region: "{{ item.item.ec2_region }}"
register: ec2_stop
when: "'oo_hosts_to_terminate' in groups and item.has_key('failed') and item.failed"
- with_items: ec2_term.results
+ with_items: "{{ ec2_term.results }}"
- name: Rename stopped instances
ec2_tag: resource={{ item.item.item.ec2_id }} region={{ item.item.item.ec2_region }} state=present
args:
tags:
Name: "{{ item.item.item.ec2_tag_Name }}-terminate"
- with_items: ec2_stop.results
+ with_items: "{{ ec2_stop.results }}"
when: ec2_stop | changed
diff --git a/playbooks/aws/openshift-cluster/update.yml b/playbooks/aws/openshift-cluster/update.yml
index d762203b2..ed05d61ed 100644
--- a/playbooks/aws/openshift-cluster/update.yml
+++ b/playbooks/aws/openshift-cluster/update.yml
@@ -7,7 +7,7 @@
- add_host:
name: "{{ item }}"
groups: l_oo_all_hosts
- with_items: g_all_hosts
+ with_items: "{{ g_all_hosts }}"
- hosts: l_oo_all_hosts
gather_facts: no
@@ -27,7 +27,7 @@
groups: oo_hosts_to_update
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
ansible_become: "{{ deployment_vars[deployment_type].become }}"
- with_items: g_all_hosts | default([])
+ with_items: "{{ g_all_hosts | default([]) }}"
- include: ../../common/openshift-cluster/update_repos_and_packages.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/README.md b/playbooks/byo/openshift-cluster/upgrades/README.md
index ca01dbc9d..de4e34e2d 100644
--- a/playbooks/byo/openshift-cluster/upgrades/README.md
+++ b/playbooks/byo/openshift-cluster/upgrades/README.md
@@ -4,5 +4,5 @@ cluster. Additional notes for the associated upgrade playbooks are
provided in their respective directories.
# Upgrades available
-- [OpenShift Enterprise 3.0 to latest minor release](v3_0_minor/README.md)
-- [OpenShift Enterprise 3.0 to 3.1](v3_0_to_v3_1/README.md)
+- [OpenShift Enterprise 3.2 to 3.3](v3_3/README.md)
+- [OpenShift Enterprise 3.1 to 3.2](v3_2/README.md)
diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml
index 9be6becc1..834461e14 100644
--- a/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml
@@ -25,13 +25,13 @@
tasks:
- name: Prepare for Node evacuation
command: >
- {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename }} --schedulable=false
+ {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename }} --schedulable=false
delegate_to: "{{ groups.oo_first_master.0 }}"
when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade
- name: Evacuate Node for Kubelet upgrade
command: >
- {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename }} --evacuate --force
+ {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename }} --evacuate --force
delegate_to: "{{ groups.oo_first_master.0 }}"
when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade
@@ -40,7 +40,7 @@
- name: Set node schedulability
command: >
- {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename }} --schedulable=true
+ {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename }} --schedulable=true
delegate_to: "{{ groups.oo_first_master.0 }}"
when: openshift.node.schedulable | bool
when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade and openshift.node.schedulable | bool
diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml
index 1755203a4..a3ab78ccf 100644
--- a/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml
@@ -10,7 +10,7 @@
- add_host:
name: "{{ item }}"
groups: l_oo_all_hosts
- with_items: g_all_hosts | default([])
+ with_items: "{{ g_all_hosts | default([]) }}"
changed_when: false
- hosts: l_oo_all_hosts
diff --git a/playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml b/playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml
new file mode 100644
index 000000000..c25f96212
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml
@@ -0,0 +1,26 @@
+---
+- include: ../../../common/openshift-cluster/verify_ansible_version.yml
+
+- name: Create initial host groups for localhost
+ hosts: localhost
+ connection: local
+ become: no
+ gather_facts: no
+ tags:
+ - always
+ tasks:
+ - include_vars: ../cluster_hosts.yml
+ - add_host:
+ name: "{{ item }}"
+ groups: l_oo_all_hosts
+ with_items: "{{ g_all_hosts | default([]) }}"
+
+- name: Create initial host groups for all hosts
+ hosts: l_oo_all_hosts
+ gather_facts: no
+ tags:
+ - always
+ tasks:
+ - include_vars: ../cluster_hosts.yml
+
+- include: ../../../common/openshift-cluster/upgrades/etcd/main.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_2/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_2/upgrade.yml
index 5d549eee7..d92761e48 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_2/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_2/upgrade.yml
@@ -10,7 +10,7 @@
- add_host:
name: "{{ item }}"
groups: l_oo_all_hosts
- with_items: g_all_hosts | default([])
+ with_items: "{{ g_all_hosts | default([]) }}"
- hosts: l_oo_all_hosts
gather_facts: no
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml
index 7a3829283..9a5d84751 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml
@@ -92,10 +92,9 @@
vars:
master_config_hook: "v3_3/master_config_upgrade.yml"
+- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml
+
- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml
vars:
node_config_hook: "v3_3/node_config_upgrade.yml"
-- include: ../../../openshift-master/restart.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
index d6af71827..c9338a960 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
@@ -98,3 +98,4 @@
master_config_hook: "v3_3/master_config_upgrade.yml"
- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml
+
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/README.md b/playbooks/byo/openshift-cluster/upgrades/v3_4/README.md
new file mode 100644
index 000000000..85b807dc6
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_4/README.md
@@ -0,0 +1,18 @@
+# v3.4 Major and Minor Upgrade Playbook
+
+## Overview
+This playbook currently performs the
+following steps.
+
+ * Upgrade and restart master services
+ * Unschedule node.
+ * Upgrade and restart docker
+ * Upgrade and restart node services
+ * Modifies the subset of the configuration necessary
+ * Applies the latest cluster policies
+ * Updates the default router if one exists
+ * Updates the default registry if one exists
+ * Updates image streams and quickstarts
+
+## Usage
+ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/roles b/playbooks/byo/openshift-cluster/upgrades/v3_4/roles
new file mode 120000
index 000000000..6bc1a7aef
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_4/roles
@@ -0,0 +1 @@
+../../../../../roles
\ No newline at end of file
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml
new file mode 100644
index 000000000..4f8a80ee8
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml
@@ -0,0 +1,96 @@
+---
+#
+# Full Control Plane + Nodes Upgrade
+#
+- include: ../../../../common/openshift-cluster/upgrades/init.yml
+ tags:
+ - pre_upgrade
+
+# Configure the upgrade target for the common upgrade tasks:
+- hosts: l_oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
+ openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
+
+# Pre-upgrade
+
+- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos and initialize facts on all hosts
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
+ tags:
+ - pre_upgrade
+ roles:
+ - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+ openshift_generate_no_proxy_hosts | default(True) | bool }}"
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+ tasks:
+ - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml
+
+- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml
+
+- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml
+
+- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml
+
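
Because every pre-flight play in this new upgrade.yml carries the `pre_upgrade` tag, the checks can presumably be run on their own before committing to the upgrade, e.g. `ansible-playbook -i <inventory> playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml --tags pre_upgrade` (standard Ansible tag filtering; the inventory path is a placeholder).
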
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
new file mode 100644
index 000000000..8cde2ac88
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
@@ -0,0 +1,98 @@
+---
+#
+# Control Plane Upgrade Playbook
+#
+# Upgrades masters and Docker (only on standalone etcd hosts)
+#
+# This upgrade does not include:
+# - node service running on masters
+# - docker running on masters
+# - node service running on dedicated nodes
+#
+# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
+#
+- include: ../../../../common/openshift-cluster/upgrades/init.yml
+ tags:
+ - pre_upgrade
+
+# Configure the upgrade target for the common upgrade tasks:
+- hosts: l_oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
+ openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
+
+# Pre-upgrade
+
+- name: Update repos on control plane hosts
+ hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
+ tags:
+ - pre_upgrade
+ roles:
+ - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+ openshift_generate_no_proxy_hosts | default(True) | bool }}"
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_masters_to_config:oo_etcd_to_config
+ tasks:
+ - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml
+
+- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml
+
+- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml
new file mode 100644
index 000000000..f385d4f22
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml
@@ -0,0 +1,100 @@
+---
+#
+# Node Upgrade Playbook
+#
+# Upgrades nodes only, but requires the control plane to have already been upgraded.
+#
+- include: ../../../../common/openshift-cluster/upgrades/init.yml
+ tags:
+ - pre_upgrade
+
+# Configure the upgrade target for the common upgrade tasks:
+- hosts: l_oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
+ openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
+
+# Pre-upgrade
+- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos on nodes
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
+ roles:
+ - openshift_repos
+ tags:
+ - pre_upgrade
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+ openshift_generate_no_proxy_hosts | default(True) | bool }}"
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- name: Verify masters are already upgraded
+ hosts: oo_masters_to_config
+ tags:
+ - pre_upgrade
+ tasks:
+ - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
+ when: openshift.common.version != openshift_version
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml
+
+- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml
diff --git a/playbooks/byo/openshift-master/restart.yml b/playbooks/byo/openshift-master/restart.yml
index 0cf669ae3..0a163526a 100644
--- a/playbooks/byo/openshift-master/restart.yml
+++ b/playbooks/byo/openshift-master/restart.yml
@@ -8,7 +8,7 @@
- add_host:
name: "{{ item }}"
groups: l_oo_all_hosts
- with_items: g_all_hosts
+ with_items: "{{ g_all_hosts }}"
- hosts: l_oo_all_hosts
gather_facts: no
diff --git a/playbooks/byo/openshift-master/scaleup.yml b/playbooks/byo/openshift-master/scaleup.yml
index fced79262..279eeab21 100644
--- a/playbooks/byo/openshift-master/scaleup.yml
+++ b/playbooks/byo/openshift-master/scaleup.yml
@@ -8,7 +8,7 @@
- add_host:
name: "{{ item }}"
groups: l_oo_all_hosts
- with_items: g_all_hosts
+ with_items: "{{ g_all_hosts }}"
- hosts: l_oo_all_hosts
gather_facts: no
diff --git a/playbooks/byo/openshift-node/scaleup.yml b/playbooks/byo/openshift-node/scaleup.yml
index 5737bb0e0..902221931 100644
--- a/playbooks/byo/openshift-node/scaleup.yml
+++ b/playbooks/byo/openshift-node/scaleup.yml
@@ -8,7 +8,7 @@
- add_host:
name: "{{ item }}"
groups: l_oo_all_hosts
- with_items: g_all_hosts
+ with_items: "{{ g_all_hosts }}"
- hosts: l_oo_all_hosts
gather_facts: no
diff --git a/playbooks/byo/rhel_subscribe.yml b/playbooks/byo/rhel_subscribe.yml
index f093411ef..f36caeb36 100644
--- a/playbooks/byo/rhel_subscribe.yml
+++ b/playbooks/byo/rhel_subscribe.yml
@@ -8,7 +8,7 @@
- add_host:
name: "{{ item }}"
groups: l_oo_all_hosts
- with_items: g_all_hosts
+ with_items: "{{ g_all_hosts }}"
- hosts: l_oo_all_hosts
gather_facts: no
diff --git a/playbooks/common/openshift-cluster/additional_config.yml b/playbooks/common/openshift-cluster/additional_config.yml
index 26b31d313..825f46415 100644
--- a/playbooks/common/openshift-cluster/additional_config.yml
+++ b/playbooks/common/openshift-cluster/additional_config.yml
@@ -11,6 +11,8 @@
- role: openshift_examples
registry_url: "{{ openshift.master.registry_url }}"
when: openshift.common.install_examples | bool
+ - role: openshift_hosted_templates
+ registry_url: "{{ openshift.master.registry_url }}"
- role: openshift_manageiq
when: openshift.common.use_manageiq | bool
- role: cockpit
diff --git a/playbooks/common/openshift-cluster/initialize_openshift_version.yml b/playbooks/common/openshift-cluster/initialize_openshift_version.yml
index 7112a6084..2f384ddea 100644
--- a/playbooks/common/openshift-cluster/initialize_openshift_version.yml
+++ b/playbooks/common/openshift-cluster/initialize_openshift_version.yml
@@ -1,5 +1,21 @@
---
# NOTE: requires openshift_facts be run
+- hosts: l_oo_all_hosts
+ gather_facts: no
+ tasks:
+ # See:
+ # https://bugzilla.redhat.com/show_bug.cgi?id=1395047
+ # https://bugzilla.redhat.com/show_bug.cgi?id=1282961
+ # https://github.com/openshift/openshift-ansible/issues/1138
+ - name: Check for bad combinations of yum and subscription-manager
+ command: >
+ {{ repoquery_cmd }} --installed --qf '%{version}' "yum"
+ register: yum_ver_test
+ changed_when: false
+ - fail:
+ msg: Incompatible versions of yum and subscription-manager found. You may need to update yum and yum-utils.
+ when: "'Plugin \"search-disabled-repos\" requires API 2.7. Supported API is 2.6.' in yum_ver_test.stdout"
+
- name: Determine openshift_version to configure on first master
hosts: oo_first_master
roles:
diff --git a/playbooks/common/openshift-cluster/openshift_hosted.yml b/playbooks/common/openshift-cluster/openshift_hosted.yml
index 2ba7fded5..ccbba54b4 100644
--- a/playbooks/common/openshift-cluster/openshift_hosted.yml
+++ b/playbooks/common/openshift-cluster/openshift_hosted.yml
@@ -56,13 +56,13 @@
openshift_hosted_logging_ops_hostname: "{{ logging_ops_hostname }}"
openshift_hosted_logging_master_public_url: "{{ logging_master_public_url }}"
openshift_hosted_logging_elasticsearch_cluster_size: "{{ logging_elasticsearch_cluster_size }}"
- openshift_hosted_logging_elasticsearch_pvc_dynamic: "{{ 'true' if openshift.hosted.logging.storage_kind | default(none) == 'dynamic' else 'false' }}"
- openshift_hosted_logging_elasticsearch_pvc_size: "{{ openshift.hosted.logging.storage.volume.size if openshift.hosted.logging.storage_kind | default(none) == 'dynamic' else '' }}"
- openshift_hosted_logging_elasticsearch_pvc_prefix: "{{ 'logging-es' if openshift.hosted.logging.storage_kind | default(none) is not none else '' }}"
+ openshift_hosted_logging_elasticsearch_pvc_dynamic: "{{ 'true' if openshift_hosted_logging_storage_kind | default(none) == 'dynamic' else '' }}"
+ openshift_hosted_logging_elasticsearch_pvc_size: "{{ openshift.hosted.logging.storage.volume.size if openshift_hosted_logging_storage_kind | default(none) in ['dynamic','nfs'] else '' }}"
+ openshift_hosted_logging_elasticsearch_pvc_prefix: "{{ 'logging-es' if openshift_hosted_logging_storage_kind | default(none) == 'dynamic' else '' }}"
openshift_hosted_logging_elasticsearch_ops_cluster_size: "{{ logging_elasticsearch_ops_cluster_size }}"
- openshift_hosted_logging_elasticsearch_ops_pvc_dynamic: "{{ 'true' if openshift.hosted.logging.storage_kind | default(none) == 'dynamic' else 'false' }}"
- openshift_hosted_logging_elasticsearch_ops_pvc_size: "{{ openshift.hosted.logging.storage.volume.size if openshift.hosted.logging.storage_kind | default(none) == 'dynamic' else '' }}"
- openshift_hosted_logging_elasticsearch_ops_pvc_prefix: "{{ 'logging-es' if openshift.hosted.logging.storage_kind | default(none) is not none else '' }}"
+ openshift_hosted_logging_elasticsearch_ops_pvc_dynamic: "{{ 'true' if openshift_hosted_logging_storage_kind | default(none) == 'dynamic' else '' }}"
+ openshift_hosted_logging_elasticsearch_ops_pvc_size: "{{ openshift.hosted.logging.storage.volume.size if openshift_hosted_logging_storage_kind | default(none) in ['dynamic','nfs' ] else '' }}"
+ openshift_hosted_logging_elasticsearch_ops_pvc_prefix: "{{ 'logging-es' if openshift_hosted_logging_storage_kind | default(none) =='dynamic' else '' }}"
- role: cockpit-ui
- when: ( openshift.common.version_gte_3_3_or_1_3 | bool ) and ( openshift_hosted_manage_registry | default(true) | bool )
+ when: ( openshift.common.version_gte_3_3_or_1_3 | bool ) and ( openshift_hosted_manage_registry | default(true) | bool ) and not (openshift.docker.hosted_registry_insecure | default(false) | bool)
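
The replacement values above use inline Jinja conditionals to emit a setting only for certain storage kinds and an empty string otherwise. A standalone sketch of the pattern, using hypothetical variable names rather than the role's real ones:
```
- hosts: localhost
  gather_facts: no
  vars:
    my_storage_kind: dynamic   # hypothetical variable name
  tasks:
    # Prints "pvc_dynamic=true" only when the storage kind is 'dynamic'
    - debug:
        msg: "pvc_dynamic={{ 'true' if my_storage_kind | default(none) == 'dynamic' else '' }}"
```
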
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates.yml b/playbooks/common/openshift-cluster/redeploy-certificates.yml
index 4996c56a7..5f008a045 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates.yml
@@ -224,7 +224,7 @@
- name: Prepare for node evacuation
command: >
- {{ openshift.common.admin_binary }} --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig
+ {{ openshift.common.client_binary }} adm --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig
manage-node {{ openshift.node.nodename }}
--schedulable=false
delegate_to: "{{ groups.oo_first_master.0 }}"
@@ -232,7 +232,7 @@
- name: Evacuate node
command: >
- {{ openshift.common.admin_binary }} --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig
+ {{ openshift.common.client_binary }} adm --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig
manage-node {{ openshift.node.nodename }}
--evacuate --force
delegate_to: "{{ groups.oo_first_master.0 }}"
@@ -240,7 +240,7 @@
- name: Set node schedulability
command: >
- {{ openshift.common.admin_binary }} --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig
+ {{ openshift.common.client_binary }} adm --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig
manage-node {{ openshift.node.nodename }} --schedulable=true
delegate_to: "{{ groups.oo_first_master.0 }}"
when: openshift_certificates_redeploy_ca | default(false) | bool and was_schedulable | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml
index 32a3636aa..439df5ffd 100644
--- a/playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml
@@ -1,5 +1,3 @@
-- include_vars: ../../../../roles/openshift_node/vars/main.yml
-
- name: Update systemd units
include: ../../../../roles/openshift_node/tasks/systemd_units.yml openshift_version={{ openshift_image_tag }}
diff --git a/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml b/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml
index 78f6c46f3..23cf8cf76 100644
--- a/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml
+++ b/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml
@@ -22,11 +22,11 @@
- name: Create service signer certificate
command: >
- {{ openshift.common.admin_binary }} ca create-signer-cert
- --cert=service-signer.crt
- --key=service-signer.key
- --name=openshift-service-serving-signer
- --serial=service-signer.serial.txt
+ {{ openshift.common.client_binary }} adm ca create-signer-cert
+ --cert="{{ remote_cert_create_tmpdir.stdout }}/"service-signer.crt
+ --key="{{ remote_cert_create_tmpdir.stdout }}/"service-signer.key
+ --name="{{ remote_cert_create_tmpdir.stdout }}/"openshift-service-serving-signer
+ --serial="{{ remote_cert_create_tmpdir.stdout }}/"service-signer.serial.txt
args:
chdir: "{{ remote_cert_create_tmpdir.stdout }}/"
when: not (hostvars[groups.oo_first_master.0].service_signer_cert_stat.stat.exists | bool)
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml b/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml
index fc26d029e..ee75aa853 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml
@@ -22,22 +22,24 @@
command: >
{{ repoquery_cmd }} --qf '%{version}' "docker"
register: avail_docker_version
+ # Don't expect docker rpm to be available on hosts that don't already have it installed:
+ when: pkg_check.rc == 0
failed_when: false
changed_when: false
- fail:
msg: This playbook requires access to Docker 1.10 or later
# Disable the 1.10 requirement if the user set a specific Docker version
- when: docker_version is not defined and (docker_upgrade is not defined or docker_upgrade | bool == True) and (avail_docker_version.stdout == "" or avail_docker_version.stdout | version_compare('1.10','<'))
+ when: docker_version is not defined and (docker_upgrade is not defined or docker_upgrade | bool == True) and (pkg_check.rc == 0 and (avail_docker_version.stdout == "" or avail_docker_version.stdout | version_compare('1.10','<')))
# Default l_docker_upgrade to False, we'll set to True if an upgrade is required:
- set_fact:
l_docker_upgrade: False
-# Make sure a docker_verison is set if none was requested:
+# Make sure a docker_version is set if none was requested:
- set_fact:
docker_version: "{{ avail_docker_version.stdout }}"
- when: docker_version is not defined
+ when: pkg_check.rc == 0 and docker_version is not defined
- name: Flag for Docker upgrade if necessary
set_fact:
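
The added `when: pkg_check.rc == 0` conditions skip the repoquery lookup on hosts that never had the docker RPM installed, instead of failing there. A minimal sketch of the guard with a hypothetical package name:
```
# Minimal sketch: only query repos for packages that are already installed
- name: Check whether the package is installed
  command: rpm -q mypackage   # hypothetical package name
  register: pkg_check
  failed_when: false
  changed_when: false

- name: Query the available version only where the package exists
  command: repoquery --qf '%{version}' mypackage
  register: avail_version
  when: pkg_check.rc == 0
  failed_when: false
  changed_when: false
```
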
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml
new file mode 100644
index 000000000..57b156b1c
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml
@@ -0,0 +1,73 @@
+- name: Backup etcd
+ hosts: etcd_hosts_to_backup
+ vars:
+ embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}"
+ timestamp: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
+ roles:
+ - openshift_facts
+ tasks:
+ # Ensure we persist the etcd role for this host in openshift_facts
+ - openshift_facts:
+ role: etcd
+ local_facts: {}
+ when: "'etcd' not in openshift"
+
+ - stat: path=/var/lib/openshift
+ register: var_lib_openshift
+
+ - stat: path=/var/lib/origin
+ register: var_lib_origin
+
+ - name: Create origin symlink if necessary
+ file: src=/var/lib/openshift/ dest=/var/lib/origin state=link
+ when: var_lib_openshift.stat.exists == True and var_lib_origin.stat.exists == False
+
+ # TODO: replace shell module with command and update later checks
+ # We assume to be using the data dir for all backups.
+ - name: Check available disk space for etcd backup
+ shell: df --output=avail -k {{ openshift.common.data_dir }} | tail -n 1
+ register: avail_disk
+
+ # TODO: replace shell module with command and update later checks
+ - name: Check current embedded etcd disk usage
+ shell: du -k {{ openshift.etcd.etcd_data_dir }} | tail -n 1 | cut -f1
+ register: etcd_disk_usage
+ when: embedded_etcd | bool
+
+ - name: Abort if insufficient disk space for etcd backup
+ fail:
+ msg: >
+ {{ etcd_disk_usage.stdout }} Kb disk space required for etcd backup,
+ {{ avail_disk.stdout }} Kb available.
+ when: (embedded_etcd | bool) and (etcd_disk_usage.stdout|int > avail_disk.stdout|int)
+
+ - name: Install etcd (for etcdctl)
+ action: "{{ ansible_pkg_mgr }} name=etcd state=present"
+ when: not openshift.common.is_atomic | bool
+
+ - name: Generate etcd backup
+ command: >
+ etcdctl backup --data-dir={{ openshift.etcd.etcd_data_dir }}
+ --backup-dir={{ openshift.common.data_dir }}/etcd-backup-{{ backup_tag | default('') }}{{ timestamp }}
+
+ - set_fact:
+ etcd_backup_complete: True
+
+ - name: Display location of etcd backup
+ debug:
+ msg: "Etcd backup created in {{ openshift.common.data_dir }}/etcd-backup-{{ backup_tag | default('') }}{{ timestamp }}"
+
+- name: Gate on etcd backup
+ hosts: localhost
+ connection: local
+ become: no
+ tasks:
+ - set_fact:
+ etcd_backup_completed: "{{ hostvars
+ | oo_select_keys(groups.etcd_hosts_to_backup)
+ | oo_collect('inventory_hostname', {'etcd_backup_complete': true}) }}"
+ - set_fact:
+ etcd_backup_failed: "{{ groups.etcd_hosts_to_backup | difference(etcd_backup_completed) }}"
+ - fail:
+ msg: "Upgrade cannot continue. The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}"
+ when: etcd_backup_failed | length > 0
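
backup.yml sets an `etcd_backup_complete` fact on each backup host and then gates on localhost using the repository's `oo_select_keys`/`oo_collect` filter plugins. A simplified sketch of the same gate without the custom filters:
```
- name: Gate on etcd backup (simplified sketch)
  hosts: localhost
  connection: local
  become: no
  gather_facts: no
  tasks:
    # Fail if any backup host did not record the completion fact
    - fail:
        msg: "etcd backup did not complete on {{ item }}"
      when: not (hostvars[item].etcd_backup_complete | default(false) | bool)
      with_items: "{{ groups['etcd_hosts_to_backup'] | default([]) }}"
```
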
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/containerized_tasks.yml b/playbooks/common/openshift-cluster/upgrades/etcd/containerized_tasks.yml
new file mode 100644
index 000000000..35f391f8c
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/containerized_tasks.yml
@@ -0,0 +1,47 @@
+---
+- name: Verify cluster is healthy pre-upgrade
+ command: "etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://{{ openshift.common.hostname }}:2379 cluster-health"
+
+- name: Get current image
+ shell: grep 'ExecStart=' /etc/systemd/system/etcd_container.service | awk '{print $NF}'
+ register: current_image
+
+- name: Set new_etcd_image
+ set_fact:
+ new_etcd_image: "{{ current_image.stdout | regex_replace('/etcd.*$','/etcd3:' ~ upgrade_version ) if upgrade_version | version_compare('3.0','>=')
+ else current_image.stdout.split(':')[0] ~ ':' ~ upgrade_version }}"
+
+- name: Pull new etcd image
+ command: "docker pull {{ new_etcd_image }}"
+
+- name: Update to latest etcd image
+ replace:
+ dest: /etc/systemd/system/etcd_container.service
+ regexp: "{{ current_image.stdout }}$"
+ replace: "{{ new_etcd_image }}"
+
+- name: Restart etcd_container
+ systemd:
+ name: etcd_container
+ daemon_reload: yes
+ state: restarted
+
+## TODO: probably should just move this into the backup playbooks, also this
+## will fail on atomic host. We need to revisit how to do etcd backups there as
+## the container may be newer than etcdctl on the host. Assumes etcd3 obsoletes etcd (7.3.1)
+- name: Upgrade etcd for etcdctl when not atomic
+ action: "{{ ansible_pkg_mgr }} name=etcd ensure=latest"
+ when: not openshift.common.is_atomic | bool
+
+- name: Verify cluster is healthy
+ command: "etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://{{ openshift.common.hostname }}:2379 cluster-health"
+ register: etcdctl
+ until: etcdctl.rc == 0
+ retries: 3
+ delay: 10
+
+- name: Store new etcd_image
+ openshift_facts:
+ role: etcd
+ local_facts:
+ etcd_image: "{{ new_etcd_image }}"
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/fedora_tasks.yml b/playbooks/common/openshift-cluster/upgrades/etcd/fedora_tasks.yml
new file mode 100644
index 000000000..30232110e
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/fedora_tasks.yml
@@ -0,0 +1,23 @@
+---
+# F23 GA'd with etcd 2.0, currently has 2.2 in updates
+# F24 GA'd with etcd-2.2, currently has 2.2 in updates
+# F25 Beta currently has etcd 3.0
+- name: Verify cluster is healthy pre-upgrade
+ command: "etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://{{ openshift.common.hostname }}:2379 cluster-health"
+
+- name: Update etcd
+ package:
+ name: "etcd"
+ state: "latest"
+
+- name: Restart etcd
+ service:
+ name: etcd
+ state: restarted
+
+- name: Verify cluster is healthy
+ command: "etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://{{ openshift.common.hostname }}:2379 cluster-health"
+ register: etcdctl
+ until: etcdctl.rc == 0
+ retries: 3
+ delay: 10
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/files/etcdctl.sh b/playbooks/common/openshift-cluster/upgrades/etcd/files/etcdctl.sh
new file mode 120000
index 000000000..641e04e44
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/files/etcdctl.sh
@@ -0,0 +1 @@
+../roles/etcd/files/etcdctl.sh \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/filter_plugins b/playbooks/common/openshift-cluster/upgrades/etcd/filter_plugins
new file mode 120000
index 000000000..27ddaa18b
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/filter_plugins
@@ -0,0 +1 @@
+../../../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/lookup_plugins b/playbooks/common/openshift-cluster/upgrades/etcd/lookup_plugins
new file mode 120000
index 000000000..cf407f69b
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/lookup_plugins
@@ -0,0 +1 @@
+../../../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/main.yml b/playbooks/common/openshift-cluster/upgrades/etcd/main.yml
new file mode 100644
index 000000000..cce844403
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/main.yml
@@ -0,0 +1,122 @@
+---
+# For 1.4/3.4 we want to upgrade everyone to etcd-3.0. The etcd docs say to
+# upgrade from 2.0.x to 2.1.x to 2.2.x to 2.3.x to 3.0.x. While this is a tedious
+# task on RHEL and CentOS, it's simply not possible on Fedora unless you've
+# mirrored the packages yourself, because only the GA and latest versions are
+# available in the repos. So for Fedora we'll simply skip this, sorry.
+
+- include: ../../evaluate_groups.yml
+ tags:
+ - always
+
+- name: Evaluate additional groups for upgrade
+ hosts: localhost
+ connection: local
+ become: no
+ tasks:
+ - name: Evaluate etcd_hosts_to_upgrade
+ add_host:
+ name: "{{ item }}"
+ groups: etcd_hosts_to_upgrade, etcd_hosts_to_backup
+ with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master }}"
+
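+# When no dedicated etcd hosts are configured, the first master (embedded etcd)
+# is used for both the upgrade and the backups.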
+- name: Backup etcd before upgrading anything
+ include: backup.yml
+ vars:
+ backup_tag: "pre-upgrade-"
+
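+# The roles/ symlink added next to these playbooks lets the include below
+# resolve to the shared etcd role.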
+- name: Drop etcdctl profiles
+ hosts: etcd_hosts_to_upgrade
+ tasks:
+ - include: roles/etcd/tasks/etcdctl.yml
+
+- name: Determine etcd version
+ hosts: etcd_hosts_to_upgrade
+ tasks:
+ - name: Record RPM based etcd version
+ command: rpm -qa --qf '%{version}' etcd\*
+ register: etcd_installed_version
+ failed_when: false
+ when: not openshift.common.is_containerized | bool
+ - name: Record containerized etcd version
+ command: docker exec etcd_container rpm -qa --qf '%{version}' etcd\*
+ register: etcd_installed_version
+ failed_when: false
+ when: openshift.common.is_containerized | bool
+
+# I really dislike this copy/paste, but I wasn't able to find a way to loop
+# through hosts and then loop through tasks only when appropriate.
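+# The default('99') below keeps the version_compare false (and the include
+# skipped) when no etcd version was recorded for a given variant, rather than
+# failing on an undefined variable.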
+- name: Upgrade to 2.1
+ hosts: etcd_hosts_to_upgrade
+ serial: 1
+ vars:
+ upgrade_version: '2.1'
+ tasks:
+ - include: rhel_tasks.yml
+ when: etcd_installed_version.stdout | default('99') | version_compare('2.1','<') and ansible_distribution == 'RedHat' and not openshift.common.is_containerized | bool
+
+- name: Upgrade RPM hosts to 2.2
+ hosts: etcd_hosts_to_upgrade
+ serial: 1
+ vars:
+ upgrade_version: '2.2'
+ tasks:
+ - include: rhel_tasks.yml
+ when: etcd_installed_version.stdout | default('99') | version_compare('2.2','<') and ansible_distribution == 'RedHat' and not openshift.common.is_containerized | bool
+
+- name: Upgrade containerized hosts to 2.2.5
+ hosts: etcd_hosts_to_upgrade
+ serial: 1
+ vars:
+ upgrade_version: 2.2.5
+ tasks:
+ - include: containerized_tasks.yml
+ when: etcd_installed_version.stdout | default('99') | version_compare('2.2','<') and openshift.common.is_containerized | bool
+
+- name: Upgrade RPM hosts to 2.3
+ hosts: etcd_hosts_to_upgrade
+ serial: 1
+ vars:
+ upgrade_version: '2.3'
+ tasks:
+ - include: rhel_tasks.yml
+ when: etcd_installed_version.stdout | default('99') | version_compare('2.3','<') and ansible_distribution == 'RedHat' and not openshift.common.is_containerized | bool
+
+- name: Upgrade containerized hosts to 2.3.7
+ hosts: etcd_hosts_to_upgrade
+ serial: 1
+ vars:
+ upgrade_version: 2.3.7
+ tasks:
+ - include: containerized_tasks.yml
+ when: etcd_installed_version.stdout | default('99') | version_compare('2.3','<') and openshift.common.is_containerized | bool
+
+- name: Upgrade RPM hosts to 3.0
+ hosts: etcd_hosts_to_upgrade
+ serial: 1
+ vars:
+ upgrade_version: '3.0'
+ tasks:
+ - include: rhel_tasks.yml
+ when: etcd_installed_version.stdout | default('99') | version_compare('3.0','<') and ansible_distribution == 'RedHat' and not openshift.common.is_containerized | bool
+
+- name: Upgrade containerized hosts to etcd3 image
+ hosts: etcd_hosts_to_upgrade
+ serial: 1
+ vars:
+ upgrade_version: 3.0.3
+ tasks:
+ - include: containerized_tasks.yml
+ when: etcd_installed_version.stdout | default('99') | version_compare('3.0','<') and openshift.common.is_containerized | bool
+
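+# Fedora hosts skip the stepped upgrade entirely (see the note at the top of
+# this file) and just move to the latest packaged etcd.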
+- name: Upgrade fedora to latest
+ hosts: etcd_hosts_to_upgrade
+ serial: 1
+ tasks:
+ - include: fedora_tasks.yml
+ when: ansible_distribution == 'Fedora' and not openshift.common.is_containerized | bool
+
+- name: Backup etcd
+ include: backup.yml
+ vars:
+ backup_tag: "post-3.0-"
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/rhel_tasks.yml b/playbooks/common/openshift-cluster/upgrades/etcd/rhel_tasks.yml
new file mode 100644
index 000000000..8e7dc9d9b
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/rhel_tasks.yml
@@ -0,0 +1,23 @@
+---
+- name: Verify cluster is healthy pre-upgrade
+ command: "etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://{{ openshift.common.hostname }}:2379 cluster-health"
+
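+# Below 3.0, install the requested etcd series while excluding etcd3 so the
+# package manager cannot jump ahead; from 3.0 on, install the etcd3 package instead.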
+- name: Update etcd package but exclude etcd3
+ command: "{{ ansible_pkg_mgr }} install -y etcd-{{ upgrade_version }}\\* --exclude etcd3"
+ when: upgrade_version | version_compare('3.0','<')
+
+- name: Update etcd package not excluding etcd3
+ command: "{{ ansible_pkg_mgr }} install -y etcd3-{{ upgrade_version }}\\*"
+ when: not upgrade_version | version_compare('3.0','<')
+
+- name: Restart etcd
+ service:
+ name: etcd
+ state: restarted
+
+- name: Verify cluster is healthy
+ command: "etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://{{ openshift.common.hostname }}:2379 cluster-health"
+ register: etcdctl
+ until: etcdctl.rc == 0
+ retries: 3
+ delay: 10
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/roles b/playbooks/common/openshift-cluster/upgrades/etcd/roles
new file mode 120000
index 000000000..6bc1a7aef
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/roles
@@ -0,0 +1 @@
+../../../../../roles \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/init.yml b/playbooks/common/openshift-cluster/upgrades/init.yml
index f3b3abe0d..fbdb7900a 100644
--- a/playbooks/common/openshift-cluster/upgrades/init.yml
+++ b/playbooks/common/openshift-cluster/upgrades/init.yml
@@ -10,7 +10,7 @@
- add_host:
name: "{{ item }}"
groups: l_oo_all_hosts
- with_items: g_all_hosts | default([])
+ with_items: "{{ g_all_hosts | default([]) }}"
- hosts: l_oo_all_hosts
gather_facts: no
diff --git a/playbooks/common/openshift-cluster/upgrades/openvswitch-avoid-oom.conf b/playbooks/common/openshift-cluster/upgrades/openvswitch-avoid-oom.conf
new file mode 120000
index 000000000..514526fe2
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/openvswitch-avoid-oom.conf
@@ -0,0 +1 @@
+../../../../roles/openshift_node/templates/openvswitch-avoid-oom.conf \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
index e43954453..2bbcbe1f8 100644
--- a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
@@ -17,10 +17,14 @@
# not already exist. We could have potentially done a replace --force to
# create and update in one step.
- openshift_examples
+ - openshift_hosted_templates
# Update the existing templates
- role: openshift_examples
registry_url: "{{ openshift.master.registry_url }}"
openshift_examples_import_command: replace
+ - role: openshift_hosted_templates
+ registry_url: "{{ openshift.master.registry_url }}"
+ openshift_hosted_templates_import_command: replace
pre_tasks:
- name: Collect all routers
command: >
@@ -41,7 +45,7 @@
{{ oc_cmd }} patch dc/{{ item['labels']['deploymentconfig'] }} -n {{ item['namespace'] }} -p
'{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}","livenessProbe":{"tcpSocket":null,"httpGet":{"path": "/healthz", "port": 1936, "host": "localhost", "scheme": "HTTP"},"initialDelaySeconds":10,"timeoutSeconds":1}}]}}}}'
--api-version=v1
- with_items: haproxy_routers
+ with_items: "{{ haproxy_routers }}"
- name: Check for default registry
command: >
diff --git a/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml
index af77f140f..cd1139b29 100644
--- a/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml
@@ -5,3 +5,7 @@
- name: Ensure python-yaml present for config upgrade
action: "{{ ansible_pkg_mgr }} name=PyYAML state=present"
when: not openshift.common.is_atomic | bool
+
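+# component is passed by the including play (e.g. "rpm_upgrade.yml component=master");
+# only the node service is restarted here, masters are restarted separately later
+# in the control plane upgrade.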
+- name: Restart node service
+ service: name="{{ openshift.common.service_type }}-node" state=restarted
+ when: component == "node"
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index 2c641e21e..57c25aa41 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -11,82 +11,24 @@
add_host:
name: "{{ item }}"
groups: etcd_hosts_to_backup
- with_items: groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master
+ with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master }}"
-- name: Backup etcd
- hosts: etcd_hosts_to_backup
- vars:
- embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}"
- timestamp: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
+# If the facts cache was deleted for some reason, this fact may not be set; when
+# unset it always defaults to true. That breaks etcd data dir fact detection, so
+# we must first make sure it is set correctly before attempting the backup.
+- name: Set master embedded_etcd fact
+ hosts: oo_masters_to_config
roles:
- openshift_facts
tasks:
- # Ensure we persist the etcd role for this host in openshift_facts
- openshift_facts:
- role: etcd
- local_facts: {}
- when: "'etcd' not in openshift"
-
- - stat: path=/var/lib/openshift
- register: var_lib_openshift
-
- - stat: path=/var/lib/origin
- register: var_lib_origin
-
- - name: Create origin symlink if necessary
- file: src=/var/lib/openshift/ dest=/var/lib/origin state=link
- when: var_lib_openshift.stat.exists == True and var_lib_origin.stat.exists == False
-
- # TODO: replace shell module with command and update later checks
- # We assume to be using the data dir for all backups.
- - name: Check available disk space for etcd backup
- shell: df --output=avail -k {{ openshift.common.data_dir }} | tail -n 1
- register: avail_disk
-
- # TODO: replace shell module with command and update later checks
- - name: Check current embedded etcd disk usage
- shell: du -k {{ openshift.etcd.etcd_data_dir }} | tail -n 1 | cut -f1
- register: etcd_disk_usage
- when: embedded_etcd | bool
-
- - name: Abort if insufficient disk space for etcd backup
- fail:
- msg: >
- {{ etcd_disk_usage.stdout }} Kb disk space required for etcd backup,
- {{ avail_disk.stdout }} Kb available.
- when: (embedded_etcd | bool) and (etcd_disk_usage.stdout|int > avail_disk.stdout|int)
+ role: master
+ local_facts:
+ embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
+ debug_level: "{{ openshift_master_debug_level | default(openshift.common.debug_level | default(2)) }}"
- - name: Install etcd (for etcdctl)
- action: "{{ ansible_pkg_mgr }} name=etcd state=latest"
- when: not openshift.common.is_atomic | bool
-
- - name: Generate etcd backup
- command: >
- etcdctl backup --data-dir={{ openshift.etcd.etcd_data_dir }}
- --backup-dir={{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}
-
- - set_fact:
- etcd_backup_complete: True
-
- - name: Display location of etcd backup
- debug:
- msg: "Etcd backup created in {{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}"
-
-
-- name: Gate on etcd backup
- hosts: localhost
- connection: local
- become: no
- tasks:
- - set_fact:
- etcd_backup_completed: "{{ hostvars
- | oo_select_keys(groups.etcd_hosts_to_backup)
- | oo_collect('inventory_hostname', {'etcd_backup_complete': true}) }}"
- - set_fact:
- etcd_backup_failed: "{{ groups.etcd_hosts_to_backup | difference(etcd_backup_completed) }}"
- - fail:
- msg: "Upgrade cannot continue. The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}"
- when: etcd_backup_failed | length > 0
+- name: Backup etcd
+ include: ./etcd/backup.yml
- name: Upgrade master packages
hosts: oo_masters_to_config
@@ -99,6 +41,8 @@
- include: rpm_upgrade.yml component=master
when: not openshift.common.is_containerized | bool
+# Create service signer cert when missing. Service signer certificate
+# is added to master config in the master config hook for v3_3.
- name: Determine if service signer cert must be created
hosts: oo_first_master
tasks:
@@ -108,8 +52,6 @@
register: service_signer_cert_stat
changed_when: false
-# Create service signer cert when missing. Service signer certificate
-# is added to master config in the master config hook for v3_3.
- include: create_service_signer_cert.yml
- name: Upgrade master config and systemd units
@@ -128,13 +70,6 @@
- name: Update systemd units
include: ../../../../roles/openshift_master/tasks/systemd_units.yml
-# - name: Upgrade master configuration
-# openshift_upgrade_config:
-# from_version: '3.1'
-# to_version: '3.2'
-# role: master
-# config_base: "{{ hostvars[inventory_hostname].openshift.common.config_base }}"
-
- name: Check for ca-bundle.crt
stat:
path: "{{ openshift.common.config_base }}/master/ca-bundle.crt"
@@ -184,6 +119,10 @@
msg: "Upgrade cannot continue. The following masters did not finish updating: {{ master_update_failed | join(',') }}"
when: master_update_failed | length > 0
+# We are now ready to restart master services (or the entire system,
+# depending on openshift_rolling_restart_mode):
+- include: ../../openshift-master/restart.yml
+
###############################################################################
# Reconcile Cluster Roles, Cluster Role Bindings and Security Context Constraints
###############################################################################
@@ -200,19 +139,15 @@
# restart.
skip_docker_role: True
tasks:
- - name: Verifying the correct commandline tools are available
- shell: grep {{ verify_upgrade_version }} {{ openshift.common.admin_binary}}
- when: openshift.common.is_containerized | bool and verify_upgrade_version is defined
-
- name: Reconcile Cluster Roles
command: >
- {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
policy reconcile-cluster-roles --additive-only=true --confirm
run_once: true
- name: Reconcile Cluster Role Bindings
command: >
- {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
policy reconcile-cluster-role-bindings
--exclude-groups=system:authenticated
--exclude-groups=system:authenticated:oauth
@@ -222,9 +157,15 @@
when: origin_reconcile_bindings | bool or ent_reconcile_bindings | bool
run_once: true
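+  # The system:build-strategy-jenkinspipeline role only exists on 3.4/1.4 and
+  # later, hence the version guard below.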
+ - name: Reconcile Jenkins Pipeline Role Bindings
+ command: >
+ {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig policy reconcile-cluster-role-bindings system:build-strategy-jenkinspipeline --confirm
+ run_once: true
+ when: openshift.common.version_gte_3_4_or_1_4 | bool
+
- name: Reconcile Security Context Constraints
command: >
- {{ openshift.common.admin_binary}} policy reconcile-sccs --confirm --additive-only=true
+ {{ openshift.common.client_binary }} adm policy reconcile-sccs --confirm --additive-only=true
run_once: true
- set_fact:
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
index 9b572dcdf..1f314c854 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
@@ -29,7 +29,7 @@
- name: Mark unschedulable if host is a node
command: >
- {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename | lower }} --schedulable=false
+ {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --schedulable=false
delegate_to: "{{ groups.oo_first_master.0 }}"
when: inventory_hostname in groups.oo_nodes_to_upgrade
# NOTE: There is a transient "object has been modified" error here, allow a couple
@@ -41,7 +41,7 @@
- name: Evacuate Node for Kubelet upgrade
command: >
- {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename | lower }} --evacuate --force
+ {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --evacuate --force
delegate_to: "{{ groups.oo_first_master.0 }}"
when: inventory_hostname in groups.oo_nodes_to_upgrade
tasks:
@@ -64,7 +64,7 @@
- name: Set node schedulability
command: >
- {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename | lower }} --schedulable=true
+ {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --schedulable=true
delegate_to: "{{ groups.oo_first_master.0 }}"
when: inventory_hostname in groups.oo_nodes_to_upgrade and was_schedulable | bool
register: node_sched
diff --git a/playbooks/common/openshift-etcd/service.yml b/playbooks/common/openshift-etcd/service.yml
index fd2bc24ae..f460612ba 100644
--- a/playbooks/common/openshift-etcd/service.yml
+++ b/playbooks/common/openshift-etcd/service.yml
@@ -10,7 +10,7 @@
- name: Evaluate g_service_etcd
add_host: name={{ item }} groups=g_service_etcd
- with_items: oo_host_group_exp | default([])
+ with_items: "{{ oo_host_group_exp | default([]) }}"
- name: Change etcd state on etcd instance(s)
hosts: g_service_etcd
diff --git a/playbooks/common/openshift-loadbalancer/service.yml b/playbooks/common/openshift-loadbalancer/service.yml
index e06a14c89..efc80edf9 100644
--- a/playbooks/common/openshift-loadbalancer/service.yml
+++ b/playbooks/common/openshift-loadbalancer/service.yml
@@ -10,7 +10,7 @@
- name: Evaluate g_service_lb
add_host: name={{ item }} groups=g_service_lb
- with_items: oo_host_group_exp | default([])
+ with_items: "{{ oo_host_group_exp | default([]) }}"
- name: Change state on lb instance(s)
hosts: g_service_lb
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
index a53c55c14..5fcb850a2 100644
--- a/playbooks/common/openshift-master/config.yml
+++ b/playbooks/common/openshift-master/config.yml
@@ -53,7 +53,7 @@
when: openshift_hosted_metrics_deployer_prefix is not defined
- set_fact:
openshift_hosted_metrics_deployer_version: "{{ lookup('oo_option', 'openshift_hosted_metrics_deployer_version') | default('latest') }}"
- when: openshift_hosted_metrics_deployer_prefix is not defined
+ when: openshift_hosted_metrics_deployer_version is not defined
roles:
- openshift_facts
post_tasks:
diff --git a/playbooks/common/openshift-master/restart.yml b/playbooks/common/openshift-master/restart.yml
index 57a63cfee..5769ef5cd 100644
--- a/playbooks/common/openshift-master/restart.yml
+++ b/playbooks/common/openshift-master/restart.yml
@@ -66,63 +66,8 @@
current_host: "{{ exists.stat.exists }}"
when: openshift.common.rolling_restart_mode == 'system'
-- name: Determine which masters are currently active
- hosts: oo_masters_to_config
- any_errors_fatal: true
- tasks:
- - name: Check master service status
- command: >
- systemctl is-active {{ openshift.common.service_type }}-master
- register: active_check_output
- when: openshift.master.cluster_method | default(None) == 'pacemaker'
- failed_when: false
- changed_when: false
- - set_fact:
- is_active: "{{ active_check_output.stdout == 'active' }}"
- when: openshift.master.cluster_method | default(None) == 'pacemaker'
-
-- name: Evaluate master groups
- hosts: localhost
- become: no
- tasks:
- - fail:
- msg: >
- Did not receive active status from any masters. Please verify pacemaker cluster.
- when: "{{ hostvars[groups.oo_first_master.0].openshift.master.cluster_method | default(None) == 'pacemaker' and 'True' not in (hostvars
- | oo_select_keys(groups['oo_masters_to_config'])
- | oo_collect('is_active')
- | list) }}"
- - name: Evaluate oo_active_masters
- add_host:
- name: "{{ item }}"
- groups: oo_active_masters
- ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_become: "{{ g_sudo | default(omit) }}"
- with_items: "{{ groups.oo_masters_to_config | default([]) }}"
- when: (hostvars[item]['is_active'] | default(false)) | bool
- - name: Evaluate oo_current_masters
- add_host:
- name: "{{ item }}"
- groups: oo_current_masters
- ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_become: "{{ g_sudo | default(omit) }}"
- with_items: "{{ groups.oo_masters_to_config | default([]) }}"
- when: (hostvars[item]['current_host'] | default(false)) | bool
-
-- name: Validate pacemaker cluster
- hosts: oo_active_masters
- tasks:
- - name: Retrieve pcs status
- command: pcs status
- register: pcs_status_output
- changed_when: false
- - fail:
- msg: >
- Pacemaker cluster validation failed. One or more nodes are not online.
- when: not (pcs_status_output.stdout | validate_pcs_cluster(groups.oo_masters_to_config)) | bool
-
- name: Restart masters
- hosts: oo_masters_to_config:!oo_active_masters:!oo_current_masters
+ hosts: oo_masters_to_config
vars:
openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
serial: 1
@@ -132,20 +77,3 @@
- include: restart_services.yml
when: openshift.common.rolling_restart_mode == 'services'
-- name: Restart active masters
- hosts: oo_active_masters
- serial: 1
- tasks:
- - include: restart_hosts_pacemaker.yml
- when: openshift.common.rolling_restart_mode == 'system'
- - include: restart_services_pacemaker.yml
- when: openshift.common.rolling_restart_mode == 'services'
-
-- name: Restart current masters
- hosts: oo_current_masters
- serial: 1
- tasks:
- - include: restart_hosts.yml
- when: openshift.common.rolling_restart_mode == 'system'
- - include: restart_services.yml
- when: openshift.common.rolling_restart_mode == 'services'
diff --git a/playbooks/common/openshift-master/restart_hosts.yml b/playbooks/common/openshift-master/restart_hosts.yml
index ff206f5a2..b1c36718c 100644
--- a/playbooks/common/openshift-master/restart_hosts.yml
+++ b/playbooks/common/openshift-master/restart_hosts.yml
@@ -5,8 +5,8 @@
poll: 0
ignore_errors: true
become: yes
-# When cluster_method != pacemaker we can ensure the api_port is
-# available.
+
+# Ensure the api_port is available.
- name: Wait for master API to come back online
become: no
local_action:
@@ -15,25 +15,3 @@
state=started
delay=10
port="{{ openshift.master.api_port }}"
- when: openshift.master.cluster_method != 'pacemaker'
-- name: Wait for master to start
- become: no
- local_action:
- module: wait_for
- host="{{ inventory_hostname }}"
- state=started
- delay=10
- port=22
- when: openshift.master.cluster_method == 'pacemaker'
-- name: Wait for master to become available
- command: pcs status
- register: pcs_status_output
- until: pcs_status_output.stdout | validate_pcs_cluster([inventory_hostname]) | bool
- retries: 15
- delay: 2
- changed_when: false
- when: openshift.master.cluster_method == 'pacemaker'
-- fail:
- msg: >
- Pacemaker cluster validation failed {{ inventory hostname }} is not online.
- when: openshift.master.cluster_method == 'pacemaker' and not (pcs_status_output.stdout | validate_pcs_cluster([inventory_hostname])) | bool
diff --git a/playbooks/common/openshift-master/restart_hosts_pacemaker.yml b/playbooks/common/openshift-master/restart_hosts_pacemaker.yml
deleted file mode 100644
index c9219e8de..000000000
--- a/playbooks/common/openshift-master/restart_hosts_pacemaker.yml
+++ /dev/null
@@ -1,25 +0,0 @@
-- name: Fail over master resource
- command: >
- pcs resource move master {{ hostvars | oo_select_keys(groups['oo_masters_to_config']) | oo_collect('openshift.common.hostname', {'is_active': 'False'}) | list | first }}
-- name: Wait for master API to come back online
- become: no
- local_action:
- module: wait_for
- host="{{ openshift.master.cluster_hostname }}"
- state=started
- delay=10
- port="{{ openshift.master.api_port }}"
-- name: Restart master system
- # https://github.com/ansible/ansible/issues/10616
- shell: sleep 2 && shutdown -r now "OpenShift Ansible master rolling restart"
- async: 1
- poll: 0
- ignore_errors: true
- become: yes
-- name: Wait for master to start
- become: no
- local_action:
- module: wait_for
- host="{{ inventory_hostname }}"
- state=started
- delay=10
diff --git a/playbooks/common/openshift-master/restart_services_pacemaker.yml b/playbooks/common/openshift-master/restart_services_pacemaker.yml
deleted file mode 100644
index e738f3fb6..000000000
--- a/playbooks/common/openshift-master/restart_services_pacemaker.yml
+++ /dev/null
@@ -1,10 +0,0 @@
-- name: Restart master services
- command: pcs resource restart master
-- name: Wait for master API to come back online
- become: no
- local_action:
- module: wait_for
- host="{{ openshift.master.cluster_hostname }}"
- state=started
- delay=10
- port="{{ openshift.master.api_port }}"
diff --git a/playbooks/common/openshift-master/scaleup.yml b/playbooks/common/openshift-master/scaleup.yml
index 56ed09e1b..18e5c665f 100644
--- a/playbooks/common/openshift-master/scaleup.yml
+++ b/playbooks/common/openshift-master/scaleup.yml
@@ -33,7 +33,7 @@
service: name={{ openshift.common.service_type }}-master-controllers state=restarted
- name: verify api server
command: >
- curl --silent
+ curl --silent --tlsv1.2
{% if openshift.common.version_gte_3_2_or_1_2 | bool %}
--cacert {{ openshift.common.config_base }}/master/ca-bundle.crt
{% else %}
diff --git a/playbooks/common/openshift-master/service.yml b/playbooks/common/openshift-master/service.yml
index f60c5a2b5..5e5198335 100644
--- a/playbooks/common/openshift-master/service.yml
+++ b/playbooks/common/openshift-master/service.yml
@@ -10,7 +10,7 @@
- name: Evaluate g_service_masters
add_host: name={{ item }} groups=g_service_masters
- with_items: oo_host_group_exp | default([])
+ with_items: "{{ oo_host_group_exp | default([]) }}"
- name: Change state on master instance(s)
hosts: g_service_masters
diff --git a/playbooks/common/openshift-nfs/service.yml b/playbooks/common/openshift-nfs/service.yml
index 20c8ca248..8468014da 100644
--- a/playbooks/common/openshift-nfs/service.yml
+++ b/playbooks/common/openshift-nfs/service.yml
@@ -8,7 +8,7 @@
- name: Evaluate g_service_nfs
add_host: name={{ item }} groups=g_service_nfs
- with_items: oo_host_group_exp | default([])
+ with_items: "{{ oo_host_group_exp | default([]) }}"
- name: Change state on nfs instance(s)
hosts: g_service_nfs
diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml
index 5191662f7..4824eeef3 100644
--- a/playbooks/common/openshift-node/config.yml
+++ b/playbooks/common/openshift-node/config.yml
@@ -165,7 +165,7 @@
# Using curl here since the uri module requires python-httplib2 and
# wait_for port doesn't provide health information.
command: >
- curl --silent
+ curl --silent --tlsv1.2
{% if openshift.common.version_gte_3_2_or_1_2 | bool %}
--cacert {{ openshift.common.config_base }}/master/ca-bundle.crt
{% else %}
diff --git a/playbooks/common/openshift-node/service.yml b/playbooks/common/openshift-node/service.yml
index 0f07add2a..33095c9fb 100644
--- a/playbooks/common/openshift-node/service.yml
+++ b/playbooks/common/openshift-node/service.yml
@@ -10,7 +10,7 @@
- name: Evaluate g_service_nodes
add_host: name={{ item }} groups=g_service_nodes
- with_items: oo_host_group_exp | default([])
+ with_items: "{{ oo_host_group_exp | default([]) }}"
- name: Change state on node instance(s)
hosts: g_service_nodes
diff --git a/playbooks/gce/openshift-cluster/library/gce.py b/playbooks/gce/openshift-cluster/library/gce.py
deleted file mode 100644
index fcaa3b850..000000000
--- a/playbooks/gce/openshift-cluster/library/gce.py
+++ /dev/null
@@ -1,543 +0,0 @@
-#!/usr/bin/python
-# Copyright 2013 Google Inc.
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-DOCUMENTATION = '''
----
-module: gce
-version_added: "1.4"
-short_description: create or terminate GCE instances
-description:
- - Creates or terminates Google Compute Engine (GCE) instances. See
- U(https://cloud.google.com/products/compute-engine) for an overview.
- Full install/configuration instructions for the gce* modules can
- be found in the comments of ansible/test/gce_tests.py.
-options:
- image:
- description:
- - image string to use for the instance
- required: false
- default: "debian-7"
- instance_names:
- description:
- - a comma-separated list of instance names to create or destroy
- required: false
- default: null
- machine_type:
- description:
- - machine type to use for the instance, use 'n1-standard-1' by default
- required: false
- default: "n1-standard-1"
- metadata:
- description:
- - a hash/dictionary of custom data for the instance;
- '{"key":"value", ...}'
- required: false
- default: null
- service_account_email:
- version_added: "1.5.1"
- description:
- - service account email
- required: false
- default: null
- service_account_permissions:
- version_added: "2.0"
- description:
- - service account permissions (see
- U(https://cloud.google.com/sdk/gcloud/reference/compute/instances/create),
- --scopes section for detailed information)
- required: false
- default: null
- choices: [
- "bigquery", "cloud-platform", "compute-ro", "compute-rw",
- "computeaccounts-ro", "computeaccounts-rw", "datastore", "logging-write",
- "monitoring", "sql", "sql-admin", "storage-full", "storage-ro",
- "storage-rw", "taskqueue", "userinfo-email"
- ]
- pem_file:
- version_added: "1.5.1"
- description:
- - path to the pem file associated with the service account email
- required: false
- default: null
- project_id:
- version_added: "1.5.1"
- description:
- - your GCE project ID
- required: false
- default: null
- name:
- description:
- - identifier when working with a single instance
- required: false
- network:
- description:
- - name of the network, 'default' will be used if not specified
- required: false
- default: "default"
- persistent_boot_disk:
- description:
- - if set, create the instance with a persistent boot disk
- required: false
- default: "false"
- disks:
- description:
- - a list of persistent disks to attach to the instance; a string value
- gives the name of the disk; alternatively, a dictionary value can
- define 'name' and 'mode' ('READ_ONLY' or 'READ_WRITE'). The first entry
- will be the boot disk (which must be READ_WRITE).
- required: false
- default: null
- version_added: "1.7"
- state:
- description:
- - desired state of the resource
- required: false
- default: "present"
- choices: ["active", "present", "absent", "deleted"]
- tags:
- description:
- - a comma-separated list of tags to associate with the instance
- required: false
- default: null
- zone:
- description:
- - the GCE zone to use
- required: true
- default: "us-central1-a"
- ip_forward:
- version_added: "1.9"
- description:
- - set to true if the instance can forward ip packets (useful for
- gateways)
- required: false
- default: "false"
- external_ip:
- version_added: "1.9"
- description:
- - type of external ip, ephemeral by default
- required: false
- default: "ephemeral"
- disk_auto_delete:
- version_added: "1.9"
- description:
- - if set boot disk will be removed after instance destruction
- required: false
- default: "true"
-
-requirements:
- - "python >= 2.6"
- - "apache-libcloud >= 0.13.3"
-notes:
- - Either I(name) or I(instance_names) is required.
-author: "Eric Johnson (@erjohnso) <erjohnso@google.com>"
-'''
-
-EXAMPLES = '''
-# Basic provisioning example. Create a single Debian 7 instance in the
-# us-central1-a Zone of n1-standard-1 machine type.
-- local_action:
- module: gce
- name: test-instance
- zone: us-central1-a
- machine_type: n1-standard-1
- image: debian-7
-
-# Example using defaults and with metadata to create a single 'foo' instance
-- local_action:
- module: gce
- name: foo
- metadata: '{"db":"postgres", "group":"qa", "id":500}'
-
-
-# Launch instances from a control node, runs some tasks on the new instances,
-# and then terminate them
-- name: Create a sandbox instance
- hosts: localhost
- vars:
- names: foo,bar
- machine_type: n1-standard-1
- image: debian-6
- zone: us-central1-a
- service_account_email: unique-email@developer.gserviceaccount.com
- pem_file: /path/to/pem_file
- project_id: project-id
- tasks:
- - name: Launch instances
- local_action: gce instance_names={{names}} machine_type={{machine_type}}
- image={{image}} zone={{zone}}
- service_account_email={{ service_account_email }}
- pem_file={{ pem_file }} project_id={{ project_id }}
- register: gce
- - name: Wait for SSH to come up
- local_action: wait_for host={{item.public_ip}} port=22 delay=10
- timeout=60 state=started
- with_items: {{gce.instance_data}}
-
-- name: Configure instance(s)
- hosts: launched
- sudo: True
- roles:
- - my_awesome_role
- - my_awesome_tasks
-
-- name: Terminate instances
- hosts: localhost
- connection: local
- tasks:
- - name: Terminate instances that were previously launched
- local_action:
- module: gce
- state: 'absent'
- instance_names: {{gce.instance_names}}
-
-'''
-
-try:
- import libcloud
- from libcloud.compute.types import Provider
- from libcloud.compute.providers import get_driver
- from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
- ResourceExistsError, ResourceInUseError, ResourceNotFoundError
- _ = Provider.GCE
- HAS_LIBCLOUD = True
-except ImportError:
- HAS_LIBCLOUD = False
-
-try:
- from ast import literal_eval
- HAS_PYTHON26 = True
-except ImportError:
- HAS_PYTHON26 = False
-
-
-def get_instance_info(inst):
- """Retrieves instance information from an instance object and returns it
- as a dictionary.
-
- """
- metadata = {}
- if 'metadata' in inst.extra and 'items' in inst.extra['metadata']:
- for md in inst.extra['metadata']['items']:
- metadata[md['key']] = md['value']
-
- try:
- netname = inst.extra['networkInterfaces'][0]['network'].split('/')[-1]
- except:
- netname = None
- if 'disks' in inst.extra:
- disk_names = [disk_info['source'].split('/')[-1]
- for disk_info
- in sorted(inst.extra['disks'],
- key=lambda disk_info: disk_info['index'])]
- else:
- disk_names = []
-
- if len(inst.public_ips) == 0:
- public_ip = None
- else:
- public_ip = inst.public_ips[0]
-
- return({
- 'image': inst.image is not None and inst.image.split('/')[-1] or None,
- 'disks': disk_names,
- 'machine_type': inst.size,
- 'metadata': metadata,
- 'name': inst.name,
- 'network': netname,
- 'private_ip': inst.private_ips[0],
- 'public_ip': public_ip,
- 'status': ('status' in inst.extra) and inst.extra['status'] or None,
- 'tags': ('tags' in inst.extra) and inst.extra['tags'] or [],
- 'zone': ('zone' in inst.extra) and inst.extra['zone'].name or None,
- })
-
-
-def create_instances(module, gce, instance_names):
- """Creates new instances. Attributes other than instance_names are picked
- up from 'module'
-
- module : AnsibleModule object
- gce: authenticated GCE libcloud driver
- instance_names: python list of instance names to create
-
- Returns:
- A list of dictionaries with instance information
- about the instances that were launched.
-
- """
- image = module.params.get('image')
- machine_type = module.params.get('machine_type')
- metadata = module.params.get('metadata')
- network = module.params.get('network')
- persistent_boot_disk = module.params.get('persistent_boot_disk')
- disks = module.params.get('disks')
- state = module.params.get('state')
- tags = module.params.get('tags')
- zone = module.params.get('zone')
- ip_forward = module.params.get('ip_forward')
- external_ip = module.params.get('external_ip')
- disk_auto_delete = module.params.get('disk_auto_delete')
- service_account_permissions = module.params.get('service_account_permissions')
- service_account_email = module.params.get('service_account_email')
-
- if external_ip == "none":
- external_ip = None
-
- new_instances = []
- changed = False
-
- lc_image = gce.ex_get_image(image)
- lc_disks = []
- disk_modes = []
- for i, disk in enumerate(disks or []):
- if isinstance(disk, dict):
- lc_disks.append(gce.ex_get_volume(disk['name']))
- disk_modes.append(disk['mode'])
- else:
- lc_disks.append(gce.ex_get_volume(disk))
- # boot disk is implicitly READ_WRITE
- disk_modes.append('READ_ONLY' if i > 0 else 'READ_WRITE')
- lc_network = gce.ex_get_network(network)
- lc_machine_type = gce.ex_get_size(machine_type)
- lc_zone = gce.ex_get_zone(zone)
-
- # Try to convert the user's metadata value into the format expected
- # by GCE. First try to ensure user has proper quoting of a
- # dictionary-like syntax using 'literal_eval', then convert the python
- # dict into a python list of 'key' / 'value' dicts. Should end up
- # with:
- # [ {'key': key1, 'value': value1}, {'key': key2, 'value': value2}, ...]
- if metadata:
- if isinstance(metadata, dict):
- md = metadata
- else:
- try:
- md = literal_eval(str(metadata))
- if not isinstance(md, dict):
- raise ValueError('metadata must be a dict')
- except ValueError as e:
- module.fail_json(msg='bad metadata: %s' % str(e))
- except SyntaxError as e:
- module.fail_json(msg='bad metadata syntax')
-
- if hasattr(libcloud, '__version__') and libcloud.__version__ < '0.15':
- items = []
- for k, v in md.items():
- items.append({"key": k, "value": v})
- metadata = {'items': items}
- else:
- metadata = md
-
- ex_sa_perms = []
- bad_perms = []
- if service_account_permissions:
- for perm in service_account_permissions:
- if perm not in gce.SA_SCOPES_MAP.keys():
- bad_perms.append(perm)
- if len(bad_perms) > 0:
- module.fail_json(msg='bad permissions: %s' % str(bad_perms))
- if service_account_email:
- ex_sa_perms.append({'email': service_account_email})
- else:
- ex_sa_perms.append({'email': "default"})
- ex_sa_perms[0]['scopes'] = service_account_permissions
-
- # These variables all have default values but check just in case
- if not lc_image or not lc_network or not lc_machine_type or not lc_zone:
- module.fail_json(msg='Missing required create instance variable',
- changed=False)
-
- for name in instance_names:
- pd = None
- if lc_disks:
- pd = lc_disks[0]
- elif persistent_boot_disk:
- try:
- pd = gce.create_volume(None, "%s" % name, image=lc_image)
- except ResourceExistsError:
- pd = gce.ex_get_volume("%s" % name, lc_zone)
- inst = None
- try:
- inst = gce.create_node(
- name, lc_machine_type, lc_image, location=lc_zone,
- ex_network=network, ex_tags=tags, ex_metadata=metadata,
- ex_boot_disk=pd, ex_can_ip_forward=ip_forward,
- external_ip=external_ip, ex_disk_auto_delete=disk_auto_delete,
- ex_service_accounts=ex_sa_perms
- )
- changed = True
- except ResourceExistsError:
- inst = gce.ex_get_node(name, lc_zone)
- except GoogleBaseError as e:
- module.fail_json(msg='Unexpected error attempting to create ' +
- 'instance %s, error: %s' % (name, e.value))
-
- for i, lc_disk in enumerate(lc_disks):
- # Check whether the disk is already attached
- if (len(inst.extra['disks']) > i):
- attached_disk = inst.extra['disks'][i]
- if attached_disk['source'] != lc_disk.extra['selfLink']:
- module.fail_json(
- msg=("Disk at index %d does not match: requested=%s found=%s" % (
- i, lc_disk.extra['selfLink'], attached_disk['source'])))
- elif attached_disk['mode'] != disk_modes[i]:
- module.fail_json(
- msg=("Disk at index %d is in the wrong mode: requested=%s found=%s" % (
- i, disk_modes[i], attached_disk['mode'])))
- else:
- continue
- gce.attach_volume(inst, lc_disk, ex_mode=disk_modes[i])
- # Work around libcloud bug: attached volumes don't get added
- # to the instance metadata. get_instance_info() only cares about
- # source and index.
- if len(inst.extra['disks']) != i+1:
- inst.extra['disks'].append(
- {'source': lc_disk.extra['selfLink'], 'index': i})
-
- if inst:
- new_instances.append(inst)
-
- instance_names = []
- instance_json_data = []
- for inst in new_instances:
- d = get_instance_info(inst)
- instance_names.append(d['name'])
- instance_json_data.append(d)
-
- return (changed, instance_json_data, instance_names)
-
-
-def terminate_instances(module, gce, instance_names, zone_name):
- """Terminates a list of instances.
-
- module: Ansible module object
- gce: authenticated GCE connection object
- instance_names: a list of instance names to terminate
- zone_name: the zone where the instances reside prior to termination
-
- Returns a dictionary of instance names that were terminated.
-
- """
- changed = False
- terminated_instance_names = []
- for name in instance_names:
- inst = None
- try:
- inst = gce.ex_get_node(name, zone_name)
- except ResourceNotFoundError:
- pass
- except Exception as e:
- module.fail_json(msg=unexpected_error_msg(e), changed=False)
- if inst:
- gce.destroy_node(inst)
- terminated_instance_names.append(inst.name)
- changed = True
-
- return (changed, terminated_instance_names)
-
-
-def main():
- module = AnsibleModule(
- argument_spec=dict(
- image=dict(default='debian-7'),
- instance_names=dict(),
- machine_type=dict(default='n1-standard-1'),
- metadata=dict(),
- name=dict(),
- network=dict(default='default'),
- persistent_boot_disk=dict(type='bool', default=False),
- disks=dict(type='list'),
- state=dict(choices=['active', 'present', 'absent', 'deleted'],
- default='present'),
- tags=dict(type='list'),
- zone=dict(default='us-central1-a'),
- service_account_email=dict(),
- service_account_permissions=dict(type='list'),
- pem_file=dict(),
- project_id=dict(),
- ip_forward=dict(type='bool', default=False),
- external_ip=dict(choices=['ephemeral', 'none'],
- default='ephemeral'),
- disk_auto_delete=dict(type='bool', default=True),
- )
- )
-
- if not HAS_PYTHON26:
- module.fail_json(msg="GCE module requires python's 'ast' module, python v2.6+")
- if not HAS_LIBCLOUD:
- module.fail_json(msg='libcloud with GCE support (0.13.3+) required for this module')
-
- gce = gce_connect(module)
-
- image = module.params.get('image')
- instance_names = module.params.get('instance_names')
- machine_type = module.params.get('machine_type')
- metadata = module.params.get('metadata')
- name = module.params.get('name')
- network = module.params.get('network')
- persistent_boot_disk = module.params.get('persistent_boot_disk')
- state = module.params.get('state')
- tags = module.params.get('tags')
- zone = module.params.get('zone')
- ip_forward = module.params.get('ip_forward')
- changed = False
-
- inames = []
- if isinstance(instance_names, list):
- inames = instance_names
- elif isinstance(instance_names, str):
- inames = instance_names.split(',')
- if name:
- inames.append(name)
- if not inames:
- module.fail_json(msg='Must specify a "name" or "instance_names"',
- changed=False)
- if not zone:
- module.fail_json(msg='Must specify a "zone"', changed=False)
-
- json_output = {'zone': zone}
- if state in ['absent', 'deleted']:
- json_output['state'] = 'absent'
- (changed, terminated_instance_names) = terminate_instances(
- module, gce, inames, zone)
-
- # based on what user specified, return the same variable, although
- # value could be different if an instance could not be destroyed
- if instance_names:
- json_output['instance_names'] = terminated_instance_names
- elif name:
- json_output['name'] = name
-
- elif state in ['active', 'present']:
- json_output['state'] = 'present'
- (changed, instance_data, instance_name_list) = create_instances(
- module, gce, inames)
- json_output['instance_data'] = instance_data
- if instance_names:
- json_output['instance_names'] = instance_name_list
- elif name:
- json_output['name'] = name
-
- json_output['changed'] = changed
- module.exit_json(**json_output)
-
-# import module snippets
-from ansible.module_utils.basic import *
-from ansible.module_utils.gce import *
-if __name__ == '__main__':
- main()
diff --git a/playbooks/gce/openshift-cluster/list.yml b/playbooks/gce/openshift-cluster/list.yml
index c29cac272..34ab09533 100644
--- a/playbooks/gce/openshift-cluster/list.yml
+++ b/playbooks/gce/openshift-cluster/list.yml
@@ -16,18 +16,8 @@
groups: oo_list_hosts
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
ansible_become: "{{ deployment_vars[deployment_type].become }}"
- with_items: groups[scratch_group] | default([], true) | difference(['localhost']) | difference(groups.status_terminated | default([], true))
-
-- name: List Hosts
- hosts: oo_list_hosts
-
-- name: List Hosts
- hosts: localhost
- become: no
- connection: local
- gather_facts: no
- vars_files:
- - vars.yml
- tasks:
+ oo_public_ipv4: "{{ hostvars[item].gce_public_ip }}"
+ oo_private_ipv4: "{{ hostvars[item].gce_private_ip }}"
+ with_items: "{{ groups[scratch_group] | default([], true) | difference(['localhost']) | difference(groups.status_terminated | default([], true)) }}"
- debug:
msg: "{{ hostvars | oo_select_keys(groups[scratch_group] | default([])) | oo_pretty_print_cluster }}"
diff --git a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml
index 60cf21a5b..b7604580c 100644
--- a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml
+++ b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml
@@ -9,6 +9,7 @@
project_id: "{{ lookup('env', 'gce_project_id') }}"
zone: "{{ lookup('env', 'zone') }}"
network: "{{ lookup('env', 'network') }}"
+ subnetwork: "{{ lookup('env', 'subnetwork') | default(omit, True) }}"
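+    # default(omit, True) omits the subnetwork parameter when the env var is unset or empty.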
# unsupported in 1.9.+
#service_account_permissions: "datastore,logging-write"
tags:
@@ -49,11 +50,11 @@
gce_public_ip: "{{ item.public_ip }}"
gce_private_ip: "{{ item.private_ip }}"
openshift_node_labels: "{{ node_label }}"
- with_items: gce.instance_data | default([], true)
+ with_items: "{{ gce.instance_data | default([], true) }}"
- name: Wait for ssh
wait_for: port=22 host={{ item.public_ip }}
- with_items: gce.instance_data | default([], true)
+ with_items: "{{ gce.instance_data | default([], true) }}"
- name: Wait for user setup
command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null {{ hostvars[item.name].ansible_ssh_user }}@{{ item.public_ip }} echo {{ hostvars[item.name].ansible_ssh_user }} user is setup"
@@ -61,4 +62,4 @@
until: result.rc == 0
retries: 30
delay: 5
- with_items: gce.instance_data | default([], true)
+ with_items: "{{ gce.instance_data | default([], true) }}"
diff --git a/playbooks/gce/openshift-cluster/terminate.yml b/playbooks/gce/openshift-cluster/terminate.yml
index 6a0ac088a..68e60f9d4 100644
--- a/playbooks/gce/openshift-cluster/terminate.yml
+++ b/playbooks/gce/openshift-cluster/terminate.yml
@@ -12,7 +12,7 @@
groups: oo_hosts_to_terminate
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
ansible_become: "{{ deployment_vars[deployment_type].become }}"
- with_items: (groups['tag_clusterid-' ~ cluster_id] | default([])) | difference(['localhost'])
+ with_items: "{{ (groups['tag_clusterid-' ~ cluster_id] | default([])) | difference(['localhost']) }}"
- name: Unsubscribe VMs
hosts: oo_hosts_to_terminate
@@ -43,7 +43,7 @@
pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
project_id: "{{ lookup('env', 'gce_project_id') }}"
zone: "{{ lookup('env', 'zone') }}"
- with_items: groups['oo_hosts_to_terminate'] | default([], true)
+ with_items: "{{ groups['oo_hosts_to_terminate'] | default([], true) }}"
when: item is defined
#- include: ../openshift-node/terminate.yml
diff --git a/playbooks/gce/openshift-cluster/update.yml b/playbooks/gce/openshift-cluster/update.yml
index 332f27da7..6d2af3d26 100644
--- a/playbooks/gce/openshift-cluster/update.yml
+++ b/playbooks/gce/openshift-cluster/update.yml
@@ -7,7 +7,7 @@
- add_host:
name: "{{ item }}"
groups: l_oo_all_hosts
- with_items: g_all_hosts
+ with_items: "{{ g_all_hosts }}"
- hosts: l_oo_all_hosts
gather_facts: no
@@ -27,7 +27,7 @@
groups: oo_hosts_to_update
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
ansible_become: "{{ deployment_vars[deployment_type].become }}"
- with_items: g_all_hosts | default([])
+ with_items: "{{ g_all_hosts | default([]) }}"
- include: ../../common/openshift-cluster/update_repos_and_packages.yml
diff --git a/playbooks/libvirt/openshift-cluster/list.yml b/playbooks/libvirt/openshift-cluster/list.yml
index eb64544db..579cd7ac6 100644
--- a/playbooks/libvirt/openshift-cluster/list.yml
+++ b/playbooks/libvirt/openshift-cluster/list.yml
@@ -16,18 +16,8 @@
groups: oo_list_hosts
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
ansible_become: "{{ deployment_vars[deployment_type].become }}"
- with_items: groups[scratch_group] | default([]) | difference(['localhost'])
-
-- name: List Hosts
- hosts: oo_list_hosts
-
-- name: List Hosts
- hosts: localhost
- become: no
- connection: local
- gather_facts: no
- vars_files:
- - vars.yml
- tasks:
+ oo_public_ipv4: ""
+ oo_private_ipv4: "{{ hostvars[item].libvirt_ip_address }}"
+ with_items: "{{ groups[scratch_group] | default([]) | difference(['localhost']) }}"
- debug:
msg: "{{ hostvars | oo_select_keys(groups[scratch_group] | default([])) | oo_pretty_print_cluster }}"
diff --git a/playbooks/libvirt/openshift-cluster/terminate.yml b/playbooks/libvirt/openshift-cluster/terminate.yml
index df5c52f2d..81e6d8f05 100644
--- a/playbooks/libvirt/openshift-cluster/terminate.yml
+++ b/playbooks/libvirt/openshift-cluster/terminate.yml
@@ -1,5 +1,5 @@
---
-# TODO: does not handle a non-existant cluster gracefully
+# TODO: does not handle a non-existent cluster gracefully
- name: Terminate instance(s)
hosts: localhost
diff --git a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml b/playbooks/openstack/openshift-cluster/files/heat_stack.yaml
index 755090f94..20ce47c07 100644
--- a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml
+++ b/playbooks/openstack/openshift-cluster/files/heat_stack.yaml
@@ -45,7 +45,7 @@ parameters:
node_port_incoming:
type: string
label: Source of node port connections
- description: Authorized sources targetting node ports
+ description: Authorized sources targeting node ports
default: 0.0.0.0/0
num_etcd:
diff --git a/playbooks/openstack/openshift-cluster/list.yml b/playbooks/openstack/openshift-cluster/list.yml
index de68f5207..6c6f671be 100644
--- a/playbooks/openstack/openshift-cluster/list.yml
+++ b/playbooks/openstack/openshift-cluster/list.yml
@@ -17,18 +17,8 @@
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
ansible_ssh_host: "{{ hostvars[item].ansible_ssh_host | default(item) }}"
ansible_become: "{{ deployment_vars[deployment_type].become }}"
+ oo_public_ipv4: "{{ hostvars[item].openstack.public_v4 }}"
+ oo_private_ipv4: "{{ hostvars[item].openstack.private_v4 }}"
with_items: "{{ groups[scratch_group] | default([]) | difference(['localhost']) }}"
-
-- name: List Hosts
- hosts: oo_list_hosts
-
-- name: List Hosts
- hosts: localhost
- become: no
- connection: local
- gather_facts: no
- vars_files:
- - vars.yml
- tasks:
- debug:
msg: "{{ hostvars | oo_select_keys(groups[scratch_group] | default([])) | oo_pretty_print_cluster('meta-') }}"
diff --git a/playbooks/openstack/openshift-cluster/update.yml b/playbooks/openstack/openshift-cluster/update.yml
index 332f27da7..6d2af3d26 100644
--- a/playbooks/openstack/openshift-cluster/update.yml
+++ b/playbooks/openstack/openshift-cluster/update.yml
@@ -7,7 +7,7 @@
- add_host:
name: "{{ item }}"
groups: l_oo_all_hosts
- with_items: g_all_hosts
+ with_items: "{{ g_all_hosts }}"
- hosts: l_oo_all_hosts
gather_facts: no
@@ -27,7 +27,7 @@
groups: oo_hosts_to_update
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
ansible_become: "{{ deployment_vars[deployment_type].become }}"
- with_items: g_all_hosts | default([])
+ with_items: "{{ g_all_hosts | default([]) }}"
- include: ../../common/openshift-cluster/update_repos_and_packages.yml