Diffstat (limited to 'playbooks')
-rw-r--r-- playbooks/adhoc/create_pv/pv-template.j2 | 2
-rw-r--r-- playbooks/adhoc/metrics_setup/README.md | 25
-rw-r--r-- playbooks/adhoc/metrics_setup/files/metrics-deployer-setup.yaml | 37
-rw-r--r-- playbooks/adhoc/metrics_setup/files/metrics.yaml | 116
-rw-r--r-- playbooks/adhoc/metrics_setup/playbooks/install.yml | 36
-rw-r--r-- playbooks/adhoc/metrics_setup/playbooks/master_config_facts.yml | 10
-rw-r--r-- playbooks/adhoc/metrics_setup/playbooks/uninstall.yml | 16
-rwxr-xr-x playbooks/adhoc/sdn_restart/oo-sdn-restart.yml | 3
-rw-r--r-- playbooks/adhoc/uninstall.yml | 13
-rwxr-xr-x playbooks/adhoc/zabbix_setup/oo-config-zaio.yml | 4
-rw-r--r-- playbooks/aws/openshift-cluster/cluster_hosts.yml | 2
-rw-r--r-- playbooks/byo/openshift-cluster/cluster_hosts.yml | 4
-rw-r--r-- playbooks/byo/openshift-cluster/upgrades/v3_0_minor/upgrade.yml | 1
-rw-r--r-- playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/README.md | 1
-rw-r--r-- playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml | 1
-rw-r--r-- playbooks/byo/openshift-cluster/upgrades/v3_1_minor/upgrade.yml | 1
-rw-r--r-- playbooks/byo/openshift-cluster/upgrades/v3_1_to_v3_2/README.md | 16
-rw-r--r-- playbooks/byo/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml | 19
-rw-r--r-- playbooks/common/openshift-cluster/additional_config.yml | 1
-rw-r--r-- playbooks/common/openshift-cluster/config.yml | 22
-rw-r--r-- playbooks/common/openshift-cluster/evaluate_groups.yml | 8
-rw-r--r-- playbooks/common/openshift-cluster/upgrades/files/ensure_system_units_have_version.sh | 51
-rw-r--r-- playbooks/common/openshift-cluster/upgrades/files/openshift_container_versions.sh | 22
-rw-r--r-- playbooks/common/openshift-cluster/upgrades/files/rpm_versions.sh | 8
-rw-r--r-- playbooks/common/openshift-cluster/upgrades/files/versions.sh | 9
-rw-r--r-- playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml | 9
-rw-r--r-- playbooks/common/openshift-cluster/upgrades/v3_1_minor/pre.yml | 3
-rw-r--r-- playbooks/common/openshift-cluster/upgrades/v3_1_minor/upgrade.yml | 4
-rw-r--r-- playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/containerized_upgrade.yml | 9
-rw-r--r-- playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker_upgrade.yml | 14
l--------- playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/filter_plugins | 1
l--------- playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/library | 1
l--------- playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/lookup_plugins | 1
-rw-r--r-- playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/post.yml | 57
-rw-r--r-- playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml | 286
l--------- playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/roles | 1
-rw-r--r-- playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/rpm_upgrade.yml | 6
-rw-r--r-- playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml | 159
-rw-r--r-- playbooks/common/openshift-cluster/validate_hostnames.yml | 8
-rw-r--r-- playbooks/common/openshift-docker/config.yml | 9
l--------- playbooks/common/openshift-docker/filter_plugins | 1
l--------- playbooks/common/openshift-docker/lookup_plugins | 1
l--------- playbooks/common/openshift-docker/roles | 1
-rw-r--r-- playbooks/common/openshift-etcd/config.yml | 18
-rw-r--r-- playbooks/common/openshift-master/config.yml | 54
-rw-r--r-- playbooks/common/openshift-master/scaleup.yml | 5
-rw-r--r-- playbooks/common/openshift-node/config.yml | 185
-rw-r--r-- playbooks/gce/openshift-cluster/cluster_hosts.yml | 2
-rw-r--r-- playbooks/libvirt/openshift-cluster/cluster_hosts.yml | 2
-rw-r--r-- playbooks/libvirt/openshift-cluster/launch.yml | 2
-rw-r--r-- playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml | 8
-rw-r--r-- playbooks/libvirt/openshift-cluster/templates/domain.xml | 12
-rw-r--r-- playbooks/libvirt/openshift-cluster/templates/user-data | 9
-rw-r--r-- playbooks/libvirt/openshift-cluster/terminate.yml | 8
-rw-r--r-- playbooks/libvirt/openshift-cluster/vars.yml | 6
-rw-r--r-- playbooks/openstack/openshift-cluster/cluster_hosts.yml | 2
56 files changed, 1120 insertions, 192 deletions
diff --git a/playbooks/adhoc/create_pv/pv-template.j2 b/playbooks/adhoc/create_pv/pv-template.j2
index 5654ef6c4..df082614b 100644
--- a/playbooks/adhoc/create_pv/pv-template.j2
+++ b/playbooks/adhoc/create_pv/pv-template.j2
@@ -10,7 +10,7 @@ spec:
storage: {{ vol_size }}Gi
accessModes:
- ReadWriteOnce
- persistentVolumeReclaimPolicy: Recycle
+ persistentVolumeReclaimPolicy: Retain
awsElasticBlockStore:
volumeID: aws://{{ vol_az }}/{{ vol_id }}
fsType: ext4
diff --git a/playbooks/adhoc/metrics_setup/README.md b/playbooks/adhoc/metrics_setup/README.md
new file mode 100644
index 000000000..71aa1e109
--- /dev/null
+++ b/playbooks/adhoc/metrics_setup/README.md
@@ -0,0 +1,25 @@
+## Playbook for adding [Metrics](https://github.com/openshift/origin-metrics) to OpenShift
+
+See the OSE Ansible [readme](https://github.com/openshift/openshift-ansible/blob/master/README_OSE.md) for general install instructions. This playbook has been tested on an OSE 3.1/RHEL 7.2 cluster.
+
+
+Add the following vars to the `[OSEv3:vars]` section of your inventory file:
+```
+[OSEv3:vars]
+# Enable cluster metrics
+use_cluster_metrics=true
+metrics_external_service=< external service name for metrics >
+metrics_image_prefix=rcm-img-docker01.build.eng.bos.redhat.com:5001/openshift3/
+metrics_image_version=3.1.0
+```
+
+Run the playbook:
+```
+ansible-playbook -i $INVENTORY_FILE playbooks/install.yml
+```
+
+## Contact
+Email: hawkular-dev@lists.jboss.org
+
+## Credits
+Playbook adapted from install shell scripts by Matt Mahoney
diff --git a/playbooks/adhoc/metrics_setup/files/metrics-deployer-setup.yaml b/playbooks/adhoc/metrics_setup/files/metrics-deployer-setup.yaml
new file mode 100644
index 000000000..f70e0b18b
--- /dev/null
+++ b/playbooks/adhoc/metrics_setup/files/metrics-deployer-setup.yaml
@@ -0,0 +1,37 @@
+#!/bin/bash
+#
+# Copyright 2014-2015 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+apiVersion: "v1"
+kind: "List"
+metadata:
+ name: metrics-deployer-setup
+ annotations:
+ description: "Required dependencies for the metrics deployer pod."
+ tags: "infrastructure"
+labels:
+ metrics-infra: deployer
+ provider: openshift
+ component: deployer
+items:
+-
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: metrics-deployer
+ secrets:
+ - name: metrics-deployer
diff --git a/playbooks/adhoc/metrics_setup/files/metrics.yaml b/playbooks/adhoc/metrics_setup/files/metrics.yaml
new file mode 100644
index 000000000..d823b2587
--- /dev/null
+++ b/playbooks/adhoc/metrics_setup/files/metrics.yaml
@@ -0,0 +1,116 @@
+#!/bin/bash
+#
+# Copyright 2014-2015 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+apiVersion: "v1"
+kind: "Template"
+metadata:
+ name: metrics-deployer-template
+ annotations:
+ description: "Template for deploying the required Metrics integration. Requires cluster-admin 'metrics-deployer' service account and 'metrics-deployer' secret."
+ tags: "infrastructure"
+labels:
+ metrics-infra: deployer
+ provider: openshift
+ component: deployer
+objects:
+-
+ apiVersion: v1
+ kind: Pod
+ metadata:
+ generateName: metrics-deployer-
+ spec:
+ containers:
+ - image: ${IMAGE_PREFIX}metrics-deployer:${IMAGE_VERSION}
+ name: deployer
+ volumeMounts:
+ - name: secret
+ mountPath: /secret
+ readOnly: true
+ - name: empty
+ mountPath: /etc/deploy
+ env:
+ - name: PROJECT
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: IMAGE_PREFIX
+ value: ${IMAGE_PREFIX}
+ - name: IMAGE_VERSION
+ value: ${IMAGE_VERSION}
+ - name: PUBLIC_MASTER_URL
+ value: ${PUBLIC_MASTER_URL}
+ - name: MASTER_URL
+ value: ${MASTER_URL}
+ - name: REDEPLOY
+ value: ${REDEPLOY}
+ - name: USE_PERSISTENT_STORAGE
+ value: ${USE_PERSISTENT_STORAGE}
+ - name: HAWKULAR_METRICS_HOSTNAME
+ value: ${HAWKULAR_METRICS_HOSTNAME}
+ - name: CASSANDRA_NODES
+ value: ${CASSANDRA_NODES}
+ - name: CASSANDRA_PV_SIZE
+ value: ${CASSANDRA_PV_SIZE}
+ - name: METRIC_DURATION
+ value: ${METRIC_DURATION}
+ dnsPolicy: ClusterFirst
+ restartPolicy: Never
+ serviceAccount: metrics-deployer
+ volumes:
+ - name: empty
+ emptyDir: {}
+ - name: secret
+ secret:
+ secretName: metrics-deployer
+parameters:
+-
+ description: 'Specify prefix for metrics components; e.g. for "openshift/origin-metrics-deployer:v1.1", set prefix "openshift/origin-"'
+ name: IMAGE_PREFIX
+ value: "hawkular/"
+-
+ description: 'Specify version for metrics components; e.g. for "openshift/origin-metrics-deployer:v1.1", set version "v1.1"'
+ name: IMAGE_VERSION
+ value: "0.7.0-SNAPSHOT"
+-
+ description: "Internal URL for the master, for authentication retrieval"
+ name: MASTER_URL
+ value: "https://kubernetes.default.svc:443"
+-
+ description: "External hostname where clients will reach Hawkular Metrics"
+ name: HAWKULAR_METRICS_HOSTNAME
+ required: true
+-
+ description: "If set to true the deployer will try and delete all the existing components before trying to redeploy."
+ name: REDEPLOY
+ value: "false"
+-
+ description: "Set to true for persistent storage, set to false to use non persistent storage"
+ name: USE_PERSISTENT_STORAGE
+ value: "true"
+-
+ description: "The number of Cassandra Nodes to deploy for the initial cluster"
+ name: CASSANDRA_NODES
+ value: "1"
+-
+ description: "The persistent volume size for each of the Cassandra nodes"
+ name: CASSANDRA_PV_SIZE
+ value: "1Gi"
+-
+ description: "How many days metrics should be stored for."
+ name: METRIC_DURATION
+ value: "7"
diff --git a/playbooks/adhoc/metrics_setup/playbooks/install.yml b/playbooks/adhoc/metrics_setup/playbooks/install.yml
new file mode 100644
index 000000000..235f775ef
--- /dev/null
+++ b/playbooks/adhoc/metrics_setup/playbooks/install.yml
@@ -0,0 +1,36 @@
+---
+- include: master_config_facts.yml
+- name: "Install metrics"
+ hosts: masters
+ vars:
+ metrics_public_url: "https://{{ metrics_external_service }}/hawkular/metrics"
+ tasks:
+ - name: "Add metrics url to master config"
+ lineinfile: "state=present dest=/etc/origin/master/master-config.yaml regexp='^\ \ metricsPublicURL' insertbefore='^\ \ publicURL' line='\ \ metricsPublicURL: {{ metrics_public_url }}'"
+
+ - name: "Restart master service"
+ service: name=atomic-openshift-master state=restarted
+
+ - name: "Copy metrics-deployer yaml to remote"
+ copy: "src=../files/metrics-deployer-setup.yaml dest=/tmp/metrics-deployer-setup.yaml force=yes"
+
+ - name: "Add metrics-deployer"
+ command: "{{item}}"
+ with_items:
+ - oc project openshift-infra
+ - oc create -f /tmp/metrics-deployer-setup.yaml
+
+ - name: "Give metrics-deployer SA permissions"
+ command: "oadm policy add-role-to-user edit system:serviceaccount:openshift-infra:metrics-deployer"
+
+ - name: "Give heapster SA permissions"
+ command: "oadm policy add-cluster-role-to-user cluster-reader system:serviceaccount:openshift-infra:heapster"
+
+ - name: "Create metrics-deployer secret"
+ command: "oc secrets new metrics-deployer nothing=/dev/null"
+
+ - name: "Copy metrics.yaml to remote"
+ copy: "src=../files/metrics.yaml dest=/tmp/metrics.yaml force=yes"
+
+ - name: "Process yml template"
+ shell: "oc process -f /tmp/metrics.yaml -v MASTER_URL={{ masterPublicURL }},REDEPLOY=true,HAWKULAR_METRICS_HOSTNAME={{ metrics_external_service }},IMAGE_PREFIX={{ metrics_image_prefix }},IMAGE_VERSION={{ metrics_image_version }},USE_PERSISTENT_STORAGE=false | oc create -f -"
diff --git a/playbooks/adhoc/metrics_setup/playbooks/master_config_facts.yml b/playbooks/adhoc/metrics_setup/playbooks/master_config_facts.yml
new file mode 100644
index 000000000..65de11bc4
--- /dev/null
+++ b/playbooks/adhoc/metrics_setup/playbooks/master_config_facts.yml
@@ -0,0 +1,10 @@
+---
+- name: "Load master config"
+ hosts: masters
+ vars:
+ master_config_file: "/tmp/ansible-metrics-{{ ansible_hostname }}"
+ tasks:
+ - name: "Fetch master config from remote"
+ fetch: "src=/etc/origin/master/master-config.yaml dest={{ master_config_file }} flat=yes"
+ - name: "Load config"
+ include_vars: "{{ master_config_file }}"
diff --git a/playbooks/adhoc/metrics_setup/playbooks/uninstall.yml b/playbooks/adhoc/metrics_setup/playbooks/uninstall.yml
new file mode 100644
index 000000000..06c4586ee
--- /dev/null
+++ b/playbooks/adhoc/metrics_setup/playbooks/uninstall.yml
@@ -0,0 +1,16 @@
+---
+- name: "Uninstall metrics"
+ hosts: masters
+ tasks:
+ - name: "Remove metrics url from master config"
+ lineinfile: "state=absent dest=/etc/origin/master/master-config.yaml regexp='^\ \ metricsPublicURL'"
+
+ - name: "Delete metrics objects"
+ command: "{{item}}"
+ with_items:
+ - oc delete all --selector=metrics-infra
+ # - oc delete secrets --selector=metrics-infra
+ # - oc delete sa --selector=metrics-infra
+ - oc delete templates --selector=metrics-infra
+ - oc delete sa metrics-deployer
+ - oc delete secret metrics-deployer
diff --git a/playbooks/adhoc/sdn_restart/oo-sdn-restart.yml b/playbooks/adhoc/sdn_restart/oo-sdn-restart.yml
index 0dc021fbc..08e8f8968 100755
--- a/playbooks/adhoc/sdn_restart/oo-sdn-restart.yml
+++ b/playbooks/adhoc/sdn_restart/oo-sdn-restart.yml
@@ -49,5 +49,4 @@
name: "{{ item }}"
state: restarted
with_items:
- - oso-f22-host-monitoring
- - oso-rhel7-zagg-client
+ - oso-rhel7-host-monitoring
diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml
index 8b620d9ad..680964d80 100644
--- a/playbooks/adhoc/uninstall.yml
+++ b/playbooks/adhoc/uninstall.yml
@@ -55,7 +55,7 @@
- name: Stop additional atomic services
service: name={{ item }} state=stopped
- when: is_atomic | bool
+ when: is_containerized | bool
with_items:
- etcd_container
failed_when: false
@@ -73,9 +73,14 @@
- atomic-openshift-master
- atomic-openshift-node
- atomic-openshift-sdn-ovs
+ - cockpit-bridge
+ - cockpit-docker
+ - cockpit-shell
+ - cockpit-ws
- corosync
- etcd
- haproxy
+ - kubernetes-client
- openshift
- openshift-master
- openshift-node
@@ -132,7 +137,11 @@
register: exited_containers_to_delete
with_items:
- aep3.*/aep
+ - aep3.*/node
+ - aep3.*/openvswitch
- openshift3/ose
+ - openshift3/node
+ - openshift3/openvswitch
- openshift/origin
- shell: "docker rm {{ item.stdout_lines | join(' ') }}"
@@ -147,6 +156,7 @@
with_items:
- registry\.access\..*redhat\.com/openshift3
- registry\.access\..*redhat\.com/aep3
+ - registry\.qe\.openshift\.com/.*
- registry\.access\..*redhat\.com/rhel7/etcd
- docker.io/openshift
@@ -192,6 +202,7 @@
- /etc/sysconfig/atomic-openshift-node
- /etc/sysconfig/openshift-master
- /etc/sysconfig/openshift-node
+ - /etc/sysconfig/openvswitch
- /etc/sysconfig/origin-master
- /etc/sysconfig/origin-master-api
- /etc/sysconfig/origin-master-controllers
diff --git a/playbooks/adhoc/zabbix_setup/oo-config-zaio.yml b/playbooks/adhoc/zabbix_setup/oo-config-zaio.yml
index ec28564cf..2f1d003ff 100755
--- a/playbooks/adhoc/zabbix_setup/oo-config-zaio.yml
+++ b/playbooks/adhoc/zabbix_setup/oo-config-zaio.yml
@@ -8,8 +8,12 @@
g_server: http://localhost/zabbix/api_jsonrpc.php
g_user: Admin
g_password: zabbix
+ g_zbx_scriptrunner_user: scriptrunner
+ g_zbx_scriptrunner_bastion_host: specialhost.example.com
roles:
- role: os_zabbix
ozb_server: "{{ g_server }}"
ozb_user: "{{ g_user }}"
ozb_password: "{{ g_password }}"
+ ozb_scriptrunner_user: "{{ g_zbx_scriptrunner_user }}"
+ ozb_scriptrunner_bastion_host: "{{ g_zbx_scriptrunner_bastion_host }}"
diff --git a/playbooks/aws/openshift-cluster/cluster_hosts.yml b/playbooks/aws/openshift-cluster/cluster_hosts.yml
index 9a3361919..119b376aa 100644
--- a/playbooks/aws/openshift-cluster/cluster_hosts.yml
+++ b/playbooks/aws/openshift-cluster/cluster_hosts.yml
@@ -16,6 +16,6 @@ g_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_node'] | defa
g_new_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_new_node'] | default([])) }}"
-g_infra_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type_infra']) | default([]) }}"
+g_infra_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type_infra'] | default([])) }}"
g_compute_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type_compute'] | default([])) }}"
diff --git a/playbooks/byo/openshift-cluster/cluster_hosts.yml b/playbooks/byo/openshift-cluster/cluster_hosts.yml
index 8893db245..658204c17 100644
--- a/playbooks/byo/openshift-cluster/cluster_hosts.yml
+++ b/playbooks/byo/openshift-cluster/cluster_hosts.yml
@@ -14,4 +14,6 @@ g_new_node_hosts: "{{ groups.new_nodes | default([]) }}"
g_nfs_hosts: "{{ groups.nfs | default([]) }}"
g_all_hosts: "{{ g_master_hosts | union(g_node_hosts) | union(g_etcd_hosts)
- | union(g_lb_hosts) | default([]) }}"
+ | union(g_lb_hosts) | union(g_nfs_hosts)
+ | union(g_new_node_hosts)| union(g_new_master_hosts)
+ | default([]) }}"
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_0_minor/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_0_minor/upgrade.yml
index b52456dcd..628a07752 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_0_minor/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_0_minor/upgrade.yml
@@ -5,6 +5,7 @@
vars:
g_etcd_hosts: "{{ groups.etcd | default([]) }}"
g_master_hosts: "{{ groups.masters | default([]) }}"
+ g_new_master_hosts: []
g_nfs_hosts: "{{ groups.nfs | default([]) }}"
g_node_hosts: "{{ groups.nodes | default([]) }}"
g_lb_hosts: "{{ groups.lb | default([]) }}"
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/README.md b/playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/README.md
index c434be5b7..eb1f481d7 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/README.md
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/README.md
@@ -4,7 +4,6 @@
This playbook currently performs the
following steps.
-**TODO: update for current steps**
* Upgrade and restart master services
* Upgrade and restart node services
* Modifies the subset of the configuration necessary
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
index e07e2b88e..8fadd2ce7 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
@@ -5,6 +5,7 @@
vars:
g_etcd_hosts: "{{ groups.etcd | default([]) }}"
g_master_hosts: "{{ groups.masters | default([]) }}"
+ g_new_master_hosts: []
g_nfs_hosts: "{{ groups.nfs | default([]) }}"
g_node_hosts: "{{ groups.nodes | default([]) }}"
g_lb_hosts: "{{ groups.lb | default([]) }}"
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_1_minor/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_1_minor/upgrade.yml
index 20fa9b10f..42078584b 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_1_minor/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_1_minor/upgrade.yml
@@ -3,6 +3,7 @@
vars:
g_etcd_hosts: "{{ groups.etcd | default([]) }}"
g_master_hosts: "{{ groups.masters | default([]) }}"
+ g_new_master_hosts: []
g_nfs_hosts: "{{ groups.nfs | default([]) }}"
g_node_hosts: "{{ groups.nodes | default([]) }}"
g_lb_hosts: "{{ groups.lb | default([]) }}"
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_1_to_v3_2/README.md b/playbooks/byo/openshift-cluster/upgrades/v3_1_to_v3_2/README.md
new file mode 100644
index 000000000..62577c3df
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_1_to_v3_2/README.md
@@ -0,0 +1,16 @@
+# v3.1 to v3.2 upgrade playbook
+
+## Overview
+This playbook currently performs the
+following steps.
+
+ * Upgrade and restart master services
+ * Upgrade and restart node services
+ * Modifies the subset of the configuration necessary
+ * Applies the latest cluster policies
+ * Updates the default router if one exists
+ * Updates the default registry if one exists
+ * Updates image streams and quickstarts
+
+## Usage
+ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml
new file mode 100644
index 000000000..0c91b51d6
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml
@@ -0,0 +1,19 @@
+---
+- include: ../../../../common/openshift-cluster/evaluate_groups.yml
+ vars:
+ g_etcd_hosts: "{{ groups.etcd | default([]) }}"
+ g_master_hosts: "{{ groups.masters | default([]) }}"
+ g_new_master_hosts: []
+ g_nfs_hosts: "{{ groups.nfs | default([]) }}"
+ g_node_hosts: "{{ groups.nodes | default([]) }}"
+ g_lb_hosts: "{{ groups.lb | default([]) }}"
+ openshift_cluster_id: "{{ cluster_id | default('default') }}"
+ openshift_deployment_type: "{{ deployment_type }}"
+- include: ../../../../common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml
+ vars:
+ openshift_deployment_type: "{{ deployment_type }}"
+- include: ../../../../common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml
+ vars:
+ openshift_deployment_type: "{{ deployment_type }}"
+- include: ../../../openshift-master/restart.yml
+- include: ../../../../common/openshift-cluster/upgrades/v3_1_to_v3_2/post.yml
diff --git a/playbooks/common/openshift-cluster/additional_config.yml b/playbooks/common/openshift-cluster/additional_config.yml
index 0f4e6ab88..1ac78468a 100644
--- a/playbooks/common/openshift-cluster/additional_config.yml
+++ b/playbooks/common/openshift-cluster/additional_config.yml
@@ -52,4 +52,5 @@
- role: openshift_router
when: deploy_infra | bool
- role: openshift_registry
+ registry_volume_claim: "{{ openshift.hosted.registry.storage.volume.name }}-claim"
when: deploy_infra | bool and attach_registry_volume | bool
diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml
index 23c8f039e..2411e7360 100644
--- a/playbooks/common/openshift-cluster/config.yml
+++ b/playbooks/common/openshift-cluster/config.yml
@@ -3,7 +3,27 @@
- include: validate_hostnames.yml
-- include: ../openshift-docker/config.yml
+- name: Set oo_options
+ hosts: oo_hosts_to_config
+ tasks:
+ - set_fact:
+ openshift_docker_additional_registries: "{{ lookup('oo_option', 'docker_additional_registries') }}"
+ when: openshift_docker_additional_registries is not defined
+ - set_fact:
+ openshift_docker_insecure_registries: "{{ lookup('oo_option', 'docker_insecure_registries') }}"
+ when: openshift_docker_insecure_registries is not defined
+ - set_fact:
+ openshift_docker_blocked_registries: "{{ lookup('oo_option', 'docker_blocked_registries') }}"
+ when: openshift_docker_blocked_registries is not defined
+ - set_fact:
+ openshift_docker_options: "{{ lookup('oo_option', 'docker_options') }}"
+ when: openshift_docker_options is not defined
+ - set_fact:
+ openshift_docker_log_driver: "{{ lookup('oo_option', 'docker_log_driver') }}"
+ when: openshift_docker_log_driver is not defined
+ - set_fact:
+ openshift_docker_log_options: "{{ lookup('oo_option', 'docker_log_options') }}"
+ when: openshift_docker_log_options is not defined
- include: ../openshift-etcd/config.yml
diff --git a/playbooks/common/openshift-cluster/evaluate_groups.yml b/playbooks/common/openshift-cluster/evaluate_groups.yml
index 432a92b49..ce0134c44 100644
--- a/playbooks/common/openshift-cluster/evaluate_groups.yml
+++ b/playbooks/common/openshift-cluster/evaluate_groups.yml
@@ -29,6 +29,14 @@
msg: The nfs group must be limited to one host
when: (groups[g_nfs_hosts] | default([])) | length > 1
+ - name: Evaluate oo_all_hosts
+ add_host:
+ name: "{{ item }}"
+ groups: oo_all_hosts
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_sudo: "{{ g_sudo | default(omit) }}"
+ with_items: "{{ g_all_hosts | default([]) }}"
+
- name: Evaluate oo_masters
add_host:
name: "{{ item }}"
diff --git a/playbooks/common/openshift-cluster/upgrades/files/ensure_system_units_have_version.sh b/playbooks/common/openshift-cluster/upgrades/files/ensure_system_units_have_version.sh
new file mode 100644
index 000000000..239f43314
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/files/ensure_system_units_have_version.sh
@@ -0,0 +1,51 @@
+#!/bin/bash
+set -e
+
+SERVICE_TYPE=$1
+DEPLOYMENT_TYPE=$2
+VERSION="v${3}"
+
+add_image_version_to_sysconfig () {
+ unit_name=$2
+ sysconfig_file=/etc/sysconfig/${unit_name}
+
+ if ! grep IMAGE_VERSION ${sysconfig_file}; then
+ sed -i "/CONFIG_FILE/a IMAGE_VERSION=${1}" ${sysconfig_file}
+ else
+ sed -i "s/\(IMAGE_VERSION=\).*/\1${1}/" ${sysconfig_file}
+ fi
+}
+
+add_image_version_to_unit () {
+ deployment_type=$1
+ unit_file=$2
+
+ if ! grep IMAGE_VERSION $unit_file; then
+ image_namespace="openshift/"
+ if [ $deployment_type == "atomic-enterprise" ]; then
+ image_namespace="aep3/"
+ elif [ $deployment_type == "openshift-enterprise" ]; then
+ image_namespace="openshift3/"
+ fi
+
+ sed -i "s|\(${image_namespace}[a-zA-Z0-9]\+\)|\1:\${IMAGE_VERSION}|" $unit_file
+ fi
+}
+
+for unit_file in $(ls /etc/systemd/system/${SERVICE_TYPE}*.service); do
+ unit_name=$(basename -s .service ${unit_file})
+ add_image_version_to_sysconfig $VERSION $unit_name
+ add_image_version_to_unit $DEPLOYMENT_TYPE $unit_file
+done
+
+if [ -e /etc/sysconfig/openvswitch ]; then
+ add_image_version_to_sysconfig $VERSION openvswitch
+else
+ echo IMAGE_VERSION=${VERSION} > /etc/sysconfig/openvswitch
+fi
+if ! grep EnvironmentFile /etc/systemd/system/openvswitch.service > /dev/null; then
+ sed -i "/Service/a EnvironmentFile=/etc/sysconfig/openvswitch" /etc/systemd/system/openvswitch.service
+fi
+add_image_version_to_unit $DEPLOYMENT_TYPE /etc/systemd/system/openvswitch.service
+
+systemctl daemon-reload
diff --git a/playbooks/common/openshift-cluster/upgrades/files/openshift_container_versions.sh b/playbooks/common/openshift-cluster/upgrades/files/openshift_container_versions.sh
new file mode 100644
index 000000000..7a1edf38f
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/files/openshift_container_versions.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+
+# Here we don't really care if this is a master, api, controller or node image.
+# We just need to know the version of one of them.
+unit_file=$(ls /etc/systemd/system/${1}*.service | head -n1)
+installed_container_name=$(basename -s .service ${unit_file})
+installed=$(docker exec ${installed_container_name} openshift version 2> /dev/null | grep openshift | awk '{ print $2 }' | cut -f1 -d"-" | tr -d 'v')
+
+if [ ${1} == "origin" ]; then
+ image_name="openshift/origin"
+elif grep aep $unit_file 2>&1 > /dev/null; then
+ image_name="aep3/aep"
+elif grep openshift3 $unit_file 2>&1 > /dev/null; then
+ image_name="openshift3/ose"
+fi
+
+docker pull ${image_name} 2>&1 > /dev/null
+available=$(docker run --rm ${image_name} version 2> /dev/null | grep openshift | awk '{ print $2 }' | cut -f1 -d"-" | tr -d 'v')
+
+echo "---"
+echo "curr_version: ${installed}"
+echo "avail_version: ${available}"
diff --git a/playbooks/common/openshift-cluster/upgrades/files/rpm_versions.sh b/playbooks/common/openshift-cluster/upgrades/files/rpm_versions.sh
new file mode 100644
index 000000000..a2a9579b5
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/files/rpm_versions.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
+
+installed=$(yum list installed -e 0 -q "$@" 2>&1 | tail -n +2 | awk '{ print $2 }' | sort -r | tr '\n' ' ')
+available=$(yum list available -e 0 -q "$@" 2>&1 | tail -n +2 | grep -v 'el7ose' | awk '{ print $2 }' | sort -r | tr '\n' ' ')
+
+echo "---"
+echo "curr_version: ${installed}"
+echo "avail_version: ${available}"
diff --git a/playbooks/common/openshift-cluster/upgrades/files/versions.sh b/playbooks/common/openshift-cluster/upgrades/files/versions.sh
deleted file mode 100644
index 3a1a8ebb1..000000000
--- a/playbooks/common/openshift-cluster/upgrades/files/versions.sh
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/bin/bash
-
-yum_installed=$(yum list installed -e 0 -q "$@" 2>&1 | tail -n +2 | awk '{ print $2 }' | sort -r | tr '\n' ' ')
-
-yum_available=$(yum list available -e 0 -q "$@" 2>&1 | tail -n +2 | grep -v 'el7ose' | awk '{ print $2 }' | sort -r | tr '\n' ' ')
-
-echo "---"
-echo "curr_version: ${yum_installed}"
-echo "avail_version: ${yum_available}"
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
index 0fb38f32e..31ba8c4a9 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
@@ -66,7 +66,7 @@
g_new_service_name: "{{ 'origin' if deployment_type =='origin' else 'atomic-openshift' }}"
- name: Determine available versions
- script: ../files/versions.sh {{ g_new_service_name }} openshift
+ script: ../files/rpm_versions.sh {{ g_new_service_name }} openshift
register: g_versions_result
- set_fact:
@@ -212,13 +212,10 @@
- name: Update deployment type
hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config
+ vars:
+ openshift_deployment_type: "{{ deployment_type }}"
roles:
- openshift_facts
- post_tasks:
- - openshift_facts:
- role: common
- local_facts:
- deployment_type: "{{ deployment_type }}"
- name: Update master facts
hosts: oo_masters_to_config
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/pre.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/pre.yml
index 12b9c84d3..66935e061 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/pre.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/pre.yml
@@ -36,12 +36,13 @@
tasks:
- name: Clean package cache
command: "{{ ansible_pkg_mgr }} clean all"
+ when: not openshift.common.is_atomic | bool
- set_fact:
g_new_service_name: "{{ 'origin' if deployment_type =='origin' else 'atomic-openshift' }}"
- name: Determine available versions
- script: ../files/versions.sh {{ g_new_service_name }}
+ script: ../files/rpm_versions.sh {{ g_new_service_name }}
register: g_versions_result
- set_fact:
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/upgrade.yml
index dbf746f12..54bb251f7 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/upgrade.yml
@@ -13,10 +13,11 @@
tasks:
- name: Upgrade master packages
command: "{{ ansible_pkg_mgr}} update -y {{ openshift.common.service_type }}-master{{ openshift_version }}"
+ when: not openshift.common.is_containerized | bool
- name: Ensure python-yaml present for config upgrade
action: "{{ ansible_pkg_mgr }} name=PyYAML state=present"
- when: not openshift.common.is_atomic | bool
+ when: not openshift.common.is_containerized | bool
# Currently 3.1.1 does not have any new configuration settings
#
@@ -63,6 +64,7 @@
tasks:
- name: Upgrade node packages
command: "{{ ansible_pkg_mgr }} update -y {{ openshift.common.service_type }}-node{{ openshift_version }}"
+ when: not openshift.common.is_containerized | bool
- name: Restart node service
service: name="{{ openshift.common.service_type }}-node" state=restarted
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/containerized_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/containerized_upgrade.yml
new file mode 100644
index 000000000..696994688
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/containerized_upgrade.yml
@@ -0,0 +1,9 @@
+- name: Update system_units
+ script: ../files/ensure_system_units_have_version.sh {{ openshift.common.service_type }} {{ openshift.common.deployment_type }} {{ g_new_version }}
+
+- name: Verifying the correct version was configured
+ command: grep {{ verify_upgrade_version }} {{ item }}
+ with_items:
+ - /etc/sysconfig/openvswitch
+ - /etc/sysconfig/{{ openshift.common.service_type }}*
+ when: verify_upgrade_version is defined
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker_upgrade.yml
new file mode 100644
index 000000000..d9177e8a0
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker_upgrade.yml
@@ -0,0 +1,14 @@
+- name: Check if Docker is installed
+ command: rpm -q docker
+ register: pkg_check
+ failed_when: pkg_check.rc > 1
+ changed_when: no
+
+- name: Upgrade Docker
+ command: "{{ ansible_pkg_mgr}} update -y docker"
+ when: pkg_check.rc == 0 and g_docker_version.curr_version | version_compare('1.9','<')
+ register: docker_upgrade
+
+- name: Restart Docker
+ service: name=docker state=restarted
+ when: docker_upgrade | changed
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/filter_plugins b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/filter_plugins
new file mode 120000
index 000000000..27ddaa18b
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/filter_plugins
@@ -0,0 +1 @@
+../../../../../filter_plugins
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/library b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/library
new file mode 120000
index 000000000..53bed9684
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/library
@@ -0,0 +1 @@
+../library
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/lookup_plugins b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/lookup_plugins
new file mode 120000
index 000000000..cf407f69b
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/lookup_plugins
@@ -0,0 +1 @@
+../../../../../lookup_plugins
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/post.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/post.yml
new file mode 100644
index 000000000..3fd97ac14
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/post.yml
@@ -0,0 +1,57 @@
+---
+###############################################################################
+# Post upgrade - Upgrade default router, default registry and examples
+###############################################################################
+- name: Upgrade default router and default registry
+ hosts: oo_first_master
+ vars:
+ openshift_deployment_type: "{{ deployment_type }}"
+ registry_image: "{{ openshift.master.registry_url | replace( '${component}', 'docker-registry' ) | replace ( '${version}', 'v' + g_new_version ) }}"
+ router_image: "{{ openshift.master.registry_url | replace( '${component}', 'haproxy-router' ) | replace ( '${version}', 'v' + g_new_version ) }}"
+ oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig"
+ roles:
+ # Create the new templates shipped in 3.2, existing templates are left
+ # unmodified. This prevents the subsequent role definition for
+ # openshift_examples from failing when trying to replace templates that do
+ # not already exist. We could have potentially done a replace --force to
+ # create and update in one step.
+ - openshift_examples
+ # Update the existing templates
+ - role: openshift_examples
+ openshift_examples_import_command: replace
+ pre_tasks:
+ - name: Collect all routers
+ command: >
+ {{ oc_cmd }} get pods --all-namespaces -l 'router' -o json
+ register: all_routers
+ failed_when: false
+ changed_when: false
+
+ - set_fact: haproxy_routers="{{ (all_routers.stdout | from_json)['items'] | oo_pods_match_component(openshift_deployment_type, 'haproxy-router') | oo_select_keys_from_list(['metadata']) }}"
+ when: all_routers.rc == 0
+
+ - set_fact: haproxy_routers=[]
+ when: all_routers.rc != 0
+
+ - name: Update router image to current version
+ when: all_routers.rc == 0
+ command: >
+ {{ oc_cmd }} patch dc/{{ item['labels']['deploymentconfig'] }} -p
+ '{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}","livenessProbe":{"tcpSocket":null,"httpGet":{"path": "/healthz", "port": 1936, "host": "localhost", "scheme": "HTTP"},"initialDelaySeconds":10,"timeoutSeconds":1}}]}}}}'
+ --api-version=v1
+ with_items: haproxy_routers
+
+ - name: Check for default registry
+ command: >
+ {{ oc_cmd }} get -n default dc/docker-registry
+ register: _default_registry
+ failed_when: false
+ changed_when: false
+
+ - name: Update registry image to current version
+ when: _default_registry.rc == 0
+ command: >
+ {{ oc_cmd }} patch dc/docker-registry -p
+ '{"spec":{"template":{"spec":{"containers":[{"name":"registry","image":"{{ registry_image }}"}]}}}}'
+ --api-version=v1
+
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml
new file mode 100644
index 000000000..668a80996
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml
@@ -0,0 +1,286 @@
+---
+###############################################################################
+# Evaluate host groups and gather facts
+###############################################################################
+- name: Load openshift_facts
+ hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config:oo_lb_to_config
+ roles:
+ - openshift_facts
+
+- name: Evaluate additional groups for upgrade
+ hosts: localhost
+ connection: local
+ become: no
+ tasks:
+ - name: Evaluate etcd_hosts_to_backup
+ add_host:
+ name: "{{ item }}"
+ groups: etcd_hosts_to_backup
+ with_items: groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master
+
+###############################################################################
+# Pre-upgrade checks
+###############################################################################
+- name: Verify upgrade can proceed
+ hosts: oo_first_master
+ vars:
+ target_version: "{{ '1.2' if deployment_type == 'origin' else '3.1.1.900' }}"
+ gather_facts: no
+ tasks:
+ - fail:
+ msg: >
+ This upgrade is only supported for atomic-enterprise, origin, openshift-enterprise, and online
+ deployment types
+ when: deployment_type not in ['atomic-enterprise', 'origin','openshift-enterprise', 'online']
+
+ - fail:
+ msg: >
+ openshift_pkg_version is {{ openshift_pkg_version }} which is not a
+ valid version for a {{ target_version }} upgrade
+ when: openshift_pkg_version is defined and openshift_pkg_version.split('-',1).1 | version_compare(target_version ,'<')
+
+ - fail:
+ msg: >
+ openshift_image_tag is {{ openshift_image_tag }} which is not a
+ valid version for a {{ target_version }} upgrade
+ when: openshift_image_tag is defined and openshift_image_tag.split('v',1).1 | version_compare(target_version ,'<')
+
+- name: Verify upgrade can proceed
+ hosts: oo_masters_to_config
+ vars:
+ openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
+ tasks:
+ - name: Ensure Master is running
+ service:
+ name: "{{ openshift.common.service_type }}-master"
+ state: started
+ enabled: yes
+ when: not openshift_master_ha | bool and openshift.common.is_containerized | bool
+
+ - name: Ensure HA Master is running
+ service:
+ name: "{{ openshift.common.service_type }}-master-api"
+ state: started
+ enabled: yes
+ when: openshift_master_ha | bool and openshift.common.is_containerized | bool
+
+ - name: Ensure HA Master is running
+ service:
+ name: "{{ openshift.common.service_type }}-master-controllers"
+ state: started
+ enabled: yes
+ when: openshift_master_ha | bool and openshift.common.is_containerized | bool
+
+- name: Verify upgrade can proceed
+ hosts: oo_nodes_to_config
+ tasks:
+ - name: Ensure Node is running
+ service:
+ name: "{{ openshift.common.service_type }}-node"
+ state: started
+ enabled: yes
+ when: openshift.common.is_containerized | bool
+
+- name: Verify upgrade can proceed
+ hosts: oo_masters_to_config:oo_nodes_to_config
+ vars:
+ target_version: "{{ '1.2' if deployment_type == 'origin' else '3.1.1.900' }}"
+ roles:
+ - openshift_cli
+ tasks:
+ - name: Clean package cache
+ command: "{{ ansible_pkg_mgr }} clean all"
+ when: not openshift.common.is_atomic | bool
+
+ - set_fact:
+ g_new_service_name: "{{ 'origin' if deployment_type =='origin' else 'atomic-openshift' }}"
+ when: not openshift.common.is_containerized | bool
+
+ - name: Determine available versions
+ script: ../files/rpm_versions.sh {{ g_new_service_name }}
+ register: g_rpm_versions_result
+ when: not openshift.common.is_containerized | bool
+
+ - set_fact:
+ g_aos_versions: "{{ g_rpm_versions_result.stdout | from_yaml }}"
+ when: not openshift.common.is_containerized | bool
+
+ - name: Determine available versions
+ script: ../files/openshift_container_versions.sh {{ openshift.common.service_type }}
+ register: g_containerized_versions_result
+ when: openshift.common.is_containerized | bool
+
+ - set_fact:
+ g_aos_versions: "{{ g_containerized_versions_result.stdout | from_yaml }}"
+ when: openshift.common.is_containerized | bool
+
+ - set_fact:
+ g_new_version: "{{ g_aos_versions.curr_version.split('-', 1).0 if g_aos_versions.avail_version is none else g_aos_versions.avail_version.split('-', 1).0 }}"
+ when: openshift_pkg_version is not defined
+
+ - set_fact:
+ g_new_version: "{{ openshift_pkg_version | replace('-','') }}"
+ when: openshift_pkg_version is defined
+
+ - set_fact:
+ g_new_version: "{{ openshift_image_tag | replace('v','') }}"
+ when: openshift_image_tag is defined
+
+ - fail:
+ msg: Verifying the correct version was found
+ when: verify_upgrade_version is defined and g_new_version != verify_upgrade_version
+
+ - name: Update systemd units
+ script: ../files/ensure_system_units_have_version.sh {{ openshift.common.service_type }} {{ openshift.common.deployment_type }} {{ g_aos_versions.curr_version }}
+ when: openshift.common.is_containerized | bool
+
+ # Note: the version number is hardcoded here in hopes of catching potential
+ # bugs in how g_aos_versions.curr_version is set
+ - name: Verifying the correct version is installed for upgrade
+ shell: grep 3.1.1.6 {{ item }}
+ with_items:
+ - /etc/sysconfig/openvswitch
+ - /etc/sysconfig/{{ openshift.common.service_type }}*
+ when: verify_upgrade_version is defined
+
+ - fail:
+ msg: This playbook requires Origin 1.1 or later
+ when: deployment_type == 'origin' and g_aos_versions.curr_version | version_compare('1.1','<')
+
+ - fail:
+ msg: This playbook requires Atomic Enterprise Platform/OpenShift Enterprise 3.1 or later
+ when: deployment_type == 'atomic-openshift' and g_aos_versions.curr_version | version_compare('3.1','<')
+
+ - fail:
+ msg: Upgrade packages not found
+ when: openshift_image_tag is not defined and (g_aos_versions.avail_version | default(g_aos_versions.curr_version, true) | version_compare(target_version, '<'))
+
+ - name: Determine available Docker
+ script: ../files/rpm_versions.sh docker
+ register: g_docker_version_result
+ when: not openshift.common.is_atomic | bool
+
+ - name: Determine available Docker
+ shell: "rpm -q --queryformat '---\ncurr_version: %{VERSION}\navail_version: \n' docker"
+ register: g_atomic_docker_version_result
+ when: openshift.common.is_atomic | bool
+
+ - set_fact:
+ g_docker_version: "{{ g_docker_version_result.stdout | from_yaml }}"
+ when: not openshift.common.is_atomic | bool
+
+ - set_fact:
+ g_docker_version: "{{ g_atomic_docker_version_result.stdout | from_yaml }}"
+ when: openshift.common.is_atomic | bool
+
+ - fail:
+ msg: This playbook requires access to Docker 1.9 or later
+ when: not openshift.common.is_atomic | bool
+ and (g_docker_version.avail_version | default(g_docker_version.curr_version, true) | version_compare('1.9','<'))
+
+ # TODO: add check to upgrade ostree to get latest Docker
+
+ - set_fact:
+ pre_upgrade_complete: True
+
+
+##############################################################################
+# Gate on pre-upgrade checks
+##############################################################################
+- name: Gate on pre-upgrade checks
+ hosts: localhost
+ connection: local
+ become: no
+ vars:
+ pre_upgrade_hosts: "{{ groups.oo_masters_to_config | union(groups.oo_nodes_to_config) }}"
+ tasks:
+ - set_fact:
+ pre_upgrade_completed: "{{ hostvars
+ | oo_select_keys(pre_upgrade_hosts)
+ | oo_collect('inventory_hostname', {'pre_upgrade_complete': true}) }}"
+ - set_fact:
+ pre_upgrade_failed: "{{ pre_upgrade_hosts | difference(pre_upgrade_completed) }}"
+ - fail:
+ msg: "Upgrade cannot continue. The following hosts did not complete pre-upgrade checks: {{ pre_upgrade_failed | join(',') }}"
+ when: pre_upgrade_failed | length > 0
+
+###############################################################################
+# Backup etcd
+###############################################################################
+- name: Backup etcd
+ hosts: etcd_hosts_to_backup
+ vars:
+ embedded_etcd: "{{ openshift.master.embedded_etcd }}"
+ timestamp: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
+ roles:
+ - openshift_facts
+ tasks:
+ # Ensure we persist the etcd role for this host in openshift_facts
+ - openshift_facts:
+ role: etcd
+ local_facts: {}
+ when: "'etcd' not in openshift"
+
+ - stat: path=/var/lib/openshift
+ register: var_lib_openshift
+
+ - stat: path=/var/lib/origin
+ register: var_lib_origin
+
+ - name: Create origin symlink if necessary
+ file: src=/var/lib/openshift/ dest=/var/lib/origin state=link
+ when: var_lib_openshift.stat.exists == True and var_lib_origin.stat.exists == False
+
+ # TODO: replace shell module with command and update later checks
+ # We assume to be using the data dir for all backups.
+ - name: Check available disk space for etcd backup
+ shell: df --output=avail -k {{ openshift.common.data_dir }} | tail -n 1
+ register: avail_disk
+
+ # TODO: replace shell module with command and update later checks
+ - name: Check current embedded etcd disk usage
+ shell: du -k {{ openshift.etcd.etcd_data_dir }} | tail -n 1 | cut -f1
+ register: etcd_disk_usage
+ when: embedded_etcd | bool
+
+ - name: Abort if insufficient disk space for etcd backup
+ fail:
+ msg: >
+ {{ etcd_disk_usage.stdout }} Kb disk space required for etcd backup,
+ {{ avail_disk.stdout }} Kb available.
+ when: (embedded_etcd | bool) and (etcd_disk_usage.stdout|int > avail_disk.stdout|int)
+
+ - name: Install etcd (for etcdctl)
+ action: "{{ ansible_pkg_mgr }} name=etcd state=latest"
+ when: not openshift.common.is_atomic | bool
+
+ - name: Generate etcd backup
+ command: >
+ etcdctl backup --data-dir={{ openshift.etcd.etcd_data_dir }}
+ --backup-dir={{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}
+
+ - set_fact:
+ etcd_backup_complete: True
+
+ - name: Display location of etcd backup
+ debug:
+ msg: "Etcd backup created in {{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}"
+
+
+##############################################################################
+# Gate on etcd backup
+##############################################################################
+- name: Gate on etcd backup
+ hosts: localhost
+ connection: local
+ become: no
+ tasks:
+ - set_fact:
+ etcd_backup_completed: "{{ hostvars
+ | oo_select_keys(groups.etcd_hosts_to_backup)
+ | oo_collect('inventory_hostname', {'etcd_backup_complete': true}) }}"
+ - set_fact:
+ etcd_backup_failed: "{{ groups.etcd_hosts_to_backup | difference(etcd_backup_completed) }}"
+ - fail:
+ msg: "Upgrade cannot continue. The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}"
+ when: etcd_backup_failed | length > 0
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/roles b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/roles
new file mode 120000
index 000000000..6bc1a7aef
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/roles
@@ -0,0 +1 @@
+../../../../../roles
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/rpm_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/rpm_upgrade.yml
new file mode 100644
index 000000000..7a2718e1b
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/rpm_upgrade.yml
@@ -0,0 +1,6 @@
+- name: Upgrade packages
+ command: "{{ ansible_pkg_mgr}} update -y {{ openshift.common.service_type }}-{{ component }}-{{ g_new_version }}"
+
+- name: Ensure python-yaml present for config upgrade
+ action: "{{ ansible_pkg_mgr }} name=PyYAML state=present"
+ when: not openshift.common.is_atomic | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml
new file mode 100644
index 000000000..d84d9f674
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml
@@ -0,0 +1,159 @@
+---
+###############################################################################
+# The restart playbook should be run after this playbook completes.
+###############################################################################
+
+- name: Upgrade docker
+ hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config
+ roles:
+ - openshift_facts
+ tasks:
+ - include: docker_upgrade.yml
+ when: not openshift.common.is_atomic | bool
+
+###############################################################################
+# Upgrade Masters
+###############################################################################
+- name: Upgrade master
+ hosts: oo_masters_to_config
+ roles:
+ - openshift_facts
+ tasks:
+ - include: rpm_upgrade.yml component=master
+ when: not openshift.common.is_containerized | bool
+
+ - include: containerized_upgrade.yml
+ when: openshift.common.is_containerized | bool
+
+# - name: Upgrade master configuration
+# openshift_upgrade_config:
+# from_version: '3.1'
+# to_version: '3.2'
+# role: master
+# config_base: "{{ hostvars[inventory_hostname].openshift.common.config_base }}"
+
+- name: Set master update status to complete
+ hosts: oo_masters_to_config
+ tasks:
+ - set_fact:
+ master_update_complete: True
+
+##############################################################################
+# Gate on master update complete
+##############################################################################
+- name: Gate on master update
+ hosts: localhost
+ connection: local
+ become: no
+ tasks:
+ - set_fact:
+ master_update_completed: "{{ hostvars
+ | oo_select_keys(groups.oo_masters_to_config)
+ | oo_collect('inventory_hostname', {'master_update_complete': true}) }}"
+ - set_fact:
+ master_update_failed: "{{ groups.oo_masters_to_config | difference(master_update_completed) }}"
+ - fail:
+ msg: "Upgrade cannot continue. The following masters did not finish updating: {{ master_update_failed | join(',') }}"
+ when: master_update_failed | length > 0
+
+###############################################################################
+# Upgrade Nodes
+###############################################################################
+- name: Upgrade nodes
+ hosts: oo_nodes_to_config
+ roles:
+ - openshift_facts
+ tasks:
+ - include: rpm_upgrade.yml
+ vars:
+ component: "node"
+ openshift_version: "{{ openshift_pkg_version | default('') }}"
+ when: not openshift.common.is_containerized | bool
+
+ - include: containerized_upgrade.yml
+ when: openshift.common.is_containerized | bool
+
+ # This will restart the node
+ - name: Restart openvswitch service
+ service: name="{{ openshift.common.service_type }}-node" state=restarted
+
+ - set_fact:
+ node_update_complete: True
+
+##############################################################################
+# Gate on nodes update
+##############################################################################
+- name: Gate on nodes update
+ hosts: localhost
+ connection: local
+ become: no
+ tasks:
+ - set_fact:
+ node_update_completed: "{{ hostvars
+ | oo_select_keys(groups.oo_nodes_to_config)
+ | oo_collect('inventory_hostname', {'node_update_complete': true}) }}"
+ - set_fact:
+ node_update_failed: "{{ groups.oo_nodes_to_config | difference(node_update_completed) }}"
+ - fail:
+ msg: "Upgrade cannot continue. The following nodes did not finish updating: {{ node_update_failed | join(',') }}"
+ when: node_update_failed | length > 0
+
+###############################################################################
+# Reconcile Cluster Roles, Cluster Role Bindings and Security Context Constraints
+###############################################################################
+- name: Reconcile Cluster Roles and Cluster Role Bindings and Security Context Constraints
+ hosts: oo_masters_to_config
+ roles:
+ - { role: openshift_cli, openshift_image_tag: "v{{ g_new_version }}" }
+ vars:
+ origin_reconcile_bindings: "{{ deployment_type == 'origin' and g_new_version | version_compare('1.0.6', '>') }}"
+ ent_reconcile_bindings: true
+ openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
+ tasks:
+ - name: Verifying the correct commandline tools are available
+ shell: grep {{ verify_upgrade_version }} {{ openshift.common.admin_binary}}
+ when: openshift.common.is_containerized | bool and verify_upgrade_version is defined
+
+ - name: Reconcile Cluster Roles
+ command: >
+ {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ policy reconcile-cluster-roles --confirm
+ run_once: true
+
+ - name: Reconcile Cluster Role Bindings
+ command: >
+ {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ policy reconcile-cluster-role-bindings
+ --exclude-groups=system:authenticated
+ --exclude-groups=system:authenticated:oauth
+ --exclude-groups=system:unauthenticated
+ --exclude-users=system:anonymous
+ --additive-only=true --confirm
+ when: origin_reconcile_bindings | bool or ent_reconcile_bindings | bool
+ run_once: true
+
+ - name: Reconcile Security Context Constraints
+ command: >
+ {{ openshift.common.admin_binary}} policy reconcile-sccs --confirm
+ run_once: true
+
+ - set_fact:
+ reconcile_complete: True
+
+##############################################################################
+# Gate on reconcile
+##############################################################################
+- name: Gate on reconcile
+ hosts: localhost
+ connection: local
+ become: no
+ tasks:
+ - set_fact:
+ reconcile_completed: "{{ hostvars
+ | oo_select_keys(groups.oo_masters_to_config)
+ | oo_collect('inventory_hostname', {'reconcile_complete': true}) }}"
+ - set_fact:
+ reconcile_failed: "{{ groups.oo_masters_to_config | difference(reconcile_completed) }}"
+ - fail:
+ msg: "Upgrade cannot continue. The following masters did not finish reconciling: {{ reconcile_failed | join(',') }}"
+ when: reconcile_failed | length > 0
diff --git a/playbooks/common/openshift-cluster/validate_hostnames.yml b/playbooks/common/openshift-cluster/validate_hostnames.yml
index fd82997b9..0f562e019 100644
--- a/playbooks/common/openshift-cluster/validate_hostnames.yml
+++ b/playbooks/common/openshift-cluster/validate_hostnames.yml
@@ -6,14 +6,6 @@
roles:
- openshift_facts
tasks:
- - openshift_facts:
- role: "{{ item.role }}"
- local_facts: "{{ item.local_facts }}"
- with_items:
- - role: common
- local_facts:
- hostname: "{{ openshift_hostname | default(None) }}"
- public_hostname: "{{ openshift_public_hostname | default(None) }}"
- shell:
getent ahostsv4 {{ openshift.common.hostname }} | head -n 1 | awk '{ print $1 }'
register: lookupip
diff --git a/playbooks/common/openshift-docker/config.yml b/playbooks/common/openshift-docker/config.yml
deleted file mode 100644
index 092d5533c..000000000
--- a/playbooks/common/openshift-docker/config.yml
+++ /dev/null
@@ -1,9 +0,0 @@
-- name: Configure docker hosts
- hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config
- vars:
- docker_additional_registries: "{{ lookup('oo_option', 'docker_additional_registries') | oo_split }}"
- docker_insecure_registries: "{{ lookup('oo_option', 'docker_insecure_registries') | oo_split }}"
- docker_blocked_registries: "{{ lookup('oo_option', 'docker_blocked_registries') | oo_split }}"
- roles:
- - openshift_facts
- - openshift_docker
diff --git a/playbooks/common/openshift-docker/filter_plugins b/playbooks/common/openshift-docker/filter_plugins
deleted file mode 120000
index 99a95e4ca..000000000
--- a/playbooks/common/openshift-docker/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-docker/lookup_plugins b/playbooks/common/openshift-docker/lookup_plugins
deleted file mode 120000
index ac79701db..000000000
--- a/playbooks/common/openshift-docker/lookup_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-docker/roles b/playbooks/common/openshift-docker/roles
deleted file mode 120000
index 20c4c58cf..000000000
--- a/playbooks/common/openshift-docker/roles
+++ /dev/null
@@ -1 +0,0 @@
-../../../roles \ No newline at end of file
diff --git a/playbooks/common/openshift-etcd/config.yml b/playbooks/common/openshift-etcd/config.yml
index 93eb157cb..06fbd6862 100644
--- a/playbooks/common/openshift-etcd/config.yml
+++ b/playbooks/common/openshift-etcd/config.yml
@@ -5,17 +5,9 @@
- openshift_facts
tasks:
- openshift_facts:
- role: "{{ item.role }}"
- local_facts: "{{ item.local_facts }}"
- with_items:
- - role: common
- local_facts:
- hostname: "{{ openshift_hostname | default(None) }}"
- public_hostname: "{{ openshift_public_hostname | default(None) }}"
- deployment_type: "{{ openshift_deployment_type }}"
- - role: etcd
- local_facts:
- etcd_image: "{{ osm_etcd_image | default(None) }}"
+ role: etcd
+ local_facts:
+ etcd_image: "{{ osm_etcd_image | default(None) }}"
- name: Check status of etcd certificates
stat:
path: "{{ item }}"
@@ -89,8 +81,8 @@
dest: "{{ etcd_cert_config_dir }}"
when: etcd_server_certs_missing
roles:
- - etcd
- - role: nickhammond.logrotate
+ - openshift_etcd
+ - nickhammond.logrotate
# Configure the remaining etcd hosts, skipping the first one we dealt with above.
- name: Configure remaining etcd hosts
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
index b9d595576..972427c53 100644
--- a/playbooks/common/openshift-master/config.yml
+++ b/playbooks/common/openshift-master/config.yml
@@ -1,6 +1,9 @@
---
- name: Set master facts and determine if external etcd certs need to be generated
hosts: oo_masters_to_config
+ vars:
+ t_oo_option_master_debug_level: "{{ lookup('oo_option', 'openshift_master_debug_level') }}"
+
pre_tasks:
- name: Check for RPM generated config marker file .config_managed
stat:
@@ -28,37 +31,31 @@
| default([]))
| oo_collect('openshift.common.hostname')
| default(none, true) }}"
+
+ - set_fact:
+ openshift_master_debug_level: "{{ t_oo_option_master_debug_level }}"
+ when: openshift_master_debug_level is not defined and t_oo_option_master_debug_level != ""
roles:
- openshift_facts
post_tasks:
- openshift_facts:
- role: "{{ item.role }}"
- local_facts: "{{ item.local_facts }}"
- with_items:
- - role: common
- local_facts:
- hostname: "{{ openshift_hostname | default(None) }}"
- ip: "{{ openshift_ip | default(None) }}"
- public_hostname: "{{ openshift_public_hostname | default(None) }}"
- public_ip: "{{ openshift_public_ip | default(None) }}"
- deployment_type: "{{ openshift_deployment_type }}"
- - role: master
- local_facts:
- api_port: "{{ openshift_master_api_port | default(None) }}"
- api_url: "{{ openshift_master_api_url | default(None) }}"
- api_use_ssl: "{{ openshift_master_api_use_ssl | default(None) }}"
- controllers_port: "{{ openshift_master_controllers_port | default(None) }}"
- public_api_url: "{{ openshift_master_public_api_url | default(None) }}"
- cluster_hostname: "{{ openshift_master_cluster_hostname | default(None) }}"
- cluster_public_hostname: "{{ openshift_master_cluster_public_hostname | default(None) }}"
- console_path: "{{ openshift_master_console_path | default(None) }}"
- console_port: "{{ openshift_master_console_port | default(None) }}"
- console_url: "{{ openshift_master_console_url | default(None) }}"
- console_use_ssl: "{{ openshift_master_console_use_ssl | default(None) }}"
- public_console_url: "{{ openshift_master_public_console_url | default(None) }}"
- portal_net: "{{ openshift_master_portal_net | default(None) }}"
- ha: "{{ openshift_master_ha | default(groups.oo_masters | length > 1) }}"
- master_count: "{{ openshift_master_count | default(groups.oo_masters | length) }}"
+ role: master
+ local_facts:
+ api_port: "{{ openshift_master_api_port | default(None) }}"
+ api_url: "{{ openshift_master_api_url | default(None) }}"
+ api_use_ssl: "{{ openshift_master_api_use_ssl | default(None) }}"
+ controllers_port: "{{ openshift_master_controllers_port | default(None) }}"
+ public_api_url: "{{ openshift_master_public_api_url | default(None) }}"
+ cluster_hostname: "{{ openshift_master_cluster_hostname | default(None) }}"
+ cluster_public_hostname: "{{ openshift_master_cluster_public_hostname | default(None) }}"
+ console_path: "{{ openshift_master_console_path | default(None) }}"
+ console_port: "{{ openshift_master_console_port | default(None) }}"
+ console_url: "{{ openshift_master_console_url | default(None) }}"
+ console_use_ssl: "{{ openshift_master_console_use_ssl | default(None) }}"
+ public_console_url: "{{ openshift_master_public_console_url | default(None) }}"
+ portal_net: "{{ openshift_master_portal_net | default(None) }}"
+ ha: "{{ openshift_master_ha | default(groups.oo_masters | length > 1) }}"
+ master_count: "{{ openshift_master_count | default(groups.oo_masters | length) }}"
- openshift_facts:
role: hosted
openshift_env:
@@ -227,6 +224,9 @@
hosts: oo_lb_to_config
vars:
sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
+ haproxy_limit_nofile: 100000
+ haproxy_global_maxconn: 20000
+ haproxy_default_maxconn: 20000
haproxy_frontend_port: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_port }}"
haproxy_frontends:
- name: atomic-openshift-api
diff --git a/playbooks/common/openshift-master/scaleup.yml b/playbooks/common/openshift-master/scaleup.yml
index 6f8151d30..6e6cb3e01 100644
--- a/playbooks/common/openshift-master/scaleup.yml
+++ b/playbooks/common/openshift-master/scaleup.yml
@@ -33,9 +33,10 @@
service: name={{ openshift.common.service_type }}-master-controllers state=restarted
- name: verify api server
command: >
- curl -k --head --silent {{ openshift.master.api_url }}
+ curl --silent --cacert {{ openshift.common.config_base }}/master/ca.crt
+ {{ openshift.master.api_url }}/healthz/ready
register: api_available_output
- until: api_available_output.stdout.find("200 OK") != -1
+ until: api_available_output.stdout == 'ok'
retries: 120
delay: 1
changed_when: false
diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml
index 56d30e9b9..aa71ab703 100644
--- a/playbooks/common/openshift-node/config.yml
+++ b/playbooks/common/openshift-node/config.yml
@@ -1,6 +1,12 @@
---
- name: Gather and set facts for node hosts
hosts: oo_nodes_to_config
+ vars:
+ t_oo_option_node_debug_level: "{{ lookup('oo_option', 'openshift_node_debug_level') }}"
+ pre_tasks:
+ - set_fact:
+ openshift_node_debug_level: "{{ t_oo_option_node_debug_level }}"
+ when: openshift_node_debug_level is not defined and t_oo_option_node_debug_level != ""
roles:
- openshift_facts
tasks:
@@ -8,20 +14,11 @@
# configured, we need to make sure to set the node properties beforehand if
# we do not want the defaults
- openshift_facts:
- role: "{{ item.role }}"
- local_facts: "{{ item.local_facts }}"
- with_items:
- - role: common
- local_facts:
- hostname: "{{ openshift_hostname | default(None) }}"
- public_hostname: "{{ openshift_public_hostname | default(None) }}"
- deployment_type: "{{ openshift_deployment_type }}"
- use_flannel: "{{ openshift_use_flannel | default(None) }}"
- - role: node
- local_facts:
- labels: "{{ openshift_node_labels | default(None) }}"
- annotations: "{{ openshift_node_annotations | default(None) }}"
- schedulable: "{{ openshift_schedulable | default(openshift_scheduleable) | default(None) }}"
+ role: node
+ local_facts:
+ labels: "{{ openshift_node_labels | default(None) }}"
+ annotations: "{{ openshift_node_annotations | default(None) }}"
+ schedulable: "{{ openshift_schedulable | default(openshift_scheduleable) | default(None) }}"
- name: Check status of node certificates
stat:
path: "{{ openshift.common.config_base }}/node/{{ item }}"
@@ -39,22 +36,6 @@
node_subdir: node-{{ openshift.common.hostname }}
config_dir: "{{ openshift.common.config_base }}/generated-configs/node-{{ openshift.common.hostname }}"
node_cert_dir: "{{ openshift.common.config_base }}/node"
- - name: Check status of flannel external etcd certificates
- stat:
- path: "{{ openshift.common.config_base }}/node/{{ item }}"
- with_items:
- - node.etcd-client.crt
- - node.etcd-ca.crt
- register: g_external_etcd_flannel_cert_stat_result
- when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config and (openshift.common.use_flannel | bool)
- - set_fact:
- etcd_client_flannel_certs_missing: "{{ g_external_etcd_flannel_cert_stat_result.results
- | oo_collect(attribute='stat.exists')
- | list | intersect([false])}}"
- etcd_cert_subdir: openshift-node-{{ openshift.common.hostname }}
- etcd_cert_config_dir: "{{ openshift.common.config_base }}/node"
- etcd_cert_prefix: node.etcd-
- when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config and (openshift.common.use_flannel | bool)
- name: Create temp directory for syncing certs
hosts: localhost
@@ -67,65 +48,6 @@
register: mktemp
changed_when: False
-- name: Configure flannel etcd certificates
- hosts: oo_first_etcd
- vars:
- etcd_generated_certs_dir: /etc/etcd/generated_certs
- sync_tmpdir: "{{ hostvars.localhost.mktemp.stdout }}"
- pre_tasks:
- - set_fact:
- etcd_needing_client_certs: "{{ hostvars
- | oo_select_keys(groups['oo_nodes_to_config'])
- | oo_filter_list(filter_attr='etcd_client_flannel_certs_missing') | default([]) }}"
- when: etcd_client_flannel_certs_missing is defined and etcd_client_flannel_certs_missing
- roles:
- - role: etcd_certificates
- when: openshift_use_flannel | default(false) | bool
- post_tasks:
- - name: Create a tarball of the etcd flannel certs
- command: >
- tar -czvf {{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}.tgz
- -C {{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }} .
- args:
- creates: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}.tgz"
- with_items: etcd_needing_client_certs
- when: etcd_client_flannel_certs_missing is defined and etcd_client_flannel_certs_missing
- - name: Retrieve the etcd cert tarballs
- fetch:
- src: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}.tgz"
- dest: "{{ sync_tmpdir }}/"
- flat: yes
- fail_on_missing: yes
- validate_checksum: yes
- with_items: etcd_needing_client_certs
- when: etcd_client_flannel_certs_missing is defined and etcd_client_flannel_certs_missing
-
-- name: Copy the external etcd flannel certs to the nodes
- hosts: oo_nodes_to_config
- vars:
- sync_tmpdir: "{{ hostvars.localhost.mktemp.stdout }}"
- tasks:
- - name: Ensure certificate directory exists
- file:
- path: "{{ openshift.common.config_base }}/node"
- state: directory
- when: etcd_client_flannel_certs_missing is defined and etcd_client_flannel_certs_missing
- - name: Unarchive the tarball on the master
- unarchive:
- src: "{{ sync_tmpdir }}/{{ etcd_cert_subdir }}.tgz"
- dest: "{{ etcd_cert_config_dir }}"
- when: etcd_client_flannel_certs_missing is defined and etcd_client_flannel_certs_missing
- - file:
- path: "{{ etcd_cert_config_dir }}/{{ item }}"
- owner: root
- group: root
- mode: 0600
- with_items:
- - node.etcd-client.crt
- - node.etcd-client.key
- - node.etcd-ca.crt
- when: etcd_client_flannel_certs_missing is defined and etcd_client_flannel_certs_missing
-
- name: Create node certificates
hosts: oo_first_master
vars:
@@ -204,6 +126,86 @@
roles:
- openshift_node
+- name: Gather and set facts for flannel certificates
+ hosts: oo_nodes_to_config
+ tasks:
+ - name: Check status of flannel external etcd certificates
+ stat:
+ path: "{{ openshift.common.config_base }}/node/{{ item }}"
+ with_items:
+ - node.etcd-client.crt
+ - node.etcd-ca.crt
+ register: g_external_etcd_flannel_cert_stat_result
+ when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config and (openshift.common.use_flannel | bool)
+ - set_fact:
+ etcd_client_flannel_certs_missing: "{{ g_external_etcd_flannel_cert_stat_result.results
+ | oo_collect(attribute='stat.exists')
+ | list | intersect([false])}}"
+ etcd_cert_subdir: openshift-node-{{ openshift.common.hostname }}
+ etcd_cert_config_dir: "{{ openshift.common.config_base }}/node"
+ etcd_cert_prefix: node.etcd-
+ when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config and (openshift.common.use_flannel | bool)
+
+- name: Configure flannel etcd certificates
+ hosts: oo_first_etcd
+ vars:
+ etcd_generated_certs_dir: /etc/etcd/generated_certs
+ sync_tmpdir: "{{ hostvars.localhost.mktemp.stdout }}"
+ pre_tasks:
+ - set_fact:
+ etcd_needing_client_certs: "{{ hostvars
+ | oo_select_keys(groups['oo_nodes_to_config'])
+ | oo_filter_list(filter_attr='etcd_client_flannel_certs_missing') | default([]) }}"
+ when: etcd_client_flannel_certs_missing is defined and etcd_client_flannel_certs_missing
+ roles:
+ - role: etcd_certificates
+ when: openshift_use_flannel | default(false) | bool
+ post_tasks:
+ - name: Create a tarball of the etcd flannel certs
+ command: >
+ tar -czvf {{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}.tgz
+ -C {{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }} .
+ args:
+ creates: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}.tgz"
+ with_items: etcd_needing_client_certs
+ when: etcd_client_flannel_certs_missing is defined and etcd_client_flannel_certs_missing
+ - name: Retrieve the etcd cert tarballs
+ fetch:
+ src: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}.tgz"
+ dest: "{{ sync_tmpdir }}/"
+ flat: yes
+ fail_on_missing: yes
+ validate_checksum: yes
+ with_items: etcd_needing_client_certs
+ when: etcd_client_flannel_certs_missing is defined and etcd_client_flannel_certs_missing
+
+- name: Copy the external etcd flannel certs to the nodes
+ hosts: oo_nodes_to_config
+ vars:
+ sync_tmpdir: "{{ hostvars.localhost.mktemp.stdout }}"
+ tasks:
+ - name: Ensure certificate directory exists
+ file:
+ path: "{{ openshift.common.config_base }}/node"
+ state: directory
+ when: etcd_client_flannel_certs_missing is defined and etcd_client_flannel_certs_missing
+ - name: Unarchive the tarball on the master
+ unarchive:
+ src: "{{ sync_tmpdir }}/{{ etcd_cert_subdir }}.tgz"
+ dest: "{{ etcd_cert_config_dir }}"
+ when: etcd_client_flannel_certs_missing is defined and etcd_client_flannel_certs_missing
+ - file:
+ path: "{{ etcd_cert_config_dir }}/{{ item }}"
+ owner: root
+ group: root
+ mode: 0600
+ with_items:
+ - node.etcd-client.crt
+ - node.etcd-client.key
+ - node.etcd-ca.crt
+ when: etcd_client_flannel_certs_missing is defined and etcd_client_flannel_certs_missing
+
- name: Additional node config
hosts: oo_nodes_to_config
vars:
@@ -254,9 +256,10 @@
# Using curl here since the uri module requires python-httplib2 and
# wait_for port doesn't provide health information.
command: >
- curl -k --head --silent {{ openshift.master.api_url }}
+ curl --silent --cacert {{ openshift.master.config_dir }}/master/ca.crt
+ {{ openshift.master.api_url }}/healthz/ready
register: api_available_output
- until: api_available_output.stdout.find("200 OK") != -1
+ until: api_available_output.stdout == 'ok'
retries: 120
delay: 1
changed_when: false
diff --git a/playbooks/gce/openshift-cluster/cluster_hosts.yml b/playbooks/gce/openshift-cluster/cluster_hosts.yml
index b989e15fa..a7baea915 100644
--- a/playbooks/gce/openshift-cluster/cluster_hosts.yml
+++ b/playbooks/gce/openshift-cluster/cluster_hosts.yml
@@ -16,6 +16,6 @@ g_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-node'] | defa
g_new_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-new-node'] | default([])) }}"
-g_infra_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type-infra']) | default([]) }}"
+g_infra_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type-infra'] | default([])) }}"
g_compute_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type-compute'] | default([])) }}"
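
The g_infra_hosts fix above (and the matching libvirt and openstack changes below) moves default([]) inside intersect(): when the tag group does not exist, groups['tag_sub-host-type-infra'] is undefined and intersect() fails before the trailing default() can apply, whereas defaulting the group itself keeps the expression safe. A minimal standalone illustration, with a hypothetical group name and not part of this patch:

  # Old form fails when the group is absent, because intersect() is evaluated
  # against an undefined variable before default() ever runs:
  #   "{{ g_node_hosts | intersect(groups['tag_absent']) | default([]) }}"
  - hosts: localhost
    connection: local
    gather_facts: no
    tasks:
      - name: New form degrades gracefully to an empty list
        debug:
          msg: "{{ ['node1', 'node2'] | intersect(groups['tag_absent'] | default([])) }}"
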
diff --git a/playbooks/libvirt/openshift-cluster/cluster_hosts.yml b/playbooks/libvirt/openshift-cluster/cluster_hosts.yml
index b989e15fa..a7baea915 100644
--- a/playbooks/libvirt/openshift-cluster/cluster_hosts.yml
+++ b/playbooks/libvirt/openshift-cluster/cluster_hosts.yml
@@ -16,6 +16,6 @@ g_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-node'] | defa
g_new_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-new-node'] | default([])) }}"
-g_infra_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type-infra']) | default([]) }}"
+g_infra_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type-infra'] | default([])) }}"
g_compute_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type-compute'] | default([])) }}"
diff --git a/playbooks/libvirt/openshift-cluster/launch.yml b/playbooks/libvirt/openshift-cluster/launch.yml
index 3a48c82bc..30333f7be 100644
--- a/playbooks/libvirt/openshift-cluster/launch.yml
+++ b/playbooks/libvirt/openshift-cluster/launch.yml
@@ -10,6 +10,8 @@
os_libvirt_storage_pool: "{{ libvirt_storage_pool | default('images') }}"
os_libvirt_storage_pool_path: "{{ libvirt_storage_pool_path | default('/var/lib/libvirt/images') }}"
os_libvirt_network: "{{ libvirt_network | default('default') }}"
+ os_libvirt_instance_memory_mib: "{{ lookup('oo_option', 'libvirt_instance_memory_mib') | default(1024) }}"
+ os_libvirt_instance_vcpu: "{{ lookup('oo_option', 'libvirt_instance_vcpu') | default(2) }}"
image_url: "{{ deployment_vars[deployment_type].image.url }}"
image_sha256: "{{ deployment_vars[deployment_type].image.sha256 }}"
image_name: "{{ deployment_vars[deployment_type].image.name }}"
diff --git a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
index b00352539..d77b80c62 100644
--- a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
+++ b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
@@ -13,7 +13,7 @@
get_url:
url: '{{ image_url }}'
sha256sum: '{{ image_sha256 }}'
- dest: '{{ os_libvirt_storage_pool_path }}/{{ [image_name, image_compression] | reject("equalto", "") | join(".") }}'
+ dest: '{{ os_libvirt_storage_pool_path }}/{{ [image_name, image_compression] | difference([""]) | join(".") }}'
when: '{{ ( lookup("oo_option", "skip_image_download") | default("no", True) | lower ) in ["false", "no"] }}'
register: downloaded_image
@@ -59,10 +59,14 @@
- name: Refresh the libvirt storage pool for openshift
command: 'virsh -c {{ libvirt_uri }} pool-refresh {{ libvirt_storage_pool }}'
-- name: Create VMs drives
+- name: Create VM drives
command: 'virsh -c {{ libvirt_uri }} vol-create-as {{ os_libvirt_storage_pool }} {{ item }}.qcow2 10G --format qcow2 --backing-vol {{ image_name }} --backing-vol-format qcow2'
with_items: instances
+- name: Create VM docker drives
+ command: 'virsh -c {{ libvirt_uri }} vol-create-as {{ os_libvirt_storage_pool }} {{ item }}-docker.qcow2 10G --format qcow2 --allocation 0'
+ with_items: instances
+
- name: Create VMs
virt:
name: '{{ item }}'
diff --git a/playbooks/libvirt/openshift-cluster/templates/domain.xml b/playbooks/libvirt/openshift-cluster/templates/domain.xml
index 0ca8e0974..56f450642 100644
--- a/playbooks/libvirt/openshift-cluster/templates/domain.xml
+++ b/playbooks/libvirt/openshift-cluster/templates/domain.xml
@@ -1,6 +1,6 @@
<domain type='kvm' id='8'>
<name>{{ item }}</name>
- <memory unit='GiB'>1</memory>
+ <memory unit='MiB'>{{ os_libvirt_instance_memory_mib }}</memory>
<metadata xmlns:ansible="https://github.com/ansible/ansible">
<ansible:tags>
<ansible:tag>environment-{{ cluster_env }}</ansible:tag>
@@ -9,8 +9,7 @@
<ansible:tag>sub-host-type-{{ g_sub_host_type }}</ansible:tag>
</ansible:tags>
</metadata>
- <currentMemory unit='GiB'>1</currentMemory>
- <vcpu placement='static'>2</vcpu>
+ <vcpu placement='static'>{{ os_libvirt_instance_vcpu }}</vcpu>
<os>
<type arch='x86_64' machine='pc'>hvm</type>
<boot dev='hd'/>
@@ -35,10 +34,15 @@
<source file='{{ os_libvirt_storage_pool_path }}/{{ item }}.qcow2'/>
<target dev='vda' bus='virtio'/>
</disk>
+ <disk type='file' device='disk'>
+ <driver name='qemu' type='qcow2'/>
+ <source file='{{ os_libvirt_storage_pool_path }}/{{ item }}-docker.qcow2'/>
+ <target dev='vdb' bus='virtio'/>
+ </disk>
<disk type='file' device='cdrom'>
<driver name='qemu' type='raw'/>
<source file='{{ os_libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso'/>
- <target dev='vdb' bus='virtio'/>
+ <target dev='vdc' bus='virtio'/>
<readonly/>
</disk>
<controller type='usb' index='0' />
diff --git a/playbooks/libvirt/openshift-cluster/templates/user-data b/playbooks/libvirt/openshift-cluster/templates/user-data
index ead881f78..8b79940f4 100644
--- a/playbooks/libvirt/openshift-cluster/templates/user-data
+++ b/playbooks/libvirt/openshift-cluster/templates/user-data
@@ -4,6 +4,9 @@ disable_root: true
hostname: {{ item[0] }}
fqdn: {{ item[0] }}.example.com
+mounts:
+- [ vdb ]
+
users:
- default
- name: root
@@ -23,6 +26,12 @@ write_files:
permissions: 440
content: |
Defaults:openshift !requiretty
+ - content: |
+ DEVS=/dev/vdb
+ VG=docker_vg
+ path: /etc/sysconfig/docker-storage-setup
+ owner: root:root
+ permissions: '0644'
runcmd:
- NETWORK_CONFIG=/etc/sysconfig/network-scripts/ifcfg-eth0; if ! grep DHCP_HOSTNAME ${NETWORK_CONFIG}; then echo 'DHCP_HOSTNAME="{{ item[0] }}.example.com"' >> ${NETWORK_CONFIG}; fi; pkill -9 dhclient; service network restart
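
The docker-storage-setup fragment written above points docker-storage-setup at the new vdb disk so it can build a docker_vg volume group on first boot. A quick post-launch sanity check could look like the following sketch (hypothetical host group; not part of these playbooks):

  # Sketch: verify docker-storage-setup created docker_vg from /dev/vdb.
  - hosts: oo_hosts_to_config   # hypothetical group of freshly launched VMs
    tasks:
      - name: List volume groups
        command: vgs --noheadings -o vg_name
        register: vgs_out
        changed_when: false
      - name: Assert docker_vg exists
        assert:
          that: "'docker_vg' in vgs_out.stdout"
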
diff --git a/playbooks/libvirt/openshift-cluster/terminate.yml b/playbooks/libvirt/openshift-cluster/terminate.yml
index f4749c28d..d6251ac83 100644
--- a/playbooks/libvirt/openshift-cluster/terminate.yml
+++ b/playbooks/libvirt/openshift-cluster/terminate.yml
@@ -45,12 +45,18 @@
- groups['oo_hosts_to_terminate']
- [ destroy, undefine ]
- - name: Delete VMs drives
+ - name: Delete VM drives
command: 'virsh -c {{ libvirt_uri }} vol-delete --pool {{ libvirt_storage_pool }} {{ item }}.qcow2'
args:
removes: '{{ libvirt_storage_pool_path }}/{{ item }}.qcow2'
with_items: groups['oo_hosts_to_terminate']
+ - name: Delete VM docker drives
+ command: 'virsh -c {{ libvirt_uri }} vol-delete --pool {{ libvirt_storage_pool }} {{ item }}-docker.qcow2'
+ args:
+ removes: '{{ libvirt_storage_pool_path }}/{{ item }}-docker.qcow2'
+ with_items: groups['oo_hosts_to_terminate']
+
- name: Delete the VM cloud-init image
file:
path: '{{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso'
diff --git a/playbooks/libvirt/openshift-cluster/vars.yml b/playbooks/libvirt/openshift-cluster/vars.yml
index c78b52867..f28245f88 100644
--- a/playbooks/libvirt/openshift-cluster/vars.yml
+++ b/playbooks/libvirt/openshift-cluster/vars.yml
@@ -23,13 +23,13 @@ deployment_vars:
origin:
image:
url: "{{ lookup('oo_option', 'image_url') |
- default('http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2.xz', True) }}"
+ default('http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud-1602.qcow2.xz', True) }}"
compression: "{{ lookup('oo_option', 'image_compression') |
default('xz', True) }}"
name: "{{ lookup('oo_option', 'image_name') |
default('CentOS-7-x86_64-GenericCloud.qcow2', True) }}"
sha256: "{{ lookup('oo_option', 'image_sha256') |
- default('9461006300d65172f5668d8875f2aad7b54f7ba4e9c5435d65a84a5a2d66e39b', True) }}"
+ default('dd0f5e610e7c5ffacaca35ed7a78a19142a588f4543da77b61c1fb0d74400471', True) }}"
ssh_user: openshift
sudo: yes
online:
@@ -42,5 +42,3 @@ deployment_vars:
enterprise: "{{ deployment_rhel7_ent_base }}"
openshift-enterprise: "{{ deployment_rhel7_ent_base }}"
atomic-enterprise: "{{ deployment_rhel7_ent_base }}"
-
-
diff --git a/playbooks/openstack/openshift-cluster/cluster_hosts.yml b/playbooks/openstack/openshift-cluster/cluster_hosts.yml
index 9a3361919..119b376aa 100644
--- a/playbooks/openstack/openshift-cluster/cluster_hosts.yml
+++ b/playbooks/openstack/openshift-cluster/cluster_hosts.yml
@@ -16,6 +16,6 @@ g_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_node'] | defa
g_new_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_new_node'] | default([])) }}"
-g_infra_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type_infra']) | default([]) }}"
+g_infra_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type_infra'] | default([])) }}"
g_compute_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type_compute'] | default([])) }}"