Diffstat (limited to 'playbooks')
-rw-r--r-- playbooks/adhoc/atomic_openshift_tutorial_reset.yml | 2
-rw-r--r-- playbooks/adhoc/bootstrap-fedora.yml | 2
-rw-r--r-- playbooks/adhoc/create_pv/pv-template.j2 | 2
-rw-r--r-- playbooks/adhoc/metrics_setup/README.md | 25
-rw-r--r-- playbooks/adhoc/metrics_setup/files/metrics-deployer-setup.yaml | 37
-rw-r--r-- playbooks/adhoc/metrics_setup/files/metrics.yaml | 116
-rw-r--r-- playbooks/adhoc/metrics_setup/playbooks/install.yml | 36
-rw-r--r-- playbooks/adhoc/metrics_setup/playbooks/master_config_facts.yml | 10
-rw-r--r-- playbooks/adhoc/metrics_setup/playbooks/uninstall.yml | 16
l--------- playbooks/adhoc/noc/roles | 1
-rw-r--r-- playbooks/adhoc/openshift_hosted_logging_efk.yaml | 6
-rwxr-xr-x playbooks/adhoc/sdn_restart/oo-sdn-restart.yml | 3
-rw-r--r-- playbooks/adhoc/setupnfs.yml | 2
-rw-r--r-- playbooks/adhoc/uninstall.yml | 574
-rw-r--r-- playbooks/aws/ansible-tower/config.yml | 24
l--------- playbooks/aws/ansible-tower/filter_plugins | 1
-rw-r--r-- playbooks/aws/ansible-tower/launch.yml | 79
l--------- playbooks/aws/ansible-tower/roles | 1
-rw-r--r-- playbooks/aws/ansible-tower/user_data.txt | 6
-rw-r--r-- playbooks/aws/ansible-tower/vars.ops.yml | 9
-rw-r--r-- playbooks/aws/ansible-tower/vars.yml | 1
-rw-r--r-- playbooks/aws/openshift-cluster/add_nodes.yml | 5
-rw-r--r-- playbooks/aws/openshift-cluster/cluster_hosts.yml | 2
-rw-r--r-- playbooks/aws/openshift-cluster/config.yml | 28
-rw-r--r-- playbooks/aws/openshift-cluster/launch.yml | 5
-rw-r--r-- playbooks/aws/openshift-cluster/list.yml | 2
-rw-r--r-- playbooks/aws/openshift-cluster/scaleup.yml | 4
-rw-r--r-- playbooks/aws/openshift-cluster/service.yml | 4
-rw-r--r-- playbooks/aws/openshift-cluster/tasks/launch_instances.yml | 98
-rw-r--r-- playbooks/aws/openshift-cluster/templates/user_data.j2 | 36
-rw-r--r-- playbooks/aws/openshift-cluster/terminate.yml | 2
-rw-r--r-- playbooks/aws/openshift-cluster/update.yml | 23
-rw-r--r-- playbooks/aws/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml | 5
-rw-r--r-- playbooks/aws/openshift-cluster/vars.defaults.yml | 1
-rw-r--r-- playbooks/aws/openshift-cluster/vars.online.int.yml | 15
-rw-r--r-- playbooks/aws/openshift-cluster/vars.online.prod.yml | 15
-rw-r--r-- playbooks/aws/openshift-cluster/vars.online.stage.yml | 15
-rw-r--r-- playbooks/aws/openshift-cluster/vars.yml | 49
-rw-r--r-- playbooks/byo/openshift-cluster/cluster_hosts.yml | 4
-rw-r--r-- playbooks/byo/openshift-cluster/config.yml | 18
-rw-r--r-- playbooks/byo/openshift-cluster/enable_dnsmasq.yml | 18
-rw-r--r-- playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml | 106
-rw-r--r-- playbooks/byo/openshift-cluster/upgrades/docker/files/nuke_images.sh | 23
l--------- playbooks/byo/openshift-cluster/upgrades/docker/roles | 1
-rw-r--r-- playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml | 29
-rw-r--r-- playbooks/byo/openshift-cluster/upgrades/v3_0_minor/upgrade.yml | 30
-rw-r--r-- playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/README.md | 1
-rw-r--r-- playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml | 30
-rw-r--r-- playbooks/byo/openshift-cluster/upgrades/v3_1_minor/upgrade.yml | 28
-rw-r--r-- playbooks/byo/openshift-cluster/upgrades/v3_1_to_v3_2/README.md | 16
-rw-r--r-- playbooks/byo/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml | 59
-rw-r--r-- playbooks/byo/openshift-master/restart.yml | 18
-rw-r--r-- playbooks/byo/openshift-master/scaleup.yml | 18
-rw-r--r-- playbooks/byo/openshift-node/scaleup.yml | 18
-rw-r--r-- playbooks/byo/openshift_facts.yml | 18
-rw-r--r-- playbooks/byo/rhel_subscribe.yml | 20
-rw-r--r-- playbooks/common/openshift-cluster/additional_config.yml | 28
-rw-r--r-- playbooks/common/openshift-cluster/config.yml | 28
-rw-r--r-- playbooks/common/openshift-cluster/enable_dnsmasq.yml | 66
-rw-r--r-- playbooks/common/openshift-cluster/evaluate_groups.yml | 24
-rw-r--r-- playbooks/common/openshift-cluster/initialize_facts.yml | 11
l--------- playbooks/common/openshift-cluster/library | 1
-rw-r--r-- playbooks/common/openshift-cluster/openshift_hosted.yml | 30
-rw-r--r-- playbooks/common/openshift-cluster/update_repos_and_packages.yml | 7
-rw-r--r-- playbooks/common/openshift-cluster/upgrades/files/openshift_container_versions.sh | 22
-rw-r--r-- playbooks/common/openshift-cluster/upgrades/files/rpm_versions.sh | 12
-rw-r--r-- playbooks/common/openshift-cluster/upgrades/files/versions.sh | 9
-rw-r--r-- playbooks/common/openshift-cluster/upgrades/v3_0_minor/upgrade.yml | 8
-rw-r--r-- playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml | 14
-rw-r--r-- playbooks/common/openshift-cluster/upgrades/v3_1_minor/post.yml | 1
-rw-r--r-- playbooks/common/openshift-cluster/upgrades/v3_1_minor/pre.yml | 5
-rw-r--r-- playbooks/common/openshift-cluster/upgrades/v3_1_minor/upgrade.yml | 6
l--------- playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/atomic-openshift-master.j2 | 1
-rw-r--r-- playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/containerized_upgrade.yml | 11
l--------- playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker | 1
l--------- playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker-cluster | 1
-rw-r--r-- playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker_upgrade.yml | 14
l--------- playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/filter_plugins | 1
l--------- playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/library | 1
l--------- playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/lookup_plugins | 1
l--------- playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/native-cluster | 1
-rw-r--r-- playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/node_upgrade.yml | 24
-rw-r--r-- playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/post.yml | 59
-rw-r--r-- playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml | 345
l--------- playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/roles | 1
-rw-r--r-- playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/rpm_upgrade.yml | 9
-rw-r--r-- playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml | 192
-rw-r--r-- playbooks/common/openshift-cluster/validate_hostnames.yml | 10
-rw-r--r-- playbooks/common/openshift-docker/config.yml | 9
l--------- playbooks/common/openshift-docker/filter_plugins | 1
l--------- playbooks/common/openshift-docker/roles | 1
-rw-r--r-- playbooks/common/openshift-etcd/config.yml | 33
-rw-r--r-- playbooks/common/openshift-loadbalancer/config.yml | 5
l--------- playbooks/common/openshift-loadbalancer/filter_plugins (renamed from playbooks/adhoc/noc/filter_plugins) | 0
l--------- playbooks/common/openshift-loadbalancer/lookup_plugins (renamed from playbooks/common/openshift-docker/lookup_plugins) | 0
l--------- playbooks/common/openshift-loadbalancer/roles | 1
-rw-r--r-- playbooks/common/openshift-loadbalancer/service.yml | 20
-rw-r--r-- playbooks/common/openshift-master/config.yml | 123
l--------- playbooks/common/openshift-master/library | 1
-rwxr-xr-x playbooks/common/openshift-master/library/modify_yaml.py | 95
-rw-r--r-- playbooks/common/openshift-master/restart.yml | 4
-rw-r--r-- playbooks/common/openshift-master/scaleup.yml | 5
-rw-r--r-- playbooks/common/openshift-node/config.yml | 214
-rw-r--r-- playbooks/common/openshift-node/scaleup.yml | 5
-rw-r--r-- playbooks/gce/openshift-cluster/cluster_hosts.yml | 2
-rw-r--r-- playbooks/gce/openshift-cluster/config.yml | 25
-rw-r--r-- playbooks/gce/openshift-cluster/library/gce.py | 543
-rw-r--r-- playbooks/gce/openshift-cluster/list.yml | 2
-rw-r--r-- playbooks/gce/openshift-cluster/service.yml | 4
-rw-r--r-- playbooks/gce/openshift-cluster/tasks/launch_instances.yml | 7
-rw-r--r-- playbooks/gce/openshift-cluster/terminate.yml | 2
-rw-r--r-- playbooks/gce/openshift-cluster/update.yml | 23
-rw-r--r-- playbooks/gce/openshift-cluster/vars.yml | 9
-rw-r--r-- playbooks/libvirt/openshift-cluster/cluster_hosts.yml | 2
-rw-r--r-- playbooks/libvirt/openshift-cluster/config.yml | 24
-rw-r--r-- playbooks/libvirt/openshift-cluster/launch.yml | 6
-rw-r--r-- playbooks/libvirt/openshift-cluster/list.yml | 2
-rw-r--r-- playbooks/libvirt/openshift-cluster/service.yml | 4
-rw-r--r-- playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml | 40
-rw-r--r-- playbooks/libvirt/openshift-cluster/templates/domain.xml | 18
-rw-r--r-- playbooks/libvirt/openshift-cluster/templates/user-data | 9
-rw-r--r-- playbooks/libvirt/openshift-cluster/terminate.yml | 10
-rw-r--r-- playbooks/libvirt/openshift-cluster/update.yml | 20
-rw-r--r-- playbooks/libvirt/openshift-cluster/vars.yml | 22
-rw-r--r-- playbooks/openstack/openshift-cluster/cluster_hosts.yml | 2
-rw-r--r-- playbooks/openstack/openshift-cluster/config.yml | 23
-rw-r--r-- playbooks/openstack/openshift-cluster/dns.yml | 4
-rw-r--r-- playbooks/openstack/openshift-cluster/files/heat_stack.yaml | 21
-rw-r--r-- playbooks/openstack/openshift-cluster/files/user-data | 6
-rw-r--r-- playbooks/openstack/openshift-cluster/launch.yml | 16
-rw-r--r-- playbooks/openstack/openshift-cluster/list.yml | 2
-rw-r--r-- playbooks/openstack/openshift-cluster/terminate.yml | 2
-rw-r--r-- playbooks/openstack/openshift-cluster/update.yml | 23
-rw-r--r-- playbooks/openstack/openshift-cluster/vars.yml | 9
134 files changed, 3002 insertions, 1058 deletions
diff --git a/playbooks/adhoc/atomic_openshift_tutorial_reset.yml b/playbooks/adhoc/atomic_openshift_tutorial_reset.yml
index c14d08e87..5a5a00ea4 100644
--- a/playbooks/adhoc/atomic_openshift_tutorial_reset.yml
+++ b/playbooks/adhoc/atomic_openshift_tutorial_reset.yml
@@ -8,7 +8,7 @@
- hosts:
- OSEv3:children
- sudo: yes
+ become: yes
tasks:
- shell: docker ps -a -q | xargs docker stop
diff --git a/playbooks/adhoc/bootstrap-fedora.yml b/playbooks/adhoc/bootstrap-fedora.yml
index 471c41f16..b380a74d6 100644
--- a/playbooks/adhoc/bootstrap-fedora.yml
+++ b/playbooks/adhoc/bootstrap-fedora.yml
@@ -1,4 +1,4 @@
- hosts: OSEv3
tasks:
- name: install python and deps for ansible modules
- raw: dnf install -y python2 python2-dnf libselinux-python libsemanage-python
+ raw: dnf install -y python2 python2-dnf libselinux-python libsemanage-python python2-firewall
diff --git a/playbooks/adhoc/create_pv/pv-template.j2 b/playbooks/adhoc/create_pv/pv-template.j2
index 5654ef6c4..df082614b 100644
--- a/playbooks/adhoc/create_pv/pv-template.j2
+++ b/playbooks/adhoc/create_pv/pv-template.j2
@@ -10,7 +10,7 @@ spec:
storage: {{ vol_size }}Gi
accessModes:
- ReadWriteOnce
- persistentVolumeReclaimPolicy: Recycle
+ persistentVolumeReclaimPolicy: Retain
awsElasticBlockStore:
volumeID: aws://{{ vol_az }}/{{ vol_id }}
fsType: ext4
diff --git a/playbooks/adhoc/metrics_setup/README.md b/playbooks/adhoc/metrics_setup/README.md
new file mode 100644
index 000000000..71aa1e109
--- /dev/null
+++ b/playbooks/adhoc/metrics_setup/README.md
@@ -0,0 +1,25 @@
+## Playbook for adding [Metrics](https://github.com/openshift/origin-metrics) to OpenShift
+
+See the OSE Ansible [readme](https://github.com/openshift/openshift-ansible/blob/master/README_OSE.md) for general install instructions. The playbook has been tested on an OSE 3.1 / RHEL 7.2 cluster.
+
+
+Add the following vars to the `[OSEv3:vars]` section of your inventory file:
+```
+[OSEv3:vars]
+# Enable cluster metrics
+use_cluster_metrics=true
+metrics_external_service=< external service name for metrics >
+metrics_image_prefix=rcm-img-docker01.build.eng.bos.redhat.com:5001/openshift3/
+metrics_image_version=3.1.0
+```
+
+Run the playbook:
+```
+ansible-playbook -i $INVENTORY_FILE playbooks/install.yml
+```
+
+## Contact
+Email: hawkular-dev@lists.jboss.org
+
+## Credits
+Playbook adapted from install shell scripts by Matt Mahoney
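The patch also ships an uninstall playbook next to install.yml (its diff appears below); a minimal invocation sketch, assuming the same inventory file as the install step:

```
ansible-playbook -i $INVENTORY_FILE playbooks/uninstall.yml
```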
diff --git a/playbooks/adhoc/metrics_setup/files/metrics-deployer-setup.yaml b/playbooks/adhoc/metrics_setup/files/metrics-deployer-setup.yaml
new file mode 100644
index 000000000..f70e0b18b
--- /dev/null
+++ b/playbooks/adhoc/metrics_setup/files/metrics-deployer-setup.yaml
@@ -0,0 +1,37 @@
+#!/bin/bash
+#
+# Copyright 2014-2015 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+apiVersion: "v1"
+kind: "List"
+metadata:
+ name: metrics-deployer-setup
+ annotations:
+ description: "Required dependencies for the metrics deployer pod."
+ tags: "infrastructure"
+labels:
+ metrics-infra: deployer
+ provider: openshift
+ component: deployer
+items:
+-
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: metrics-deployer
+ secrets:
+ - name: metrics-deployer
diff --git a/playbooks/adhoc/metrics_setup/files/metrics.yaml b/playbooks/adhoc/metrics_setup/files/metrics.yaml
new file mode 100644
index 000000000..d823b2587
--- /dev/null
+++ b/playbooks/adhoc/metrics_setup/files/metrics.yaml
@@ -0,0 +1,116 @@
+#!/bin/bash
+#
+# Copyright 2014-2015 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+apiVersion: "v1"
+kind: "Template"
+metadata:
+ name: metrics-deployer-template
+ annotations:
+ description: "Template for deploying the required Metrics integration. Requires cluster-admin 'metrics-deployer' service account and 'metrics-deployer' secret."
+ tags: "infrastructure"
+labels:
+ metrics-infra: deployer
+ provider: openshift
+ component: deployer
+objects:
+-
+ apiVersion: v1
+ kind: Pod
+ metadata:
+ generateName: metrics-deployer-
+ spec:
+ containers:
+ - image: ${IMAGE_PREFIX}metrics-deployer:${IMAGE_VERSION}
+ name: deployer
+ volumeMounts:
+ - name: secret
+ mountPath: /secret
+ readOnly: true
+ - name: empty
+ mountPath: /etc/deploy
+ env:
+ - name: PROJECT
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: IMAGE_PREFIX
+ value: ${IMAGE_PREFIX}
+ - name: IMAGE_VERSION
+ value: ${IMAGE_VERSION}
+ - name: PUBLIC_MASTER_URL
+ value: ${PUBLIC_MASTER_URL}
+ - name: MASTER_URL
+ value: ${MASTER_URL}
+ - name: REDEPLOY
+ value: ${REDEPLOY}
+ - name: USE_PERSISTENT_STORAGE
+ value: ${USE_PERSISTENT_STORAGE}
+ - name: HAWKULAR_METRICS_HOSTNAME
+ value: ${HAWKULAR_METRICS_HOSTNAME}
+ - name: CASSANDRA_NODES
+ value: ${CASSANDRA_NODES}
+ - name: CASSANDRA_PV_SIZE
+ value: ${CASSANDRA_PV_SIZE}
+ - name: METRIC_DURATION
+ value: ${METRIC_DURATION}
+ dnsPolicy: ClusterFirst
+ restartPolicy: Never
+ serviceAccount: metrics-deployer
+ volumes:
+ - name: empty
+ emptyDir: {}
+ - name: secret
+ secret:
+ secretName: metrics-deployer
+parameters:
+-
+ description: 'Specify prefix for metrics components; e.g. for "openshift/origin-metrics-deployer:v1.1", set prefix "openshift/origin-"'
+ name: IMAGE_PREFIX
+ value: "hawkular/"
+-
+ description: 'Specify version for metrics components; e.g. for "openshift/origin-metrics-deployer:v1.1", set version "v1.1"'
+ name: IMAGE_VERSION
+ value: "0.7.0-SNAPSHOT"
+-
+ description: "Internal URL for the master, for authentication retrieval"
+ name: MASTER_URL
+ value: "https://kubernetes.default.svc:443"
+-
+ description: "External hostname where clients will reach Hawkular Metrics"
+ name: HAWKULAR_METRICS_HOSTNAME
+ required: true
+-
+  description: "If set to true the deployer will try to delete all the existing components before trying to redeploy."
+ name: REDEPLOY
+ value: "false"
+-
+  description: "Set to true for persistent storage, set to false to use non-persistent storage"
+ name: USE_PERSISTENT_STORAGE
+ value: "true"
+-
+ description: "The number of Cassandra Nodes to deploy for the initial cluster"
+ name: CASSANDRA_NODES
+ value: "1"
+-
+ description: "The persistent volume size for each of the Cassandra nodes"
+ name: CASSANDRA_PV_SIZE
+ value: "1Gi"
+-
+ description: "How many days metrics should be stored for."
+ name: METRIC_DURATION
+ value: "7"
diff --git a/playbooks/adhoc/metrics_setup/playbooks/install.yml b/playbooks/adhoc/metrics_setup/playbooks/install.yml
new file mode 100644
index 000000000..235f775ef
--- /dev/null
+++ b/playbooks/adhoc/metrics_setup/playbooks/install.yml
@@ -0,0 +1,36 @@
+---
+- include: master_config_facts.yml
+- name: "Install metrics"
+ hosts: masters
+ vars:
+ metrics_public_url: "https://{{ metrics_external_service }}/hawkular/metrics"
+ tasks:
+ - name: "Add metrics url to master config"
+ lineinfile: "state=present dest=/etc/origin/master/master-config.yaml regexp='^\ \ metricsPublicURL' insertbefore='^\ \ publicURL' line='\ \ metricsPublicURL: {{ metrics_public_url }}'"
+
+ - name: "Restart master service"
+ service: name=atomic-openshift-master state=restarted
+
+ - name: "Copy metrics-deployer yaml to remote"
+ copy: "src=../files/metrics-deployer-setup.yaml dest=/tmp/metrics-deployer-setup.yaml force=yes"
+
+ - name: "Add metrics-deployer"
+ command: "{{item}}"
+ with_items:
+ - oc project openshift-infra
+ - oc create -f /tmp/metrics-deployer-setup.yaml
+
+ - name: "Give metrics-deployer SA permissions"
+ command: "oadm policy add-role-to-user edit system:serviceaccount:openshift-infra:metrics-deployer"
+
+ - name: "Give heapster SA permissions"
+ command: "oadm policy add-cluster-role-to-user cluster-reader system:serviceaccount:openshift-infra:heapster"
+
+ - name: "Create metrics-deployer secret"
+ command: "oc secrets new metrics-deployer nothing=/dev/null"
+
+ - name: "Copy metrics.yaml to remote"
+ copy: "src=../files/metrics.yaml dest=/tmp/metrics.yaml force=yes"
+
+ - name: "Process yml template"
+ shell: "oc process -f /tmp/metrics.yaml -v MASTER_URL={{ masterPublicURL }},REDEPLOY=true,HAWKULAR_METRICS_HOSTNAME={{ metrics_external_service }},IMAGE_PREFIX={{ metrics_image_prefix }},IMAGE_VERSION={{ metrics_image_version }},USE_PERSISTENT_STORAGE=false | oc create -f -"
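The lineinfile task earlier in this play targets the two-space-indented keys of the `assetConfig` block in master-config.yaml; a sketch of the resulting fragment, with placeholder hostnames:

```
assetConfig:
  logoutURL: ""
  masterPublicURL: https://master.example.com:8443
  metricsPublicURL: https://metrics.example.com/hawkular/metrics
  publicURL: https://master.example.com:8443/console/
```

The `insertbefore='^\ \ publicURL'` anchor is what places the new key directly above `publicURL`.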
diff --git a/playbooks/adhoc/metrics_setup/playbooks/master_config_facts.yml b/playbooks/adhoc/metrics_setup/playbooks/master_config_facts.yml
new file mode 100644
index 000000000..65de11bc4
--- /dev/null
+++ b/playbooks/adhoc/metrics_setup/playbooks/master_config_facts.yml
@@ -0,0 +1,10 @@
+---
+- name: "Load master config"
+ hosts: masters
+ vars:
+ master_config_file: "/tmp/ansible-metrics-{{ ansible_hostname }}"
+ tasks:
+ - name: "Fetch master config from remote"
+ fetch: "src=/etc/origin/master/master-config.yaml dest={{ master_config_file }} flat=yes"
+ - name: "Load config"
+ include_vars: "{{ master_config_file }}"
diff --git a/playbooks/adhoc/metrics_setup/playbooks/uninstall.yml b/playbooks/adhoc/metrics_setup/playbooks/uninstall.yml
new file mode 100644
index 000000000..06c4586ee
--- /dev/null
+++ b/playbooks/adhoc/metrics_setup/playbooks/uninstall.yml
@@ -0,0 +1,16 @@
+---
+- name: "Uninstall metrics"
+ hosts: masters
+ tasks:
+ - name: "Remove metrics url from master config"
+ lineinfile: "state=absent dest=/etc/origin/master/master-config.yaml regexp='^\ \ metricsPublicURL'"
+
+ - name: "Delete metrics objects"
+ command: "{{item}}"
+ with_items:
+ - oc delete all --selector=metrics-infra
+ # - oc delete secrets --selector=metrics-infra
+ # - oc delete sa --selector=metrics-infra
+ - oc delete templates --selector=metrics-infra
+ - oc delete sa metrics-deployer
+ - oc delete secret metrics-deployer
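The `metrics-infra` selector used above can double as a post-run verification check; a sketch, assuming the openshift-infra project:

```
oc get all --selector=metrics-infra -n openshift-infra
```

An empty result suggests the labeled objects are gone; the service account, secret, and templates are deleted separately above because `oc delete all` does not cover those resource types.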
diff --git a/playbooks/adhoc/noc/roles b/playbooks/adhoc/noc/roles
deleted file mode 120000
index 20c4c58cf..000000000
--- a/playbooks/adhoc/noc/roles
+++ /dev/null
@@ -1 +0,0 @@
-../../../roles \ No newline at end of file
diff --git a/playbooks/adhoc/openshift_hosted_logging_efk.yaml b/playbooks/adhoc/openshift_hosted_logging_efk.yaml
new file mode 100644
index 000000000..a3121d046
--- /dev/null
+++ b/playbooks/adhoc/openshift_hosted_logging_efk.yaml
@@ -0,0 +1,6 @@
+---
+- hosts: masters[0]
+ roles:
+ - role: openshift_hosted_logging
+ openshift_hosted_logging_cleanup: no
+
diff --git a/playbooks/adhoc/sdn_restart/oo-sdn-restart.yml b/playbooks/adhoc/sdn_restart/oo-sdn-restart.yml
index 0dc021fbc..08e8f8968 100755
--- a/playbooks/adhoc/sdn_restart/oo-sdn-restart.yml
+++ b/playbooks/adhoc/sdn_restart/oo-sdn-restart.yml
@@ -49,5 +49,4 @@
name: "{{ item }}"
state: restarted
with_items:
- - oso-f22-host-monitoring
- - oso-rhel7-zagg-client
+ - oso-rhel7-host-monitoring
diff --git a/playbooks/adhoc/setupnfs.yml b/playbooks/adhoc/setupnfs.yml
index 5f3631fcf..fd489dc70 100644
--- a/playbooks/adhoc/setupnfs.yml
+++ b/playbooks/adhoc/setupnfs.yml
@@ -1,7 +1,7 @@
---
### This playbook is old and we are currently not using NFS.
- hosts: tag_Name_nfs-v3-stg
- sudo: no
+ become: no
remote_user: root
gather_facts: no
roles:
diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml
index 8b620d9ad..4edd44fe4 100644
--- a/playbooks/adhoc/uninstall.yml
+++ b/playbooks/adhoc/uninstall.yml
@@ -7,221 +7,369 @@
# images
# RPMs
---
-- hosts:
- - OSEv3:children
+- hosts: OSEv3:children
+ become: yes
+ tasks:
+ - name: Detecting Operating System
+ shell: ls /run/ostree-booted
+ ignore_errors: yes
+ failed_when: false
+ register: ostree_output
- sudo: yes
+ # Since we're not calling openshift_facts we'll do this for now
+ - set_fact:
+ is_atomic: "{{ ostree_output.rc == 0 }}"
+ - set_fact:
+ is_containerized: "{{ is_atomic or containerized | default(false) | bool }}"
+- hosts: nodes
+ become: yes
tasks:
- - name: Detecting Operating System
- shell: ls /run/ostree-booted
- ignore_errors: yes
- failed_when: false
- register: ostree_output
-
- # Since we're not calling openshift_facts we'll do this for now
- - set_fact:
- is_atomic: "{{ ostree_output.rc == 0 }}"
- - set_fact:
- is_containerized: "{{ is_atomic or containerized | default(false) | bool }}"
-
- - name: Remove br0 interface
- shell: ovs-vsctl del-br br0
- changed_when: False
- failed_when: False
-
- - name: Stop services
- service: name={{ item }} state=stopped
- with_items:
- - atomic-enterprise-master
- - atomic-enterprise-node
- - atomic-openshift-master
- - atomic-openshift-master-api
- - atomic-openshift-master-controllers
- - atomic-openshift-node
- - etcd
- - haproxy
- - openshift-master
- - openshift-master-api
- - openshift-master-controllers
- - openshift-node
- - openvswitch
- - origin-master
- - origin-master-api
- - origin-master-controllers
- - origin-node
- - pcsd
- failed_when: false
-
- - name: Stop additional atomic services
- service: name={{ item }} state=stopped
- when: is_atomic | bool
- with_items:
- - etcd_container
- failed_when: false
-
- - name: Remove packages
- action: "{{ ansible_pkg_mgr }} name={{ item }} state=absent"
- when: not is_atomic | bool
- with_items:
- - atomic-enterprise
- - atomic-enterprise-master
- - atomic-enterprise-node
- - atomic-enterprise-sdn-ovs
- - atomic-openshift
- - atomic-openshift-clients
- - atomic-openshift-master
- - atomic-openshift-node
- - atomic-openshift-sdn-ovs
- - corosync
- - etcd
- - haproxy
- - openshift
- - openshift-master
- - openshift-node
- - openshift-sdn
- - openshift-sdn-ovs
- - openvswitch
- - origin
- - origin-clients
- - origin-master
- - origin-node
- - origin-sdn-ovs
- - pacemaker
- - pcs
- - tuned-profiles-atomic-enterprise-node
- - tuned-profiles-atomic-openshift-node
- - tuned-profiles-openshift-node
- - tuned-profiles-origin-node
-
- - name: Remove linux interfaces
- shell: ip link del "{{ item }}"
- changed_when: False
- failed_when: False
- with_items:
- - lbr0
- - vlinuxbr
- - vovsbr
-
- - shell: systemctl reset-failed
- changed_when: False
-
- - shell: systemctl daemon-reload
- changed_when: False
-
- - shell: find /var/lib/origin/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true
- changed_when: False
-
- - shell: find /var/lib/atomic-enterprise/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true
- changed_when: False
-
- - shell: find /var/lib/openshift/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true
- changed_when: False
-
- - shell: docker rm -f "{{ item }}"-master "{{ item }}"-node
- changed_when: False
- failed_when: False
- with_items:
- - openshift-enterprise
- - atomic-enterprise
- - origin
-
- - shell: docker ps -a | grep Exited | egrep "{{ item }}" | awk '{print $1}'
- changed_when: False
- failed_when: False
- register: exited_containers_to_delete
- with_items:
- - aep3.*/aep
- - openshift3/ose
- - openshift/origin
-
- - shell: "docker rm {{ item.stdout_lines | join(' ') }}"
- changed_when: False
- failed_when: False
- with_items: "{{ exited_containers_to_delete.results }}"
-
- - shell: docker images | egrep {{ item }} | awk '{ print $3 }'
- changed_when: False
- failed_when: False
- register: images_to_delete
- with_items:
- - registry\.access\..*redhat\.com/openshift3
- - registry\.access\..*redhat\.com/aep3
- - registry\.access\..*redhat\.com/rhel7/etcd
- - docker.io/openshift
-
- - shell: "docker rmi -f {{ item.stdout_lines | join(' ') }}"
- changed_when: False
- failed_when: False
- with_items: "{{ images_to_delete.results }}"
-
- - name: Remove sdn drop files
- file:
- path: /run/openshift-sdn
- state: absent
-
- - name: restart docker
- service:
- name: docker
- state: restarted
-
- - name: Remove remaining files
- file: path={{ item }} state=absent
- with_items:
- - "~{{ ansible_ssh_user }}/.kube"
- - /etc/ansible/facts.d/openshift.fact
- - /etc/atomic-enterprise
- - /etc/corosync
- - /etc/etcd
- - /etc/openshift
- - /etc/openshift-sdn
- - /etc/origin
- - /etc/systemd/system/atomic-openshift-master.service
- - /etc/systemd/system/atomic-openshift-master-api.service
- - /etc/systemd/system/atomic-openshift-master-controllers.service
- - /etc/systemd/system/atomic-openshift-node.service
- - /etc/systemd/system/etcd_container.service
- - /etc/systemd/system/openvswitch.service
- - /etc/sysconfig/atomic-enterprise-master
- - /etc/sysconfig/atomic-enterprise-master-api
- - /etc/sysconfig/atomic-enterprise-master-controllers
- - /etc/sysconfig/atomic-enterprise-node
- - /etc/sysconfig/atomic-openshift-master
- - /etc/sysconfig/atomic-openshift-master-api
- - /etc/sysconfig/atomic-openshift-master-controllers
- - /etc/sysconfig/atomic-openshift-node
- - /etc/sysconfig/openshift-master
- - /etc/sysconfig/openshift-node
- - /etc/sysconfig/origin-master
- - /etc/sysconfig/origin-master-api
- - /etc/sysconfig/origin-master-controllers
- - /etc/sysconfig/origin-node
- - /etc/systemd/system/atomic-openshift-node.service.wants
- - /root/.kube
- - /run/openshift-sdn
- - /usr/share/openshift/examples
- - /var/lib/atomic-enterprise
- - /var/lib/etcd
- - /var/lib/openshift
- - /var/lib/origin
- - /var/lib/pacemaker
- - /usr/lib/systemd/system/atomic-openshift-master-api.service
- - /usr/lib/systemd/system/atomic-openshift-master-controllers.service
- - /usr/lib/systemd/system/origin-master-api.service
- - /usr/lib/systemd/system/origin-master-controllers.service
- - /usr/local/bin/openshift
- - /usr/local/bin/oadm
- - /usr/local/bin/oc
- - /usr/local/bin/kubectl
-
- # Since we are potentially removing the systemd unit files for separated
- # master-api and master-controllers services, so we need to reload the
- # systemd configuration manager
- - name: Reload systemd manager configuration
- command: systemctl daemon-reload
+ - name: Stop services
+ service: name={{ item }} state=stopped
+ with_items:
+ - atomic-enterprise-node
+ - atomic-openshift-node
+ - openshift-node
+ - openvswitch
+ - origin-node
+ failed_when: false
-- hosts: nodes
- sudo: yes
+ - name: unmask services
+ command: systemctl unmask "{{ item }}"
+ changed_when: False
+ failed_when: False
+ with_items:
+ - firewalld
+
+ - name: Remove packages
+ action: "{{ ansible_pkg_mgr }} name={{ item }} state=absent"
+ when: not is_atomic | bool
+ with_items:
+ - atomic-enterprise
+ - atomic-enterprise-node
+ - atomic-enterprise-sdn-ovs
+ - atomic-openshift
+ - atomic-openshift-clients
+ - atomic-openshift-node
+ - atomic-openshift-sdn-ovs
+ - cockpit-bridge
+ - cockpit-docker
+ - cockpit-shell
+ - cockpit-ws
+ - kubernetes-client
+ - openshift
+ - openshift-node
+ - openshift-sdn
+ - openshift-sdn-ovs
+ - openvswitch
+ - origin
+ - origin-clients
+ - origin-node
+ - origin-sdn-ovs
+ - tuned-profiles-atomic-enterprise-node
+ - tuned-profiles-atomic-openshift-node
+ - tuned-profiles-openshift-node
+ - tuned-profiles-origin-node
+
+ - shell: systemctl reset-failed
+ changed_when: False
+
+ - shell: systemctl daemon-reload
+ changed_when: False
+
+ - name: Remove br0 interface
+ shell: ovs-vsctl del-br br0
+ changed_when: False
+ failed_when: False
+
+ - name: Remove linux interfaces
+ shell: ip link del "{{ item }}"
+ changed_when: False
+ failed_when: False
+ with_items:
+ - lbr0
+ - vlinuxbr
+ - vovsbr
+
+ - name: restart docker
+ service: name=docker state=restarted
+
+ - name: restart NetworkManager
+ service: name=NetworkManager state=restarted
+
+ - shell: find /var/lib/origin/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true
+ changed_when: False
+
+ - shell: find /var/lib/atomic-enterprise/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true
+ changed_when: False
+
+ - shell: find /var/lib/openshift/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true
+ changed_when: False
+
+ - shell: docker rm -f "{{ item }}"-master "{{ item }}"-node
+ changed_when: False
+ failed_when: False
+ with_items:
+ - openshift-enterprise
+ - atomic-enterprise
+ - origin
+
+ - shell: docker ps -a | grep Exited | egrep "{{ item }}" | awk '{print $1}'
+ changed_when: False
+ failed_when: False
+ register: exited_containers_to_delete
+ with_items:
+ - aep3.*/aep
+ - aep3.*/node
+ - aep3.*/openvswitch
+ - openshift3/ose
+ - openshift3/node
+ - openshift3/openvswitch
+ - openshift/origin
+
+ - shell: "docker rm {{ item.stdout_lines | join(' ') }}"
+ changed_when: False
+ failed_when: False
+ with_items: "{{ exited_containers_to_delete.results }}"
+
+ - shell: docker images | egrep {{ item }} | awk '{ print $3 }'
+ changed_when: False
+ failed_when: False
+ register: images_to_delete
+ with_items:
+ - registry\.access\..*redhat\.com/openshift3
+ - registry\.access\..*redhat\.com/aep3
+ - registry\.qe\.openshift\.com/.*
+ - registry\.access\..*redhat\.com/rhel7/etcd
+ - docker.io/openshift
+ when: openshift_uninstall_images | default(True) | bool
+
+ - shell: "docker rmi -f {{ item.stdout_lines | join(' ') }}"
+ changed_when: False
+ failed_when: False
+ with_items: "{{ images_to_delete.results }}"
+ when: openshift_uninstall_images | default(True) | bool
+
+ - name: Remove sdn drop files
+ file:
+ path: /run/openshift-sdn
+ state: absent
+
+ - name: Remove remaining files
+ file: path={{ item }} state=absent
+ with_items:
+ - /etc/ansible/facts.d/openshift.fact
+ - /etc/atomic-enterprise
+ - /etc/openshift
+ - /etc/openshift-sdn
+ - /etc/origin
+ - /etc/systemd/system/atomic-openshift-node.service
+ - /etc/systemd/system/atomic-openshift-node-dep.service
+ - /etc/systemd/system/origin-node.service
+ - /etc/systemd/system/origin-node-dep.service
+ - /etc/systemd/system/openvswitch.service
+ - /etc/sysconfig/atomic-enterprise-node
+ - /etc/sysconfig/atomic-openshift-node
+ - /etc/sysconfig/atomic-openshift-node-dep
+ - /etc/sysconfig/origin-node
+ - /etc/sysconfig/origin-node-dep
+ - /etc/sysconfig/openshift-node
+ - /etc/sysconfig/openshift-node-dep
+ - /etc/sysconfig/openvswitch
+ - /etc/sysconfig/origin-node
+ - /etc/systemd/system/atomic-openshift-node.service.wants
+ - /run/openshift-sdn
+ - /var/lib/atomic-enterprise
+ - /var/lib/openshift
+ - /var/lib/origin
+ - /etc/NetworkManager/dispatcher.d/99-origin-dns.sh
+ - /etc/dnsmasq.d/origin-dns.conf
+ - /etc/dnsmasq.d/origin-upstream-dns.conf
+
+- hosts: masters
+ become: yes
tasks:
- - name: restart docker
- service: name=docker state=restarted
+ - name: Stop services
+ service: name={{ item }} state=stopped
+ with_items:
+ - atomic-enterprise-master
+ - atomic-openshift-master
+ - atomic-openshift-master-api
+ - atomic-openshift-master-controllers
+ - openshift-master
+ - openshift-master-api
+ - openshift-master-controllers
+ - origin-master
+ - origin-master-api
+ - origin-master-controllers
+ - pcsd
+ failed_when: false
+
+ - name: unmask services
+ command: systemctl unmask "{{ item }}"
+ changed_when: False
+ failed_when: False
+ with_items:
+ - firewalld
+ - atomic-openshift-master
+
+ - name: Remove packages
+ action: "{{ ansible_pkg_mgr }} name={{ item }} state=absent"
+ when: not is_atomic | bool
+ with_items:
+ - atomic-enterprise
+ - atomic-enterprise-master
+ - atomic-openshift
+ - atomic-openshift-clients
+ - atomic-openshift-master
+ - cockpit-bridge
+ - cockpit-docker
+ - cockpit-shell
+ - cockpit-ws
+ - corosync
+ - kubernetes-client
+ - openshift
+ - openshift-master
+ - origin
+ - origin-clients
+ - origin-master
+ - pacemaker
+ - pcs
+
+ - shell: systemctl reset-failed
+ changed_when: False
+
+ - shell: systemctl daemon-reload
+ changed_when: False
+
+ - name: Remove remaining files
+ file: path={{ item }} state=absent
+ with_items:
+ - "~{{ ansible_ssh_user }}/.kube"
+ - /etc/ansible/facts.d/openshift.fact
+ - /etc/atomic-enterprise
+ - /etc/corosync
+ - /etc/openshift
+ - /etc/openshift-sdn
+ - /etc/origin
+ - /etc/systemd/system/atomic-openshift-master.service
+ - /etc/systemd/system/atomic-openshift-master-api.service
+ - /etc/systemd/system/atomic-openshift-master-controllers.service
+ - /etc/systemd/system/origin-master.service
+ - /etc/systemd/system/origin-master-api.service
+ - /etc/systemd/system/origin-master-controllers.service
+ - /etc/systemd/system/openvswitch.service
+ - /etc/sysconfig/atomic-enterprise-master
+ - /etc/sysconfig/atomic-enterprise-master-api
+ - /etc/sysconfig/atomic-enterprise-master-controllers
+ - /etc/sysconfig/atomic-openshift-master
+ - /etc/sysconfig/atomic-openshift-master-api
+ - /etc/sysconfig/atomic-openshift-master-controllers
+ - /etc/sysconfig/origin-master
+ - /etc/sysconfig/origin-master-api
+ - /etc/sysconfig/origin-master-controllers
+ - /etc/sysconfig/openshift-master
+ - /etc/sysconfig/openvswitch
+ - /etc/sysconfig/origin-master
+ - /etc/sysconfig/origin-master-api
+ - /etc/sysconfig/origin-master-controllers
+ - /root/.kube
+ - /usr/share/openshift/examples
+ - /var/lib/atomic-enterprise
+ - /var/lib/openshift
+ - /var/lib/origin
+ - /var/lib/pacemaker
+ - /var/lib/pcsd
+ - /usr/lib/systemd/system/atomic-openshift-master-api.service
+ - /usr/lib/systemd/system/atomic-openshift-master-controllers.service
+ - /usr/lib/systemd/system/origin-master-api.service
+ - /usr/lib/systemd/system/origin-master-controllers.service
+ - /usr/local/bin/openshift
+ - /usr/local/bin/oadm
+ - /usr/local/bin/oc
+ - /usr/local/bin/kubectl
+
+  # Since we are potentially removing the systemd unit files for the separated
+  # master-api and master-controllers services, we need to reload the systemd
+  # configuration manager
+ - name: Reload systemd manager configuration
+ command: systemctl daemon-reload
+
+- hosts: etcd
+ become: yes
+ tasks:
+ - name: Stop services
+ service: name={{ item }} state=stopped
+ with_items:
+ - etcd
+ failed_when: false
+
+ - name: unmask services
+ command: systemctl unmask "{{ item }}"
+ changed_when: False
+ failed_when: False
+ with_items:
+ - etcd
+ - firewalld
+
+ - name: Stop additional atomic services
+ service: name={{ item }} state=stopped
+ when: is_containerized | bool
+ with_items:
+ - etcd_container
+ failed_when: false
+
+ - name: Remove packages
+ action: "{{ ansible_pkg_mgr }} name={{ item }} state=absent"
+ when: not is_atomic | bool
+ with_items:
+ - etcd
+
+ - shell: systemctl reset-failed
+ changed_when: False
+
+ - shell: systemctl daemon-reload
+ changed_when: False
+
+ - name: Remove remaining files
+ file: path={{ item }} state=absent
+ with_items:
+ - /etc/ansible/facts.d/openshift.fact
+ - /etc/etcd
+ - /etc/systemd/system/etcd_container.service
+ - /var/lib/etcd
+
+- hosts: lb
+ become: yes
+ tasks:
+ - name: Stop services
+ service: name={{ item }} state=stopped
+ with_items:
+ - haproxy
+ failed_when: false
+
+ - name: unmask services
+ command: systemctl unmask "{{ item }}"
+ changed_when: False
+ failed_when: False
+ with_items:
+ - firewalld
+
+ - name: Remove packages
+ action: "{{ ansible_pkg_mgr }} name={{ item }} state=absent"
+ when: not is_atomic | bool
+ with_items:
+ - haproxy
+
+ - shell: systemctl reset-failed
+ changed_when: False
+
+ - shell: systemctl daemon-reload
+ changed_when: False
+
+ - name: Remove remaining files
+ file: path={{ item }} state=absent
+ with_items:
+ - /etc/ansible/facts.d/openshift.fact
+ - /var/lib/haproxy
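The image-removal tasks in the nodes play above are gated on `openshift_uninstall_images` (default true), so image cleanup can be skipped at invocation time; a sketch, assuming an inventory file named `hosts`:

```
ansible-playbook -i hosts playbooks/adhoc/uninstall.yml -e openshift_uninstall_images=false
```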
diff --git a/playbooks/aws/ansible-tower/config.yml b/playbooks/aws/ansible-tower/config.yml
deleted file mode 100644
index eb3f1a1da..000000000
--- a/playbooks/aws/ansible-tower/config.yml
+++ /dev/null
@@ -1,24 +0,0 @@
----
-- name: "populate oo_hosts_to_config host group if needed"
- hosts: localhost
- gather_facts: no
- connection: local
- become: no
- tasks:
- - name: Evaluate oo_host_group_exp if it's set
- add_host: "name={{ item }} groups=oo_hosts_to_config"
- with_items: "{{ oo_host_group_exp | default(['']) }}"
- when: oo_host_group_exp is defined
-
-- name: "Configure instances"
- hosts: oo_hosts_to_config
- connection: ssh
- user: root
- vars_files:
- - vars.yml
- - "vars.{{ oo_env }}.yml"
- roles:
- - os_ipv6_disable
- - ansible
- - ansible_tower
- - os_env_extras
diff --git a/playbooks/aws/ansible-tower/filter_plugins b/playbooks/aws/ansible-tower/filter_plugins
deleted file mode 120000
index 99a95e4ca..000000000
--- a/playbooks/aws/ansible-tower/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/aws/ansible-tower/launch.yml b/playbooks/aws/ansible-tower/launch.yml
deleted file mode 100644
index d40529435..000000000
--- a/playbooks/aws/ansible-tower/launch.yml
+++ /dev/null
@@ -1,79 +0,0 @@
----
-- name: Launch instance(s)
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
-
- vars:
- inst_region: us-east-1
- rhel7_ami: ami-9101c8fa
- user_data_file: user_data.txt
-
- vars_files:
- - vars.yml
- - "vars.{{ oo_env }}.yml"
-
- tasks:
- - name: Launch instances in VPC
- ec2:
- state: present
- region: "{{ inst_region }}"
- keypair: mmcgrath_libra
- group_id: "{{ oo_security_group_ids }}"
- instance_type: c4.xlarge
- image: "{{ rhel7_ami }}"
- count: "{{ oo_new_inst_names | length }}"
- user_data: "{{ lookup('file', user_data_file) }}"
- wait: yes
- assign_public_ip: "{{ oo_assign_public_ip }}"
- vpc_subnet_id: "{{ oo_vpc_subnet_id }}"
- register: ec2
-
- - name: Add Name and environment tags to instances
- ec2_tag: "resource={{ item.1.id }} region={{ inst_region }} state=present"
- with_together:
- - oo_new_inst_names
- - ec2.instances
- args:
- tags:
- Name: "{{ item.0 }}"
-
- - name: Add other tags to instances
- ec2_tag: "resource={{ item.id }} region={{ inst_region }} state=present"
- with_items: ec2.instances
- args:
- tags: "{{ oo_new_inst_tags }}"
-
- - name: Add new instances public IPs to oo_hosts_to_config
- add_host: "hostname={{ item.0 }} ansible_ssh_host={{ item.1.public_ip }} groupname=oo_hosts_to_config"
- with_together:
- - oo_new_inst_names
- - ec2.instances
-
- - debug: var=ec2
-
- - name: Wait for ssh
- wait_for: "port=22 host={{ item.public_ip }}"
- with_items: ec2.instances
-
- - name: Wait for root user setup
- command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null root@{{ item.public_ip }} echo root user is setup"
- register: result
- until: result.rc == 0
- retries: 20
- delay: 10
- with_items: ec2.instances
-
-- name: Initial setup
- hosts: oo_hosts_to_config
- user: root
- gather_facts: true
-
- tasks:
-
- - name: Update All Things
- action: "{{ ansible_pkg_mgr }} name=* state=latest"
-
-# Apply the configs, separate so that just the configs can be run by themselves
-- include: config.yml
diff --git a/playbooks/aws/ansible-tower/roles b/playbooks/aws/ansible-tower/roles
deleted file mode 120000
index 20c4c58cf..000000000
--- a/playbooks/aws/ansible-tower/roles
+++ /dev/null
@@ -1 +0,0 @@
-../../../roles \ No newline at end of file
diff --git a/playbooks/aws/ansible-tower/user_data.txt b/playbooks/aws/ansible-tower/user_data.txt
deleted file mode 100644
index 643d17c32..000000000
--- a/playbooks/aws/ansible-tower/user_data.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-#cloud-config
-disable_root: 0
-
-system_info:
- default_user:
- name: root
diff --git a/playbooks/aws/ansible-tower/vars.ops.yml b/playbooks/aws/ansible-tower/vars.ops.yml
deleted file mode 100644
index feb5d786a..000000000
--- a/playbooks/aws/ansible-tower/vars.ops.yml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-oo_env_long: operations
-oo_zabbix_hostgroups: ['OPS Environment']
-oo_vpc_subnet_id: subnet-4f0bdd38 # USE OPS
-oo_assign_public_ip: yes
-oo_security_group_ids:
- - sg-02c2f267 # Libra (vpc)
- - sg-7fc4f41a # ops (vpc)
- - sg-4dc26829 # ops_tower (vpc)
diff --git a/playbooks/aws/ansible-tower/vars.yml b/playbooks/aws/ansible-tower/vars.yml
deleted file mode 100644
index ed97d539c..000000000
--- a/playbooks/aws/ansible-tower/vars.yml
+++ /dev/null
@@ -1 +0,0 @@
----
diff --git a/playbooks/aws/openshift-cluster/add_nodes.yml b/playbooks/aws/openshift-cluster/add_nodes.yml
index 3d88e6b23..0e8eb90c1 100644
--- a/playbooks/aws/openshift-cluster/add_nodes.yml
+++ b/playbooks/aws/openshift-cluster/add_nodes.yml
@@ -6,14 +6,9 @@
gather_facts: no
vars_files:
- vars.yml
- - ["vars.{{ deployment_type }}.{{ cluster_id }}.yml", vars.defaults.yml]
vars:
oo_extend_env: True
tasks:
- - fail:
- msg: Deployment type not supported for aws provider yet
- when: deployment_type == 'enterprise'
-
- include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
vars:
type: "compute"
diff --git a/playbooks/aws/openshift-cluster/cluster_hosts.yml b/playbooks/aws/openshift-cluster/cluster_hosts.yml
index 9a3361919..119b376aa 100644
--- a/playbooks/aws/openshift-cluster/cluster_hosts.yml
+++ b/playbooks/aws/openshift-cluster/cluster_hosts.yml
@@ -16,6 +16,6 @@ g_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_node'] | defa
g_new_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_new_node'] | default([])) }}"
-g_infra_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type_infra']) | default([]) }}"
+g_infra_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type_infra'] | default([])) }}"
g_compute_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type_compute'] | default([])) }}"
diff --git a/playbooks/aws/openshift-cluster/config.yml b/playbooks/aws/openshift-cluster/config.yml
index 9fba856a2..4839c100b 100644
--- a/playbooks/aws/openshift-cluster/config.yml
+++ b/playbooks/aws/openshift-cluster/config.yml
@@ -1,21 +1,37 @@
+- hosts: localhost
+ gather_facts: no
+ tasks:
+ - include_vars: vars.yml
+ - include_vars: cluster_hosts.yml
+ - add_host:
+ name: "{{ item }}"
+ groups: l_oo_all_hosts
+ with_items: g_all_hosts
+
+- hosts: l_oo_all_hosts
+ gather_facts: no
+ tasks:
+ - include_vars: vars.yml
+ - include_vars: cluster_hosts.yml
+
- include: ../../common/openshift-cluster/config.yml
- vars_files:
- - ../../aws/openshift-cluster/vars.yml
- - ../../aws/openshift-cluster/cluster_hosts.yml
vars:
g_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- g_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ g_sudo: "{{ deployment_vars[deployment_type].become }}"
g_nodeonmaster: true
openshift_cluster_id: "{{ cluster_id }}"
openshift_debug_level: "{{ debug_level }}"
openshift_deployment_type: "{{ deployment_type }}"
openshift_public_hostname: "{{ ec2_ip_address }}"
openshift_registry_selector: 'type=infra'
- openshift_router_selector: 'type=infra'
+ openshift_hosted_router_selector: 'type=infra'
openshift_infra_nodes: "{{ g_infra_hosts }}"
- openshift_node_labels: '{"region": "{{ ec2_region }}", "type": "{{ hostvars[inventory_hostname]["ec2_tag_sub-host-type"] if inventory_hostname in groups["tag_host-type_node"] else hostvars[inventory_hostname]["ec2_tag_host-type"] }}"}'
+ openshift_node_labels:
+ region: "{{ deployment_vars[deployment_type].region }}"
+ type: "{{ hostvars[inventory_hostname]['ec2_tag_sub-host-type'] if inventory_hostname in groups['tag_host-type_node'] else hostvars[inventory_hostname]['ec2_tag_host-type'] }}"
openshift_master_cluster_method: 'native'
openshift_use_openshift_sdn: "{{ lookup('oo_option', 'use_openshift_sdn') }}"
os_sdn_network_plugin_name: "{{ lookup('oo_option', 'sdn_network_plugin_name') }}"
openshift_use_flannel: "{{ lookup('oo_option', 'use_flannel') }}"
openshift_use_fluentd: "{{ lookup('oo_option', 'use_fluentd') }}"
+ openshift_use_dnsmasq: false
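The `g_sudo: "{{ deployment_vars[deployment_type].become }}"` change assumes the per-provider vars.yml (whose diff is not shown in this section) now keys privilege escalation on `become` rather than `sudo`; a minimal sketch of the expected shape, with placeholder user names:

```
deployment_vars:
  origin:
    ssh_user: centos
    become: yes
  enterprise:
    ssh_user: ec2-user
    become: yes
```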
diff --git a/playbooks/aws/openshift-cluster/launch.yml b/playbooks/aws/openshift-cluster/launch.yml
index 15b83dfad..3edace493 100644
--- a/playbooks/aws/openshift-cluster/launch.yml
+++ b/playbooks/aws/openshift-cluster/launch.yml
@@ -6,12 +6,7 @@
gather_facts: no
vars_files:
- vars.yml
- - ["vars.{{ deployment_type }}.{{ cluster_id }}.yml", vars.defaults.yml]
tasks:
- - fail:
- msg: Deployment type not supported for aws provider yet
- when: deployment_type == 'enterprise'
-
- include: ../../common/openshift-cluster/tasks/set_etcd_launch_facts.yml
- include: tasks/launch_instances.yml
vars:
diff --git a/playbooks/aws/openshift-cluster/list.yml b/playbooks/aws/openshift-cluster/list.yml
index 8b41a355e..a542b4ca3 100644
--- a/playbooks/aws/openshift-cluster/list.yml
+++ b/playbooks/aws/openshift-cluster/list.yml
@@ -15,7 +15,7 @@
name: "{{ item }}"
groups: oo_list_hosts
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ ansible_become: "{{ deployment_vars[deployment_type].become }}"
with_items: groups[scratch_group] | default([]) | difference(['localhost'])
- name: List Hosts
diff --git a/playbooks/aws/openshift-cluster/scaleup.yml b/playbooks/aws/openshift-cluster/scaleup.yml
index 7e3a47964..6fa9142a0 100644
--- a/playbooks/aws/openshift-cluster/scaleup.yml
+++ b/playbooks/aws/openshift-cluster/scaleup.yml
@@ -12,7 +12,7 @@
name: "{{ item }}"
groups: oo_hosts_to_update
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ ansible_become: "{{ deployment_vars[deployment_type].become }}"
with_items: "{{ groups.nodes_to_add }}"
- include: ../../common/openshift-cluster/update_repos_and_packages.yml
@@ -24,7 +24,7 @@
vars:
g_new_node_hosts: "{{ groups.nodes_to_add }}"
g_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- g_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ g_sudo: "{{ deployment_vars[deployment_type].become }}"
g_nodeonmaster: true
openshift_cluster_id: "{{ cluster_id }}"
openshift_debug_level: "{{ debug_level }}"
diff --git a/playbooks/aws/openshift-cluster/service.yml b/playbooks/aws/openshift-cluster/service.yml
index d5f7d6b19..f7f4812bb 100644
--- a/playbooks/aws/openshift-cluster/service.yml
+++ b/playbooks/aws/openshift-cluster/service.yml
@@ -16,7 +16,7 @@
name: "{{ item }}"
groups: g_service_masters
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ ansible_become: "{{ deployment_vars[deployment_type].become }}"
with_items: "{{ master_hosts | default([]) }}"
- name: Evaluate g_service_nodes
@@ -24,7 +24,7 @@
name: "{{ item }}"
groups: g_service_nodes
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ ansible_become: "{{ deployment_vars[deployment_type].become }}"
with_items: "{{ node_hosts | default([]) }}"
- include: ../../common/openshift-node/service.yml
diff --git a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml
index 63be06ecf..d22c86cda 100644
--- a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml
+++ b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml
@@ -8,80 +8,50 @@
sub_host_type: "{{ g_sub_host_type }}"
- set_fact:
- ec2_region: "{{ lookup('env', 'ec2_region')
- | default(deployment_vars[deployment_type].region, true) }}"
- when: ec2_region is not defined
-- set_fact:
- ec2_image_name: "{{ lookup('env', 'ec2_image_name')
- | default(deployment_vars[deployment_type].image_name, true) }}"
- when: ec2_image_name is not defined and ec2_image is not defined
-- set_fact:
- ec2_image: "{{ lookup('env', 'ec2_image')
- | default(deployment_vars[deployment_type].image, true) }}"
- when: ec2_image is not defined and not ec2_image_name
-- set_fact:
- ec2_keypair: "{{ lookup('env', 'ec2_keypair')
- | default(deployment_vars[deployment_type].keypair, true) }}"
- when: ec2_keypair is not defined
-- set_fact:
- ec2_vpc_subnet: "{{ lookup('env', 'ec2_vpc_subnet')
- | default(deployment_vars[deployment_type].vpc_subnet, true) }}"
- when: ec2_vpc_subnet is not defined
-- set_fact:
- ec2_assign_public_ip: "{{ lookup('env', 'ec2_assign_public_ip')
- | default(deployment_vars[deployment_type].assign_public_ip, true) }}"
- when: ec2_assign_public_ip is not defined
-
-- set_fact:
- ec2_instance_type: "{{ ec2_master_instance_type | default(lookup('env', 'ec2_master_instance_type') | default(lookup('env', 'ec2_instance_type') | default(deployment_vars[deployment_type].type, true), true), true) }}"
- ec2_security_groups: "{{ ec2_master_security_groups | default(lookup('env', 'ec2_master_security_groups') | default(lookup('env', 'ec2_security_groups') | default(deployment_vars[deployment_type].security_groups, true), true), true) }}"
+ ec2_instance_type: "{{ lookup('env', 'ec2_master_instance_type') | default(deployment_vars[deployment_type].type, true) }}"
+ ec2_security_groups: "{{ lookup('env', 'ec2_master_security_groups') | default(deployment_vars[deployment_type].security_groups, true) }}"
when: host_type == "master" and sub_host_type == "default"
- set_fact:
- ec2_instance_type: "{{ ec2_etcd_instance_type | default(lookup('env', 'ec2_etcd_instance_type') | default(lookup('env', 'ec2_instance_type') | default(deployment_vars[deployment_type].type, true), true), true) }}"
- ec2_security_groups: "{{ ec2_etcd_security_groups | default(lookup('env', 'ec2_etcd_security_groups') | default(lookup('env', 'ec2_security_groups') | default(deployment_vars[deployment_type].security_groups, true), true), true) }}"
+ ec2_instance_type: "{{ lookup('env', 'ec2_etcd_instance_type') | default(deployment_vars[deployment_type].type, true) }}"
+ ec2_security_groups: "{{ lookup('env', 'ec2_etcd_security_groups') | default(deployment_vars[deployment_type].security_groups, true) }}"
when: host_type == "etcd" and sub_host_type == "default"
- set_fact:
- ec2_instance_type: "{{ ec2_infra_instance_type | default(lookup('env', 'ec2_infra_instance_type') | default(lookup('env', 'ec2_instance_type') | default(deployment_vars[deployment_type].type, true), true), true) }}"
- ec2_security_groups: "{{ ec2_infra_security_groups | default(lookup('env', 'ec2_infra_security_groups') | default(lookup('env', 'ec2_security_groups') | default(deployment_vars[deployment_type].security_groups, true), true), true) }}"
+ ec2_instance_type: "{{ lookup('env', 'ec2_infra_instance_type') | default(deployment_vars[deployment_type].type, true) }}"
+ ec2_security_groups: "{{ lookup('env', 'ec2_infra_security_groups') | default(deployment_vars[deployment_type].security_groups, true) }}"
when: host_type == "node" and sub_host_type == "infra"
- set_fact:
- ec2_instance_type: "{{ ec2_node_instance_type | default(lookup('env', 'ec2_node_instance_type') | default(lookup('env', 'ec2_instance_type') | default(deployment_vars[deployment_type].type, true), true), true) }}"
- ec2_security_groups: "{{ ec2_node_security_groups | default(lookup('env', 'ec2_node_security_groups') | default(lookup('env', 'ec2_security_groups') | default(deployment_vars[deployment_type].security_groups, true), true), true) }}"
+ ec2_instance_type: "{{ lookup('env', 'ec2_node_instance_type') | default(deployment_vars[deployment_type].type, true) }}"
+ ec2_security_groups: "{{ lookup('env', 'ec2_node_security_groups') | default(deployment_vars[deployment_type].security_groups, true) }}"
when: host_type == "node" and sub_host_type == "compute"
- set_fact:
- ec2_instance_type: "{{ lookup('env', 'ec2_instance_type')
- | default(deployment_vars[deployment_type].type, true) }}"
+ ec2_instance_type: "{{ deployment_vars[deployment_type].type }}"
when: ec2_instance_type is not defined
- set_fact:
- ec2_security_groups: "{{ lookup('env', 'ec2_security_groups') | default(deployment_vars[deployment_type].security_groups, true) }}"
+ ec2_security_groups: "{{ deployment_vars[deployment_type].security_groups }}"
when: ec2_security_groups is not defined
- name: Find amis for deployment_type
ec2_ami_find:
- region: "{{ ec2_region }}"
- ami_id: "{{ ec2_image | default(omit, true) }}"
- name: "{{ ec2_image_name | default(omit, true) }}"
+ region: "{{ deployment_vars[deployment_type].region }}"
+ ami_id: "{{ deployment_vars[deployment_type].image }}"
+ name: "{{ deployment_vars[deployment_type].image_name }}"
register: ami_result
- fail: msg="Could not find requested ami"
when: not ami_result.results
- set_fact:
- latest_ami: "{{ ami_result.results | oo_ami_selector(ec2_image_name) }}"
+ latest_ami: "{{ ami_result.results | oo_ami_selector(deployment_vars[deployment_type].image_name) }}"
volume_defs:
etcd:
root:
volume_size: "{{ lookup('env', 'os_etcd_root_vol_size') | default(25, true) }}"
device_type: "{{ lookup('env', 'os_etcd_root_vol_type') | default('gp2', true) }}"
iops: "{{ lookup('env', 'os_etcd_root_vol_iops') | default(500, true) }}"
- etcd:
- volume_size: "{{ lookup('env', 'os_etcd_vol_size') | default(32, true) }}"
- device_type: "{{ lookup('env', 'os_etcd_vol_type') | default('gp2', true) }}"
- iops: "{{ lookup('env', 'os_etcd_vol_iops') | default(500, true) }}"
master:
root:
volume_size: "{{ lookup('env', 'os_master_root_vol_size') | default(25, true) }}"
@@ -107,14 +77,14 @@
- name: Launch instance(s)
ec2:
state: present
- region: "{{ ec2_region }}"
- keypair: "{{ ec2_keypair }}"
- group: "{{ ec2_security_groups }}"
+ region: "{{ deployment_vars[deployment_type].region }}"
+ keypair: "{{ deployment_vars[deployment_type].keypair }}"
+ group: "{{ deployment_vars[deployment_type].security_groups }}"
instance_type: "{{ ec2_instance_type }}"
- image: "{{ latest_ami }}"
+ image: "{{ deployment_vars[deployment_type].image }}"
count: "{{ instances | length }}"
- vpc_subnet_id: "{{ ec2_vpc_subnet | default(omit, true) }}"
- assign_public_ip: "{{ ec2_assign_public_ip | default(omit, true) }}"
+ vpc_subnet_id: "{{ deployment_vars[deployment_type].vpc_subnet }}"
+ assign_public_ip: "{{ deployment_vars[deployment_type].assign_public_ip }}"
user_data: "{{ lookup('template', '../templates/user_data.j2') }}"
wait: yes
instance_tags:
@@ -127,7 +97,7 @@
register: ec2
- name: Add Name tag to instances
- ec2_tag: resource={{ item.1.id }} region={{ ec2_region }} state=present
+ ec2_tag: resource={{ item.1.id }} region={{ deployment_vars[deployment_type].region }} state=present
with_together:
- instances
- ec2.instances
@@ -136,29 +106,32 @@
Name: "{{ item.0 }}"
- set_fact:
- instance_groups: "tag_created-by_{{ created_by }}, tag_clusterid_{{ cluster }}, tag_environment_{{ cluster_env }},
- tag_host-type_{{ host_type }}, tag_sub-host-type_{{ sub_host_type }}"
+ instance_groups: >
+ tag_created-by_{{ created_by }}, tag_clusterid_{{ cluster }},
+ tag_environment_{{ cluster_env }}, tag_host-type_{{ host_type }},
+ tag_sub-host-type_{{ sub_host_type }}
- set_fact:
node_label:
- region: "{{ec2_region}}"
+ region: "{{ deployment_vars[deployment_type].region }}"
type: "{{sub_host_type}}"
when: host_type == "node"
- set_fact:
node_label:
- region: "{{ec2_region}}"
+ region: "{{ deployment_vars[deployment_type].region }}"
type: "{{host_type}}"
when: host_type != "node"
- set_fact:
logrotate:
- name: syslog
- path: "/var/log/cron
- \n/var/log/maillog
- \n/var/log/messages
- \n/var/log/secure
- \n/var/log/spooler \n"
+ path: |
+ /var/log/cron
+ /var/log/maillog
+ /var/log/messages
+ /var/log/secure
+        /var/log/spooler
options:
- daily
- rotate 7
@@ -173,10 +146,11 @@
hostname: "{{ item.0 }}"
ansible_ssh_host: "{{ item.1.dns_name }}"
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ ansible_become: "{{ deployment_vars[deployment_type].become }}"
groups: "{{ instance_groups }}"
ec2_private_ip_address: "{{ item.1.private_ip }}"
ec2_ip_address: "{{ item.1.public_ip }}"
+ ec2_tag_sub-host-type: "{{ sub_host_type }}"
openshift_node_labels: "{{ node_label }}"
logrotate_scripts: "{{ logrotate }}"
with_together:
@@ -188,7 +162,7 @@
hostname: "{{ item.0 }}"
ansible_ssh_host: "{{ item.1.dns_name }}"
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ ansible_become: "{{ deployment_vars[deployment_type].become }}"
groups: nodes_to_add
ec2_private_ip_address: "{{ item.1.private_ip }}"
ec2_ip_address: "{{ item.1.public_ip }}"
diff --git a/playbooks/aws/openshift-cluster/templates/user_data.j2 b/playbooks/aws/openshift-cluster/templates/user_data.j2
index 3621a7d7d..b1087f9c4 100644
--- a/playbooks/aws/openshift-cluster/templates/user_data.j2
+++ b/playbooks/aws/openshift-cluster/templates/user_data.j2
@@ -1,30 +1,12 @@
#cloud-config
-{% if type == 'etcd' and 'etcd' in volume_defs[type] %}
-cloud_config_modules:
-- disk_setup
-- mounts
-
-mounts:
-- [ xvdb, /var/lib/etcd, xfs, "defaults" ]
-
-disk_setup:
- xvdb:
- table_type: mbr
- layout: True
-
-fs_setup:
-- label: etcd_storage
- filesystem: xfs
- device: /dev/xvdb
- partition: auto
-{% endif %}
-
{% if type in ['node', 'master'] and 'docker' in volume_defs[type] %}
mounts:
- [ xvdb ]
- [ ephemeral0 ]
+{% endif %}
write_files:
+{% if type in ['node', 'master'] and 'docker' in volume_defs[type] %}
- content: |
DEVS=/dev/xvdb
VG=docker_vg
@@ -32,19 +14,7 @@ write_files:
owner: root:root
permissions: '0644'
{% endif %}
-
-{% if deployment_type == 'online' %}
-devices: ['/var'] # Workaround for https://bugs.launchpad.net/bugs/1455436
-
-disable_root: 0
-growpart:
- mode: auto
- devices: ['/var']
-runcmd:
-- xfs_growfs /var
-{% endif %}
-
-{% if deployment_vars[deployment_type].sudo %}
+{% if deployment_vars[deployment_type].become | bool %}
- path: /etc/sudoers.d/99-{{ deployment_vars[deployment_type].ssh_user }}-cloud-init-requiretty
permissions: 440
content: |
diff --git a/playbooks/aws/openshift-cluster/terminate.yml b/playbooks/aws/openshift-cluster/terminate.yml
index 6dd5d8b62..fb13e1839 100644
--- a/playbooks/aws/openshift-cluster/terminate.yml
+++ b/playbooks/aws/openshift-cluster/terminate.yml
@@ -11,7 +11,7 @@
name: "{{ item }}"
groups: oo_hosts_to_terminate
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ ansible_become: "{{ deployment_vars[deployment_type].become }}"
with_items: (groups['tag_clusterid_' ~ cluster_id] | default([])) | difference(['localhost'])
- name: Unsubscribe VMs
diff --git a/playbooks/aws/openshift-cluster/update.yml b/playbooks/aws/openshift-cluster/update.yml
index 32bab76b5..d762203b2 100644
--- a/playbooks/aws/openshift-cluster/update.yml
+++ b/playbooks/aws/openshift-cluster/update.yml
@@ -1,20 +1,33 @@
---
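+# Build an l_oo_all_hosts group from g_all_hosts, then load vars.yml and
+# cluster_hosts.yml on those hosts so the deployment variables are available
+# everywhere; this replaces the vars_files approach removed below.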
+- hosts: localhost
+ gather_facts: no
+ tasks:
+ - include_vars: vars.yml
+ - include_vars: cluster_hosts.yml
+ - add_host:
+ name: "{{ item }}"
+ groups: l_oo_all_hosts
+ with_items: g_all_hosts
+
+- hosts: l_oo_all_hosts
+ gather_facts: no
+ tasks:
+ - include_vars: vars.yml
+ - include_vars: cluster_hosts.yml
+
- name: Update - Populate oo_hosts_to_update group
hosts: localhost
connection: local
become: no
gather_facts: no
- vars_files:
- - vars.yml
- - cluster_hosts.yml
tasks:
- name: Update - Evaluate oo_hosts_to_update
add_host:
name: "{{ item }}"
groups: oo_hosts_to_update
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
- with_items: "{{ g_all_hosts | default([]) }}"
+ ansible_become: "{{ deployment_vars[deployment_type].become }}"
+ with_items: g_all_hosts | default([])
- include: ../../common/openshift-cluster/update_repos_and_packages.yml
diff --git a/playbooks/aws/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml b/playbooks/aws/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
index 11026e38d..44d9a3e25 100644
--- a/playbooks/aws/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
+++ b/playbooks/aws/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
@@ -1,14 +1,13 @@
---
-# This playbook upgrades an existing AWS cluster, leaving nodes untouched if used with an 'online' deployment type.
# Usage:
-# ansible-playbook playbooks/aws/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml -e deployment_type=online -e cluster_id=<cluster_id>
+# ansible-playbook playbooks/aws/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml -e deployment_type=<deployment_type> -e cluster_id=<cluster_id>
- include: ../../../../common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
vars_files:
- "{{lookup('file', '../../../../aws/openshift-cluster/vars.yml')}}"
- "{{lookup('file', '../../../../aws/openshift-cluster/cluster_hosts.yml')}}"
vars:
g_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- g_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ g_sudo: "{{ deployment_vars[deployment_type].become }}"
g_nodeonmaster: true
openshift_cluster_id: "{{ cluster_id }}"
openshift_debug_level: "{{ debug_level }}"
diff --git a/playbooks/aws/openshift-cluster/vars.defaults.yml b/playbooks/aws/openshift-cluster/vars.defaults.yml
deleted file mode 100644
index ed97d539c..000000000
--- a/playbooks/aws/openshift-cluster/vars.defaults.yml
+++ /dev/null
@@ -1 +0,0 @@
----
diff --git a/playbooks/aws/openshift-cluster/vars.online.int.yml b/playbooks/aws/openshift-cluster/vars.online.int.yml
deleted file mode 100644
index 2e2f25ccd..000000000
--- a/playbooks/aws/openshift-cluster/vars.online.int.yml
+++ /dev/null
@@ -1,15 +0,0 @@
----
-ec2_image: ami-9101c8fa
-ec2_image_name: libra-ops-rhel7*
-ec2_region: us-east-1
-ec2_keypair: mmcgrath_libra
-ec2_master_instance_type: t2.medium
-ec2_master_security_groups: [ 'integration', 'integration-master' ]
-ec2_infra_instance_type: c4.large
-ec2_infra_security_groups: [ 'integration', 'integration-infra' ]
-ec2_node_instance_type: m4.large
-ec2_node_security_groups: [ 'integration', 'integration-node' ]
-ec2_etcd_instance_type: m4.large
-ec2_etcd_security_groups: [ 'integration', 'integration-etcd' ]
-ec2_vpc_subnet: subnet-987c0def
-ec2_assign_public_ip: yes
diff --git a/playbooks/aws/openshift-cluster/vars.online.prod.yml b/playbooks/aws/openshift-cluster/vars.online.prod.yml
deleted file mode 100644
index 18a53e12e..000000000
--- a/playbooks/aws/openshift-cluster/vars.online.prod.yml
+++ /dev/null
@@ -1,15 +0,0 @@
----
-ec2_image: ami-9101c8fa
-ec2_image_name: libra-ops-rhel7*
-ec2_region: us-east-1
-ec2_keypair: mmcgrath_libra
-ec2_master_instance_type: t2.medium
-ec2_master_security_groups: [ 'production', 'production-master' ]
-ec2_infra_instance_type: c4.large
-ec2_infra_security_groups: [ 'production', 'production-infra' ]
-ec2_node_instance_type: m4.large
-ec2_node_security_groups: [ 'production', 'production-node' ]
-ec2_etcd_instance_type: m4.large
-ec2_etcd_security_groups: [ 'production', 'production-etcd' ]
-ec2_vpc_subnet: subnet-987c0def
-ec2_assign_public_ip: yes
diff --git a/playbooks/aws/openshift-cluster/vars.online.stage.yml b/playbooks/aws/openshift-cluster/vars.online.stage.yml
deleted file mode 100644
index 1f9ac4252..000000000
--- a/playbooks/aws/openshift-cluster/vars.online.stage.yml
+++ /dev/null
@@ -1,15 +0,0 @@
----
-ec2_image: ami-9101c8fa
-ec2_image_name: libra-ops-rhel7*
-ec2_region: us-east-1
-ec2_keypair: mmcgrath_libra
-ec2_master_instance_type: t2.medium
-ec2_master_security_groups: [ 'stage', 'stage-master' ]
-ec2_infra_instance_type: c4.large
-ec2_infra_security_groups: [ 'stage', 'stage-infra' ]
-ec2_node_instance_type: m4.large
-ec2_node_security_groups: [ 'stage', 'stage-node' ]
-ec2_etcd_instance_type: m4.large
-ec2_etcd_security_groups: [ 'stage', 'stage-etcd' ]
-ec2_vpc_subnet: subnet-987c0def
-ec2_assign_public_ip: yes
diff --git a/playbooks/aws/openshift-cluster/vars.yml b/playbooks/aws/openshift-cluster/vars.yml
index ae12286bd..d774187f0 100644
--- a/playbooks/aws/openshift-cluster/vars.yml
+++ b/playbooks/aws/openshift-cluster/vars.yml
@@ -3,42 +3,31 @@ debug_level: 2
deployment_rhel7_ent_base:
# rhel-7.1, requires cloud access subscription
- image: ami-10663b78
- image_name:
- region: us-east-1
+ image: "{{ lookup('oo_option', 'ec2_image') | default('ami-10251c7a', True) }}"
+ image_name: "{{ lookup('oo_option', 'ec2_image_name') | default(None, True) }}"
+ region: "{{ lookup('oo_option', 'ec2_region') | default('us-east-1', True) }}"
ssh_user: ec2-user
- sudo: yes
- keypair: libra
- type: m4.large
- security_groups: [ 'public' ]
- vpc_subnet:
- assign_public_ip:
+ become: yes
+ keypair: "{{ lookup('oo_option', 'ec2_keypair') | default('libra', True) }}"
+ type: "{{ lookup('oo_option', 'ec2_instance_type') | default('m4.large', True) }}"
+ security_groups: "{{ lookup('oo_option', 'ec2_security_groups') | default([ 'public' ], True) }}"
+ vpc_subnet: "{{ lookup('oo_option', 'ec2_vpc_subnet') | default(omit, True) }}"
+ assign_public_ip: "{{ lookup('oo_option', 'ec2_assign_public_ip') | default(omit, True) }}"
deployment_vars:
origin:
# centos-7, requires marketplace
- image: ami-61bbf104
- image_name:
- region: us-east-1
+ image: "{{ lookup('oo_option', 'ec2_image') | default('ami-6d1c2007', True) }}"
+ image_name: "{{ lookup('oo_option', 'ec2_image_name') | default(None, True) }}"
+ region: "{{ lookup('oo_option', 'ec2_region') | default('us-east-1', True) }}"
ssh_user: centos
- sudo: yes
- keypair: libra
- type: m4.large
- security_groups: [ 'public' ]
- vpc_subnet:
- assign_public_ip:
- online:
- # private ami
- image: ami-7a9e9812
- image_name: openshift-rhel7_*
- region: us-east-1
- ssh_user: root
- sudo: no
- keypair: libra
- type: m4.large
- security_groups: [ 'public' ]
- vpc_subnet:
- assign_public_ip:
+ become: yes
+ keypair: "{{ lookup('oo_option', 'ec2_keypair') | default('libra', True) }}"
+ type: "{{ lookup('oo_option', 'ec2_instance_type') | default('m4.large', True) }}"
+ security_groups: "{{ lookup('oo_option', 'ec2_security_groups') | default([ 'public' ], True) }}"
+ vpc_subnet: "{{ lookup('oo_option', 'ec2_vpc_subnet') | default(omit, True) }}"
+ assign_public_ip: "{{ lookup('oo_option', 'ec2_assign_public_ip') | default(omit, True) }}"
+
enterprise: "{{ deployment_rhel7_ent_base }}"
openshift-enterprise: "{{ deployment_rhel7_ent_base }}"
atomic-enterprise: "{{ deployment_rhel7_ent_base }}"
diff --git a/playbooks/byo/openshift-cluster/cluster_hosts.yml b/playbooks/byo/openshift-cluster/cluster_hosts.yml
index 8893db245..658204c17 100644
--- a/playbooks/byo/openshift-cluster/cluster_hosts.yml
+++ b/playbooks/byo/openshift-cluster/cluster_hosts.yml
@@ -14,4 +14,6 @@ g_new_node_hosts: "{{ groups.new_nodes | default([]) }}"
g_nfs_hosts: "{{ groups.nfs | default([]) }}"
g_all_hosts: "{{ g_master_hosts | union(g_node_hosts) | union(g_etcd_hosts)
- | union(g_lb_hosts) | default([]) }}"
+ | union(g_lb_hosts) | union(g_nfs_hosts)
+              | union(g_new_node_hosts) | union(g_new_master_hosts)
+ | default([]) }}"
diff --git a/playbooks/byo/openshift-cluster/config.yml b/playbooks/byo/openshift-cluster/config.yml
index 5887b3208..c5479d098 100644
--- a/playbooks/byo/openshift-cluster/config.yml
+++ b/playbooks/byo/openshift-cluster/config.yml
@@ -1,7 +1,21 @@
---
+- hosts: localhost
+ connection: local
+ become: no
+ gather_facts: no
+ tasks:
+ - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml
+ - add_host:
+ name: "{{ item }}"
+ groups: l_oo_all_hosts
+ with_items: g_all_hosts
+
+- hosts: l_oo_all_hosts
+ gather_facts: no
+ tasks:
+ - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml
+
- include: ../../common/openshift-cluster/config.yml
- vars_files:
- - ../../byo/openshift-cluster/cluster_hosts.yml
vars:
openshift_cluster_id: "{{ cluster_id | default('default') }}"
openshift_debug_level: "{{ debug_level | default(2) }}"
diff --git a/playbooks/byo/openshift-cluster/enable_dnsmasq.yml b/playbooks/byo/openshift-cluster/enable_dnsmasq.yml
new file mode 100644
index 000000000..1c8d99341
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/enable_dnsmasq.yml
@@ -0,0 +1,18 @@
+---
+- hosts: localhost
+ connection: local
+ become: no
+ gather_facts: no
+ tasks:
+ - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml
+ - add_host:
+ name: "{{ item }}"
+ groups: l_oo_all_hosts
+ with_items: g_all_hosts
+
+- hosts: l_oo_all_hosts
+ gather_facts: no
+ tasks:
+ - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml
+
+- include: ../../common/openshift-cluster/enable_dnsmasq.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml
new file mode 100644
index 000000000..d7798d304
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml
@@ -0,0 +1,106 @@
+---
+- name: Check for appropriate Docker versions for 1.9.x to 1.10.x upgrade
+ hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config
+ roles:
+ - openshift_facts
+ tasks:
+ - fail:
+ msg: Cannot upgrade Docker on Atomic operating systems.
+ when: openshift.common.is_atomic | bool
+
+ - name: Determine available Docker version
+ script: ../../../../common/openshift-cluster/upgrades/files/rpm_versions.sh docker
+ register: g_docker_version_result
+
+ - name: Check if Docker is installed
+ command: rpm -q docker
+ register: pkg_check
+ failed_when: pkg_check.rc > 1
+ changed_when: no
+
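+  # rpm_versions.sh prints YAML (curr_version / avail_version); parse it into a dict.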
+ - set_fact:
+ g_docker_version: "{{ g_docker_version_result.stdout | from_yaml }}"
+
+ - name: Set fact if docker requires an upgrade
+ set_fact:
+ docker_upgrade: true
+ when: pkg_check.rc == 0 and g_docker_version.curr_version | version_compare('1.10','<')
+
+ - fail:
+ msg: This playbook requires access to Docker 1.10 or later
+ when: g_docker_version.avail_version | default(g_docker_version.curr_version, true) | version_compare('1.10','<')
+
+# If a node fails, halt everything: the admin will need to clean up, and we
+# don't want to carry on and potentially take out every node. The playbook can
+# safely be re-run and will not take any action on a node already running 1.10+.
+- name: Evacuate and upgrade nodes
+ hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config
+ serial: 1
+ any_errors_fatal: true
+ tasks:
+ - debug: var=docker_upgrade
+
+ - name: Prepare for Node evacuation
+ command: >
+ {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --schedulable=false
+ delegate_to: "{{ groups.oo_first_master.0 }}"
+ when: docker_upgrade is defined and docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_config
+
+# TODO: skip all node evac stuff for non-nodes (i.e. separate containerized etcd hosts)
+ - name: Evacuate Node for Kubelet upgrade
+ command: >
+ {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --evacuate --force
+ delegate_to: "{{ groups.oo_first_master.0 }}"
+ when: docker_upgrade is defined and docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_config
+
+ - name: Stop containerized services
+ service: name={{ item }} state=stopped
+ with_items:
+ - "{{ openshift.common.service_type }}-master"
+ - "{{ openshift.common.service_type }}-master-api"
+ - "{{ openshift.common.service_type }}-master-controllers"
+ - "{{ openshift.common.service_type }}-node"
+ - etcd_container
+ - openvswitch
+ failed_when: false
+ when: docker_upgrade is defined and docker_upgrade | bool and openshift.common.is_containerized | bool
+
+ - name: Remove all containers and images
+ script: files/nuke_images.sh docker
+ register: nuke_images_result
+ when: docker_upgrade is defined and docker_upgrade | bool
+
+ - name: Upgrade Docker
+ command: "{{ ansible_pkg_mgr}} update -y docker"
+ register: docker_upgrade_result
+ when: docker_upgrade is defined and docker_upgrade | bool
+
+ - name: Restart containerized services
+ service: name={{ item }} state=started
+ with_items:
+ - etcd_container
+ - openvswitch
+ - "{{ openshift.common.service_type }}-master"
+ - "{{ openshift.common.service_type }}-master-api"
+ - "{{ openshift.common.service_type }}-master-controllers"
+ - "{{ openshift.common.service_type }}-node"
+ failed_when: false
+ when: docker_upgrade is defined and docker_upgrade | bool and openshift.common.is_containerized | bool
+
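+  # Restarting Docker bounces the master services; poll the API port from the
+  # control host until it answers before moving on.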
+ - name: Wait for master API to come back online
+ become: no
+ local_action:
+ module: wait_for
+ host="{{ inventory_hostname }}"
+ state=started
+ delay=10
+ port="{{ openshift.master.api_port }}"
+ when: docker_upgrade is defined and docker_upgrade | bool and inventory_hostname in groups.oo_masters_to_config
+
+ - name: Set node schedulability
+ command: >
+ {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --schedulable=true
+ delegate_to: "{{ groups.oo_first_master.0 }}"
+    when: docker_upgrade is defined and docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_config and openshift.node.schedulable | bool
+
diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/files/nuke_images.sh b/playbooks/byo/openshift-cluster/upgrades/docker/files/nuke_images.sh
new file mode 100644
index 000000000..6b155f7fa
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/docker/files/nuke_images.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+
+# Stop any running containers
+running_container_ids=`docker ps -q`
+if test -n "$running_container_ids"
+then
+ docker stop $running_container_ids
+fi
+
+# Delete all containers
+container_ids=`docker ps -a -q`
+if test -n "$container_ids"
+then
+ docker rm -f -v $container_ids
+fi
+
+# Delete all images (forcefully)
+image_ids=`docker images -q`
+if test -n "$image_ids"
+then
+  # Taken from: https://gist.github.com/brianclements/f72b2de8e307c7b56689#gistcomment-1443144
+  # ($1 is the image name pattern passed by the caller, e.g. "docker")
+  docker rmi $(docker images | grep "$1/\|/$1 \| $1 \|$1 \|$1-\|$1_" | awk '{print $1 ":" $2}') 2>/dev/null || echo "No images matching \"$1\" left to purge."
+fi
diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/roles b/playbooks/byo/openshift-cluster/upgrades/docker/roles
new file mode 120000
index 000000000..6bc1a7aef
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/docker/roles
@@ -0,0 +1 @@
+../../../../../roles
\ No newline at end of file
diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml
new file mode 100644
index 000000000..0f86abd89
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml
@@ -0,0 +1,29 @@
+# Playbook to upgrade Docker to the max allowable version for an OpenShift cluster.
+#
+# Currently only supports upgrading 1.9.x to >= 1.10.x.
+- hosts: localhost
+ connection: local
+ become: no
+ gather_facts: no
+ tasks:
+ - include_vars: ../../cluster_hosts.yml
+ - add_host:
+ name: "{{ item }}"
+ groups: l_oo_all_hosts
+ with_items: g_all_hosts | default([])
+ changed_when: false
+
+- hosts: l_oo_all_hosts
+ gather_facts: no
+ tasks:
+ - include_vars: ../../cluster_hosts.yml
+
+- include: ../../../../common/openshift-cluster/evaluate_groups.yml
+ vars:
+ # Do not allow adding hosts during upgrade.
+ g_new_master_hosts: []
+ g_new_node_hosts: []
+ openshift_cluster_id: "{{ cluster_id | default('default') }}"
+ openshift_deployment_type: "{{ deployment_type }}"
+
+- include: docker_upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_0_minor/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_0_minor/upgrade.yml
index b52456dcd..76bfff9b6 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_0_minor/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_0_minor/upgrade.yml
@@ -1,12 +1,28 @@
---
+- hosts: localhost
+ connection: local
+ become: no
+ gather_facts: no
+ tasks:
+ - name: Verify Ansible version is greater than or equal to 1.9.4 and less than 2.0
+ fail:
+ msg: "Unsupported ansible version: {{ ansible_version }} found."
+ when: ansible_version.full | version_compare('1.9.4', 'lt') or ansible_version.full | version_compare('2.0', 'ge')
+ - include_vars: ../../../../byo/openshift-cluster/cluster_hosts.yml
+ - add_host:
+ name: "{{ item }}"
+ groups: l_oo_all_hosts
+ with_items: g_all_hosts
+
+- hosts: l_oo_all_hosts
+ gather_facts: no
+ tasks:
+ - include_vars: ../../../../byo/openshift-cluster/cluster_hosts.yml
+
- include: ../../../../common/openshift-cluster/upgrades/v3_0_minor/upgrade.yml
- vars_files:
- - "{{lookup('file', '../../../../byo/openshift-cluster/cluster_hosts.yml')}}"
vars:
- g_etcd_hosts: "{{ groups.etcd | default([]) }}"
- g_master_hosts: "{{ groups.masters | default([]) }}"
- g_nfs_hosts: "{{ groups.nfs | default([]) }}"
- g_node_hosts: "{{ groups.nodes | default([]) }}"
- g_lb_hosts: "{{ groups.lb | default([]) }}"
+ # Do not allow adding hosts during upgrade.
+ g_new_master_hosts: []
+ g_new_node_hosts: []
openshift_cluster_id: "{{ cluster_id | default('default') }}"
openshift_deployment_type: "{{ deployment_type }}"
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/README.md b/playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/README.md
index c434be5b7..eb1f481d7 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/README.md
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/README.md
@@ -4,7 +4,6 @@
This playbook currently performs the
following steps.
-**TODO: update for current steps**
* Upgrade and restart master services
* Upgrade and restart node services
* Modifies the subset of the configuration necessary
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
index e07e2b88e..c17446162 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
@@ -1,12 +1,28 @@
---
+- hosts: localhost
+ connection: local
+ become: no
+ gather_facts: no
+ tasks:
+ - name: Verify Ansible version is greater than or equal to 1.9.4 and less than 2.0
+ fail:
+ msg: "Unsupported ansible version: {{ ansible_version }} found."
+ when: ansible_version.full | version_compare('1.9.4', 'lt') or ansible_version.full | version_compare('2.0', 'ge')
+ - include_vars: ../../../../byo/openshift-cluster/cluster_hosts.yml
+ - add_host:
+ name: "{{ item }}"
+ groups: l_oo_all_hosts
+ with_items: g_all_hosts
+
+- hosts: l_oo_all_hosts
+ gather_facts: no
+ tasks:
+ - include_vars: ../../../../byo/openshift-cluster/cluster_hosts.yml
+
- include: ../../../../common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
- vars_files:
- - "{{lookup('file', '../../../../byo/openshift-cluster/cluster_hosts.yml')}}"
vars:
- g_etcd_hosts: "{{ groups.etcd | default([]) }}"
- g_master_hosts: "{{ groups.masters | default([]) }}"
- g_nfs_hosts: "{{ groups.nfs | default([]) }}"
- g_node_hosts: "{{ groups.nodes | default([]) }}"
- g_lb_hosts: "{{ groups.lb | default([]) }}"
+ # Do not allow adding hosts during upgrade.
+ g_new_master_hosts: []
+ g_new_node_hosts: []
openshift_cluster_id: "{{ cluster_id | default('default') }}"
openshift_deployment_type: "{{ deployment_type }}"
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_1_minor/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_1_minor/upgrade.yml
index 20fa9b10f..99592d85a 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_1_minor/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_1_minor/upgrade.yml
@@ -1,11 +1,29 @@
---
+- hosts: localhost
+ connection: local
+ become: no
+ gather_facts: no
+ tasks:
+ - name: Verify Ansible version is greater than or equal to 1.9.4 and less than 2.0
+ fail:
+ msg: "Unsupported ansible version: {{ ansible_version }} found."
+ when: ansible_version.full | version_compare('1.9.4', 'lt') or ansible_version.full | version_compare('2.0', 'ge')
+ - include_vars: ../../../../byo/openshift-cluster/cluster_hosts.yml
+ - add_host:
+ name: "{{ item }}"
+ groups: l_oo_all_hosts
+ with_items: g_all_hosts
+
+- hosts: l_oo_all_hosts
+ gather_facts: no
+ tasks:
+ - include_vars: ../../../../byo/openshift-cluster/cluster_hosts.yml
+
- include: ../../../../common/openshift-cluster/evaluate_groups.yml
vars:
- g_etcd_hosts: "{{ groups.etcd | default([]) }}"
- g_master_hosts: "{{ groups.masters | default([]) }}"
- g_nfs_hosts: "{{ groups.nfs | default([]) }}"
- g_node_hosts: "{{ groups.nodes | default([]) }}"
- g_lb_hosts: "{{ groups.lb | default([]) }}"
+ # Do not allow adding hosts during upgrade.
+ g_new_master_hosts: []
+ g_new_node_hosts: []
openshift_cluster_id: "{{ cluster_id | default('default') }}"
openshift_deployment_type: "{{ deployment_type }}"
- include: ../../../../common/openshift-cluster/upgrades/v3_1_minor/pre.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_1_to_v3_2/README.md b/playbooks/byo/openshift-cluster/upgrades/v3_1_to_v3_2/README.md
new file mode 100644
index 000000000..62577c3df
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_1_to_v3_2/README.md
@@ -0,0 +1,16 @@
+# v3.1 to v3.2 upgrade playbook
+
+## Overview
+This playbook currently performs the
+following steps.
+
+ * Upgrade and restart master services
+ * Upgrade and restart node services
+ * Modifies the subset of the configuration necessary for the upgrade
+ * Applies the latest cluster policies
+ * Updates the default router if one exists
+ * Updates the default registry if one exists
+ * Updates image streams and quickstarts
+
+## Usage
+`ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml`
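+
+The playbooks reference `deployment_type`; if it is not set in your inventory,
+pass it on the command line as the other upgrade playbooks do, for example:
+
+```
+ansible-playbook -i ~/ansible-inventory \
+    -e deployment_type=openshift-enterprise \
+    openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml
+```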
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml
new file mode 100644
index 000000000..24617620b
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml
@@ -0,0 +1,59 @@
+---
+- hosts: localhost
+ connection: local
+ become: no
+ gather_facts: no
+ tasks:
+ - name: Verify Ansible version is greater than or equal to 1.9.4 and less than 2.0
+ fail:
+ msg: "Unsupported ansible version: {{ ansible_version }} found."
+ when: ansible_version.full | version_compare('1.9.4', 'lt') or ansible_version.full | version_compare('2.0', 'ge')
+ - include_vars: ../../../../byo/openshift-cluster/cluster_hosts.yml
+ - add_host:
+ name: "{{ item }}"
+ groups: l_oo_all_hosts
+ with_items: g_all_hosts | default([])
+
+- hosts: l_oo_all_hosts
+ gather_facts: no
+ tasks:
+ - include_vars: ../../../../byo/openshift-cluster/cluster_hosts.yml
+
+- include: ../../../../common/openshift-cluster/evaluate_groups.yml
+ vars:
+ # Do not allow adding hosts during upgrade.
+ g_new_master_hosts: []
+ g_new_node_hosts: []
+ openshift_cluster_id: "{{ cluster_id | default('default') }}"
+ openshift_deployment_type: "{{ deployment_type }}"
+
+- name: Set oo_options
+ hosts: oo_all_hosts
+ tasks:
+ - set_fact:
+ openshift_docker_additional_registries: "{{ lookup('oo_option', 'docker_additional_registries') }}"
+ when: openshift_docker_additional_registries is not defined
+ - set_fact:
+ openshift_docker_insecure_registries: "{{ lookup('oo_option', 'docker_insecure_registries') }}"
+ when: openshift_docker_insecure_registries is not defined
+ - set_fact:
+ openshift_docker_blocked_registries: "{{ lookup('oo_option', 'docker_blocked_registries') }}"
+ when: openshift_docker_blocked_registries is not defined
+ - set_fact:
+ openshift_docker_options: "{{ lookup('oo_option', 'docker_options') }}"
+ when: openshift_docker_options is not defined
+ - set_fact:
+ openshift_docker_log_driver: "{{ lookup('oo_option', 'docker_log_driver') }}"
+ when: openshift_docker_log_driver is not defined
+ - set_fact:
+ openshift_docker_log_options: "{{ lookup('oo_option', 'docker_log_options') }}"
+ when: openshift_docker_log_options is not defined
+
+- include: ../../../../common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml
+ vars:
+ openshift_deployment_type: "{{ deployment_type }}"
+- include: ../../../../common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml
+ vars:
+ openshift_deployment_type: "{{ deployment_type }}"
+- include: ../../../openshift-master/restart.yml
+- include: ../../../../common/openshift-cluster/upgrades/v3_1_to_v3_2/post.yml
diff --git a/playbooks/byo/openshift-master/restart.yml b/playbooks/byo/openshift-master/restart.yml
index a78a6aa3d..0cf669ae3 100644
--- a/playbooks/byo/openshift-master/restart.yml
+++ b/playbooks/byo/openshift-master/restart.yml
@@ -1,4 +1,18 @@
---
+- hosts: localhost
+ connection: local
+ become: no
+ gather_facts: no
+ tasks:
+ - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml
+ - add_host:
+ name: "{{ item }}"
+ groups: l_oo_all_hosts
+ with_items: g_all_hosts
+
+- hosts: l_oo_all_hosts
+ gather_facts: no
+ tasks:
+ - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml
+
- include: ../../common/openshift-master/restart.yml
- vars_files:
- - ../../byo/openshift-cluster/cluster_hosts.yml
diff --git a/playbooks/byo/openshift-master/scaleup.yml b/playbooks/byo/openshift-master/scaleup.yml
index 18797d02a..fced79262 100644
--- a/playbooks/byo/openshift-master/scaleup.yml
+++ b/playbooks/byo/openshift-master/scaleup.yml
@@ -1,7 +1,21 @@
---
+- hosts: localhost
+ connection: local
+ become: no
+ gather_facts: no
+ tasks:
+ - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml
+ - add_host:
+ name: "{{ item }}"
+ groups: l_oo_all_hosts
+ with_items: g_all_hosts
+
+- hosts: l_oo_all_hosts
+ gather_facts: no
+ tasks:
+ - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml
+
- include: ../../common/openshift-master/scaleup.yml
- vars_files:
- - ../../byo/openshift-cluster/cluster_hosts.yml
vars:
openshift_cluster_id: "{{ cluster_id | default('default') }}"
openshift_debug_level: "{{ debug_level | default(2) }}"
diff --git a/playbooks/byo/openshift-node/scaleup.yml b/playbooks/byo/openshift-node/scaleup.yml
index 0343597b5..5737bb0e0 100644
--- a/playbooks/byo/openshift-node/scaleup.yml
+++ b/playbooks/byo/openshift-node/scaleup.yml
@@ -1,7 +1,21 @@
---
+- hosts: localhost
+ connection: local
+ become: no
+ gather_facts: no
+ tasks:
+ - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml
+ - add_host:
+ name: "{{ item }}"
+ groups: l_oo_all_hosts
+ with_items: g_all_hosts
+
+- hosts: l_oo_all_hosts
+ gather_facts: no
+ tasks:
+ - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml
+
- include: ../../common/openshift-node/scaleup.yml
- vars_files:
- - ../../byo/openshift-cluster/cluster_hosts.yml
vars:
openshift_cluster_id: "{{ cluster_id | default('default') }}"
openshift_debug_level: "{{ debug_level | default(2) }}"
diff --git a/playbooks/byo/openshift_facts.yml b/playbooks/byo/openshift_facts.yml
index 916dfd0a6..db8703db6 100644
--- a/playbooks/byo/openshift_facts.yml
+++ b/playbooks/byo/openshift_facts.yml
@@ -1,4 +1,22 @@
---
+- hosts: localhost
+ connection: local
+ become: no
+ gather_facts: no
+ tasks:
+ - include_vars: openshift-cluster/cluster_hosts.yml
+ - add_host:
+ name: "{{ item }}"
+ groups: l_oo_all_hosts
+ with_items: g_all_hosts
+
+- hosts: l_oo_all_hosts
+ gather_facts: no
+ tasks:
+ - include_vars: openshift-cluster/cluster_hosts.yml
+
+- include: ../common/openshift-cluster/evaluate_groups.yml
+
- name: Gather Cluster facts
hosts: OSEv3
roles:
diff --git a/playbooks/byo/rhel_subscribe.yml b/playbooks/byo/rhel_subscribe.yml
index 990ddd2f2..f093411ef 100644
--- a/playbooks/byo/rhel_subscribe.yml
+++ b/playbooks/byo/rhel_subscribe.yml
@@ -1,5 +1,23 @@
---
-- hosts: all
+- hosts: localhost
+ connection: local
+ become: no
+ gather_facts: no
+ tasks:
+ - include_vars: openshift-cluster/cluster_hosts.yml
+ - add_host:
+ name: "{{ item }}"
+ groups: l_oo_all_hosts
+ with_items: g_all_hosts
+
+- hosts: l_oo_all_hosts
+ gather_facts: no
+ tasks:
+ - include_vars: openshift-cluster/cluster_hosts.yml
+
+- include: ../common/openshift-cluster/evaluate_groups.yml
+
+- hosts: l_oo_all_hosts
vars:
openshift_deployment_type: "{{ deployment_type }}"
roles:
diff --git a/playbooks/common/openshift-cluster/additional_config.yml b/playbooks/common/openshift-cluster/additional_config.yml
index 1ac78468a..a34322754 100644
--- a/playbooks/common/openshift-cluster/additional_config.yml
+++ b/playbooks/common/openshift-cluster/additional_config.yml
@@ -17,6 +17,7 @@
- role: openshift_master_cluster
when: openshift_master_ha | bool and openshift.master.cluster_method == "pacemaker"
- role: openshift_examples
+ registry_url: "{{ openshift.master.registry_url }}"
when: openshift.common.install_examples | bool
- role: openshift_cluster_metrics
when: openshift.common.use_cluster_metrics | bool
@@ -27,30 +28,5 @@
(osm_use_cockpit | bool or osm_use_cockpit is undefined )
- role: flannel_register
when: openshift.common.use_flannel | bool
- - role: pods
- when: openshift.common.deployment_type == 'online'
- - role: os_env_extras
- when: openshift.common.deployment_type == 'online'
-- name: Create persistent volumes and create hosted services
- hosts: oo_first_master
- vars:
- attach_registry_volume: "{{ openshift.hosted.registry.storage.kind != None }}"
- deploy_infra: "{{ openshift.master.infra_nodes | default([]) | length > 0 }}"
- persistent_volumes: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volumes(groups) }}"
- persistent_volume_claims: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volume_claims }}"
- roles:
- - role: openshift_persistent_volumes
- when: persistent_volumes | length > 0 or persistent_volume_claims | length > 0
- - role: openshift_serviceaccounts
- openshift_serviceaccounts_names:
- - router
- - registry
- openshift_serviceaccounts_namespace: default
- openshift_serviceaccounts_sccs:
- - privileged
- - role: openshift_router
- when: deploy_infra | bool
- - role: openshift_registry
- registry_volume_claim: "{{ openshift.hosted.registry.storage.volume.name }}-claim"
- when: deploy_infra | bool and attach_registry_volume | bool
+
diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml
index 23c8f039e..5fec11541 100644
--- a/playbooks/common/openshift-cluster/config.yml
+++ b/playbooks/common/openshift-cluster/config.yml
@@ -1,16 +1,42 @@
---
- include: evaluate_groups.yml
+- include: initialize_facts.yml
+
- include: validate_hostnames.yml
-- include: ../openshift-docker/config.yml
+- name: Set oo_options
+ hosts: oo_all_hosts
+ tasks:
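+  # Each set_fact below only runs when the variable was not already provided
+  # by the inventory; otherwise it falls back to the oo_option lookup.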
+ - set_fact:
+ openshift_docker_additional_registries: "{{ lookup('oo_option', 'docker_additional_registries') }}"
+ when: openshift_docker_additional_registries is not defined
+ - set_fact:
+ openshift_docker_insecure_registries: "{{ lookup('oo_option', 'docker_insecure_registries') }}"
+ when: openshift_docker_insecure_registries is not defined
+ - set_fact:
+ openshift_docker_blocked_registries: "{{ lookup('oo_option', 'docker_blocked_registries') }}"
+ when: openshift_docker_blocked_registries is not defined
+ - set_fact:
+ openshift_docker_options: "{{ lookup('oo_option', 'docker_options') }}"
+ when: openshift_docker_options is not defined
+ - set_fact:
+ openshift_docker_log_driver: "{{ lookup('oo_option', 'docker_log_driver') }}"
+ when: openshift_docker_log_driver is not defined
+ - set_fact:
+ openshift_docker_log_options: "{{ lookup('oo_option', 'docker_log_options') }}"
+ when: openshift_docker_log_options is not defined
- include: ../openshift-etcd/config.yml
- include: ../openshift-nfs/config.yml
+- include: ../openshift-loadbalancer/config.yml
+
- include: ../openshift-master/config.yml
- include: additional_config.yml
- include: ../openshift-node/config.yml
+
+- include: openshift_hosted.yml
diff --git a/playbooks/common/openshift-cluster/enable_dnsmasq.yml b/playbooks/common/openshift-cluster/enable_dnsmasq.yml
new file mode 100644
index 000000000..f2bcc872f
--- /dev/null
+++ b/playbooks/common/openshift-cluster/enable_dnsmasq.yml
@@ -0,0 +1,66 @@
+---
+- include: evaluate_groups.yml
+
+- name: Load openshift_facts
+ hosts: oo_masters_to_config:oo_nodes_to_config
+ roles:
+ - openshift_facts
+ post_tasks:
+ - fail: msg="This playbook requires a master version of at least Origin 1.1 or OSE 3.1"
+ when: not openshift.common.version_gte_3_1_1_or_1_1_1 | bool
+
+- name: Reconfigure masters to listen on our new dns_port
+ hosts: oo_masters_to_config
+ handlers:
+ - include: ../../../roles/openshift_master/handlers/main.yml
+ vars:
+ os_firewall_allow:
+ - service: skydns tcp
+ port: "{{ openshift.master.dns_port }}/tcp"
+ - service: skydns udp
+ port: "{{ openshift.master.dns_port }}/udp"
+ roles:
+ - os_firewall
+ tasks:
+ - openshift_facts:
+ role: "{{ item.role }}"
+ local_facts: "{{ item.local_facts }}"
+ with_items:
+ - role: common
+ local_facts:
+ use_dnsmasq: True
+ - role: master
+ local_facts:
+ dns_port: '8053'
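+        # Masters move SkyDNS to 8053 so dnsmasq on each node can own port 53.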
+ - modify_yaml:
+ dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
+ yaml_key: dnsConfig.bindAddress
+ yaml_value: "{{ openshift.master.bind_addr }}:{{ openshift.master.dns_port }}"
+ notify: restart master
+ - meta: flush_handlers
+
+- name: Configure nodes for dnsmasq
+ hosts: oo_nodes_to_config
+ handlers:
+ - include: ../../../roles/openshift_node/handlers/main.yml
+ pre_tasks:
+ - openshift_facts:
+ role: "{{ item.role }}"
+ local_facts: "{{ item.local_facts }}"
+ with_items:
+ - role: common
+ local_facts:
+ use_dnsmasq: True
+ - role: node
+ local_facts:
+ dns_ip: "{{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }}"
+ vars:
+ openshift_deployment_type: "{{ deployment_type }}"
+ roles:
+ - openshift_node_dnsmasq
+ post_tasks:
+ - modify_yaml:
+ dest: "{{ openshift.common.config_base }}/node/node-config.yaml"
+ yaml_key: dnsIP
+ yaml_value: "{{ openshift.node.dns_ip }}"
+ notify: restart node
diff --git a/playbooks/common/openshift-cluster/evaluate_groups.yml b/playbooks/common/openshift-cluster/evaluate_groups.yml
index 432a92b49..c5273b08f 100644
--- a/playbooks/common/openshift-cluster/evaluate_groups.yml
+++ b/playbooks/common/openshift-cluster/evaluate_groups.yml
@@ -29,12 +29,20 @@
msg: The nfs group must be limited to one host
when: (groups[g_nfs_hosts] | default([])) | length > 1
+ - name: Evaluate oo_all_hosts
+ add_host:
+ name: "{{ item }}"
+ groups: oo_all_hosts
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_become: "{{ g_sudo | default(omit) }}"
+ with_items: g_all_hosts | default([])
+
- name: Evaluate oo_masters
add_host:
name: "{{ item }}"
groups: oo_masters
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_sudo: "{{ g_sudo | default(omit) }}"
+ ansible_become: "{{ g_sudo | default(omit) }}"
with_items: "{{ g_master_hosts | union(g_new_master_hosts) | default([]) }}"
- name: Evaluate oo_etcd_to_config
@@ -42,7 +50,7 @@
name: "{{ item }}"
groups: oo_etcd_to_config
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_sudo: "{{ g_sudo | default(omit) }}"
+ ansible_become: "{{ g_sudo | default(omit) }}"
with_items: "{{ g_etcd_hosts | default([]) }}"
- name: Evaluate oo_masters_to_config
@@ -50,7 +58,7 @@
name: "{{ item }}"
groups: oo_masters_to_config
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_sudo: "{{ g_sudo | default(omit) }}"
+ ansible_become: "{{ g_sudo | default(omit) }}"
with_items: "{{ g_new_master_hosts | default(g_master_hosts | default([], true), true) }}"
- name: Evaluate oo_nodes_to_config
@@ -58,7 +66,7 @@
name: "{{ item }}"
groups: oo_nodes_to_config
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_sudo: "{{ g_sudo | default(omit) }}"
+ ansible_become: "{{ g_sudo | default(omit) }}"
with_items: "{{ g_new_node_hosts | default(g_node_hosts | default([], true), true) }}"
# Skip adding the master to oo_nodes_to_config when g_new_node_hosts is
@@ -67,7 +75,7 @@
name: "{{ item }}"
groups: oo_nodes_to_config
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_sudo: "{{ g_sudo | default(omit) }}"
+ ansible_become: "{{ g_sudo | default(omit) }}"
with_items: "{{ g_master_hosts | default([]) }}"
when: g_nodeonmaster | default(false) == true and g_new_node_hosts is not defined
@@ -83,7 +91,7 @@
name: "{{ g_master_hosts[0] }}"
groups: oo_first_master
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_sudo: "{{ g_sudo | default(omit) }}"
+ ansible_become: "{{ g_sudo | default(omit) }}"
when: g_master_hosts|length > 0
- name: Evaluate oo_lb_to_config
@@ -91,7 +99,7 @@
name: "{{ item }}"
groups: oo_lb_to_config
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_sudo: "{{ g_sudo | default(omit) }}"
+ ansible_become: "{{ g_sudo | default(omit) }}"
with_items: "{{ g_lb_hosts | default([]) }}"
- name: Evaluate oo_nfs_to_config
@@ -99,5 +107,5 @@
name: "{{ item }}"
groups: oo_nfs_to_config
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_sudo: "{{ g_sudo | default(omit) }}"
+ ansible_become: "{{ g_sudo | default(omit) }}"
with_items: "{{ g_nfs_hosts | default([]) }}"
diff --git a/playbooks/common/openshift-cluster/initialize_facts.yml b/playbooks/common/openshift-cluster/initialize_facts.yml
new file mode 100644
index 000000000..37f523246
--- /dev/null
+++ b/playbooks/common/openshift-cluster/initialize_facts.yml
@@ -0,0 +1,11 @@
+---
+- name: Initialize host facts
+ hosts: oo_all_hosts
+ any_errors_fatal: true
+ roles:
+ - openshift_facts
+ tasks:
+ - openshift_facts:
+ role: common
+ local_facts:
+ hostname: "{{ openshift_hostname | default(None) }}"
diff --git a/playbooks/common/openshift-cluster/library b/playbooks/common/openshift-cluster/library
new file mode 120000
index 000000000..d0b7393d3
--- /dev/null
+++ b/playbooks/common/openshift-cluster/library
@@ -0,0 +1 @@
+../../../library/
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/openshift_hosted.yml b/playbooks/common/openshift-cluster/openshift_hosted.yml
new file mode 100644
index 000000000..811b3d685
--- /dev/null
+++ b/playbooks/common/openshift-cluster/openshift_hosted.yml
@@ -0,0 +1,30 @@
+---
+- name: Create persistent volumes and create hosted services
+ hosts: oo_first_master
+ vars:
+ attach_registry_volume: "{{ openshift.hosted.registry.storage.kind != None }}"
+ deploy_infra: "{{ openshift.master.infra_nodes | default([]) | length > 0 }}"
+ persistent_volumes: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volumes(groups) }}"
+ persistent_volume_claims: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volume_claims }}"
+ roles:
+ - role: openshift_persistent_volumes
+ when: persistent_volumes | length > 0 or persistent_volume_claims | length > 0
+ - role: openshift_serviceaccounts
+ openshift_serviceaccounts_names:
+ - router
+ - registry
+ openshift_serviceaccounts_namespace: default
+ openshift_serviceaccounts_sccs:
+ - privileged
+ - role: openshift_registry
+ registry_volume_claim: "{{ openshift.hosted.registry.storage.volume.name }}-claim"
+ when: deploy_infra | bool and attach_registry_volume | bool
+ - role: openshift_metrics
+ when: openshift.hosted.metrics.deploy | bool
+
+- name: Create Hosted Resources
+ hosts: oo_first_master
+ pre_tasks:
+ - set_fact:
+ openshift_hosted_router_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}"
+ roles:
+ - role: openshift_hosted
diff --git a/playbooks/common/openshift-cluster/update_repos_and_packages.yml b/playbooks/common/openshift-cluster/update_repos_and_packages.yml
index 1474bb3ca..e3d16d359 100644
--- a/playbooks/common/openshift-cluster/update_repos_and_packages.yml
+++ b/playbooks/common/openshift-cluster/update_repos_and_packages.yml
@@ -1,8 +1,15 @@
---
+- include: evaluate_groups.yml
+
- hosts: oo_hosts_to_update
vars:
openshift_deployment_type: "{{ deployment_type }}"
roles:
+  # Explicitly call openshift_facts: when rhel_subscribe is skipped, the
+  # openshift_facts dependency of openshift_repos also appears to be skipped
+  # (observed at least with Ansible 2.0.2).
+ - openshift_facts
- role: rhel_subscribe
when: deployment_type in ["enterprise", "atomic-enterprise", "openshift-enterprise"] and
ansible_distribution == "RedHat" and
diff --git a/playbooks/common/openshift-cluster/upgrades/files/openshift_container_versions.sh b/playbooks/common/openshift-cluster/upgrades/files/openshift_container_versions.sh
new file mode 100644
index 000000000..9bbeff660
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/files/openshift_container_versions.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+
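+# Usage: openshift_container_versions.sh <service type, e.g. origin>
+# Prints YAML with curr_version / avail_version for a containerized install.
+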
+# Here we don't really care if this is a master, api, controller or node image.
+# We just need to know the version of one of them.
+unit_file=$(ls /etc/systemd/system/${1}*.service | grep -v node-dep | head -n1)
+
+if [ ${1} == "origin" ]; then
+ image_name="openshift/origin"
+elif grep aep $unit_file > /dev/null 2>&1; then
+    image_name="aep3/node"
+elif grep openshift3 $unit_file > /dev/null 2>&1; then
+    image_name="openshift3/node"
+fi
+
+installed=$(docker run --rm --entrypoint=/bin/openshift ${image_name} version 2> /dev/null | grep openshift | awk '{ print $2 }' | cut -f1 -d"-" | tr -d 'v')
+
+docker pull ${image_name} > /dev/null 2>&1
+available=$(docker run --rm --entrypoint=/bin/openshift ${image_name} version 2> /dev/null | grep openshift | awk '{ print $2 }' | cut -f1 -d"-" | tr -d 'v')
+
+echo "---"
+echo "curr_version: ${installed}"
+echo "avail_version: ${available}"
diff --git a/playbooks/common/openshift-cluster/upgrades/files/rpm_versions.sh b/playbooks/common/openshift-cluster/upgrades/files/rpm_versions.sh
new file mode 100644
index 000000000..7bf249742
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/files/rpm_versions.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
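+# Usage: rpm_versions.sh <package>...
+# Prints YAML with the installed (curr_version) and latest available
+# (avail_version) versions of the given packages.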
+if [ `which dnf 2> /dev/null` ]; then
+ installed=$(dnf repoquery --installed --latest-limit 1 -d 0 --qf '%{version}-%{release}' "${@}" 2> /dev/null)
+ available=$(dnf repoquery --available --latest-limit 1 -d 0 --qf '%{version}-%{release}' "${@}" 2> /dev/null)
+else
+ installed=$(repoquery --plugins --pkgnarrow=installed --qf '%{version}-%{release}' "${@}" 2> /dev/null)
+ available=$(repoquery --plugins --pkgnarrow=available --qf '%{version}-%{release}' "${@}" 2> /dev/null)
+fi
+
+echo "---"
+echo "curr_version: ${installed}"
+echo "avail_version: ${available}"
diff --git a/playbooks/common/openshift-cluster/upgrades/files/versions.sh b/playbooks/common/openshift-cluster/upgrades/files/versions.sh
deleted file mode 100644
index 3a1a8ebb1..000000000
--- a/playbooks/common/openshift-cluster/upgrades/files/versions.sh
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/bin/bash
-
-yum_installed=$(yum list installed -e 0 -q "$@" 2>&1 | tail -n +2 | awk '{ print $2 }' | sort -r | tr '\n' ' ')
-
-yum_available=$(yum list available -e 0 -q "$@" 2>&1 | tail -n +2 | grep -v 'el7ose' | awk '{ print $2 }' | sort -r | tr '\n' ' ')
-
-echo "---"
-echo "curr_version: ${yum_installed}"
-echo "avail_version: ${yum_available}"
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_minor/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_0_minor/upgrade.yml
index 63c8ef756..e31e7f8a3 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_0_minor/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_0_minor/upgrade.yml
@@ -36,16 +36,17 @@
- name: Ensure AOS 3.0.2 or Origin 1.0.6
hosts: oo_first_master
tasks:
- fail: This playbook requires Origin 1.0.6 or Atomic OpenShift 3.0.2 or later
+ - fail:
+ msg: "This playbook requires Origin 1.0.6 or Atomic OpenShift 3.0.2 or later"
    when: _new_version.stdout | version_compare('1.0.6','<') or ( _new_version.stdout | version_compare('3.0','>=') and _new_version.stdout | version_compare('3.0.2','<') )
- name: Update cluster policy
hosts: oo_first_master
tasks:
- - name: oadm policy reconcile-cluster-roles --confirm
+ - name: oadm policy reconcile-cluster-roles --additive-only=true --confirm
command: >
{{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
- policy reconcile-cluster-roles --confirm
+ policy reconcile-cluster-roles --additive-only=true --confirm
- name: Upgrade default router
hosts: oo_first_master
@@ -108,5 +109,6 @@
vars:
openshift_examples_import_command: "update"
openshift_deployment_type: "{{ deployment_type }}"
+ registry_url: "{{ openshift.master.registry_url }}"
roles:
- openshift_examples
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
index 0fb38f32e..c3c1240d8 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
@@ -54,7 +54,7 @@
- script: ../files/pre-upgrade-check
-- name: Verify upgrade can proceed
+- name: Verify upgrade targets
hosts: oo_masters_to_config:oo_nodes_to_config
vars:
target_version: "{{ '1.1' if deployment_type == 'origin' else '3.1' }}"
@@ -66,7 +66,7 @@
g_new_service_name: "{{ 'origin' if deployment_type =='origin' else 'atomic-openshift' }}"
- name: Determine available versions
- script: ../files/versions.sh {{ g_new_service_name }} openshift
+ script: ../files/rpm_versions.sh {{ g_new_service_name }} openshift
register: g_versions_result
- set_fact:
@@ -212,13 +212,10 @@
- name: Update deployment type
hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config
+ vars:
+ openshift_deployment_type: "{{ deployment_type }}"
roles:
- openshift_facts
- post_tasks:
- - openshift_facts:
- role: common
- local_facts:
- deployment_type: "{{ deployment_type }}"
- name: Update master facts
hosts: oo_masters_to_config
@@ -493,7 +490,7 @@
- name: Reconcile Cluster Roles
command: >
{{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
- policy reconcile-cluster-roles --confirm
+ policy reconcile-cluster-roles --additive-only=true --confirm
run_once: true
- name: Reconcile Cluster Role Bindings
@@ -572,6 +569,7 @@
# Update the existing templates
- role: openshift_examples
openshift_examples_import_command: replace
+ registry_url: "{{ openshift.master.registry_url }}"
pre_tasks:
- name: Collect all routers
command: >
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/post.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/post.yml
index 196393b2a..f030eed18 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/post.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/post.yml
@@ -19,6 +19,7 @@
# Update the existing templates
- role: openshift_examples
openshift_examples_import_command: replace
+ registry_url: "{{ openshift.master.registry_url }}"
pre_tasks:
- name: Collect all routers
command: >
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/pre.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/pre.yml
index 12b9c84d3..85d7073f2 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/pre.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/pre.yml
@@ -29,19 +29,20 @@
valid version for a {{ target_version }} upgrade
when: openshift_pkg_version is defined and openshift_pkg_version.split('-',1).1 | version_compare(target_version ,'<')
-- name: Verify upgrade can proceed
+- name: Verify upgrade targets
hosts: oo_masters_to_config:oo_nodes_to_config
vars:
target_version: "{{ '1.1.1' if deployment_type == 'origin' else '3.1.1' }}"
tasks:
- name: Clean package cache
command: "{{ ansible_pkg_mgr }} clean all"
+ when: not openshift.common.is_atomic | bool
- set_fact:
g_new_service_name: "{{ 'origin' if deployment_type =='origin' else 'atomic-openshift' }}"
- name: Determine available versions
- script: ../files/versions.sh {{ g_new_service_name }}
+ script: ../files/rpm_versions.sh {{ g_new_service_name }}
register: g_versions_result
- set_fact:
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/upgrade.yml
index 54bb251f7..e5cfa58aa 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/upgrade.yml
@@ -12,7 +12,7 @@
openshift_version: "{{ openshift_pkg_version | default('') }}"
tasks:
- name: Upgrade master packages
- command: "{{ ansible_pkg_mgr}} update -y {{ openshift.common.service_type }}-master{{ openshift_version }}"
+ command: "{{ ansible_pkg_mgr}} update-to -y {{ openshift.common.service_type }}-master{{ openshift_version }} {{ openshift.common.service_type }}-sdn-ovs{{ openshift_version }}"
when: not openshift.common.is_containerized | bool
- name: Ensure python-yaml present for config upgrade
@@ -63,7 +63,7 @@
- openshift_facts
tasks:
- name: Upgrade node packages
- command: "{{ ansible_pkg_mgr }} update -y {{ openshift.common.service_type }}-node{{ openshift_version }}"
+ command: "{{ ansible_pkg_mgr }} update-to -y {{ openshift.common.service_type }}-node{{ openshift_version }} {{ openshift.common.service_type }}-sdn-ovs{{ openshift_version }}"
when: not openshift.common.is_containerized | bool
- name: Restart node service
@@ -103,7 +103,7 @@
- name: Reconcile Cluster Roles
command: >
{{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
- policy reconcile-cluster-roles --confirm
+ policy reconcile-cluster-roles --additive-only=true --confirm
run_once: true
- name: Reconcile Cluster Role Bindings
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/atomic-openshift-master.j2 b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/atomic-openshift-master.j2
new file mode 120000
index 000000000..cf20e8959
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/atomic-openshift-master.j2
@@ -0,0 +1 @@
+../../../../../roles/openshift_master/templates/atomic-openshift-master.j2
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/containerized_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/containerized_upgrade.yml
new file mode 100644
index 000000000..319758a06
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/containerized_upgrade.yml
@@ -0,0 +1,11 @@
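+# Expects g_new_version (and, optionally, verify_upgrade_version) to be set by
+# the calling upgrade play.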
+- include_vars: ../../../../../roles/openshift_node/vars/main.yml
+
+- name: Update systemd units
+ include: ../../../../../roles/openshift_node/tasks/systemd_units.yml openshift_version=v{{ g_new_version }}
+
+- name: Verify that the correct version was configured
+ shell: grep {{ verify_upgrade_version }} {{ item }}
+ with_items:
+ - /etc/sysconfig/openvswitch
+ - /etc/sysconfig/{{ openshift.common.service_type }}*
+ when: verify_upgrade_version is defined
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker
new file mode 120000
index 000000000..5a3dd12b3
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker
@@ -0,0 +1 @@
+../../../../../roles/openshift_master/templates/docker
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker-cluster b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker-cluster
new file mode 120000
index 000000000..3ee319365
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker-cluster
@@ -0,0 +1 @@
+../../../../../roles/openshift_master/templates/docker-cluster
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker_upgrade.yml
new file mode 100644
index 000000000..c7b18f51b
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker_upgrade.yml
@@ -0,0 +1,14 @@
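+# Expects g_docker_version to be set by the calling play (typically parsed
+# from rpm_versions.sh output).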
+- name: Check if Docker is installed
+ command: rpm -q docker
+ register: pkg_check
+ failed_when: pkg_check.rc > 1
+ changed_when: no
+
+- name: Upgrade Docker
+ command: "{{ ansible_pkg_mgr}} update -y docker"
+ when: pkg_check.rc == 0 and g_docker_version.curr_version | version_compare('1.9','<')
+ register: docker_upgrade
+
+- name: Restart Docker
+ command: systemctl restart docker
+ when: docker_upgrade | changed
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/filter_plugins b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/filter_plugins
new file mode 120000
index 000000000..27ddaa18b
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/filter_plugins
@@ -0,0 +1 @@
+../../../../../filter_plugins
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/library b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/library
new file mode 120000
index 000000000..53bed9684
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/library
@@ -0,0 +1 @@
+../library \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/lookup_plugins b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/lookup_plugins
new file mode 120000
index 000000000..cf407f69b
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/lookup_plugins
@@ -0,0 +1 @@
+../../../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/native-cluster b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/native-cluster
new file mode 120000
index 000000000..f44f8eb4f
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/native-cluster
@@ -0,0 +1 @@
+../../../../../roles/openshift_master/templates/native-cluster \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/node_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/node_upgrade.yml
new file mode 100644
index 000000000..a911f12be
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/node_upgrade.yml
@@ -0,0 +1,24 @@
+- name: Prepare for Node evacuation
+ command: >
+ {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --schedulable=false
+ delegate_to: "{{ groups.oo_first_master.0 }}"
+
+- name: Evacuate Node for Kubelet upgrade
+ command: >
+ {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --evacuate --force
+ delegate_to: "{{ groups.oo_first_master.0 }}"
+
+- include: rpm_upgrade.yml
+ vars:
+ component: "node"
+ openshift_version: "{{ openshift_pkg_version | default('') }}"
+ when: not openshift.common.is_containerized | bool
+
+- include: containerized_upgrade.yml
+ when: openshift.common.is_containerized | bool
+
+- name: Set node schedulability
+ command: >
+ {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --schedulable=true
+ delegate_to: "{{ groups.oo_first_master.0 }}"
+ when: openshift.node.schedulable | bool
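
Nothing between the evacuation and the final schedulable=true step confirms the upgraded node actually rejoined the cluster. A hedged sketch of a readiness wait that could slot in before the last task (the 'Ready' substring check against plain `oc get node` output is an assumption):

- name: Wait for the node to report Ready after upgrade (sketch)
  command: >
    {{ openshift.common.client_binary }} get node {{ openshift.common.hostname | lower }}
  delegate_to: "{{ groups.oo_first_master.0 }}"
  register: node_status
  until: "'Ready' in node_status.stdout"
  retries: 30
  delay: 5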
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/post.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/post.yml
new file mode 100644
index 000000000..c16965a35
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/post.yml
@@ -0,0 +1,59 @@
+---
+###############################################################################
+# Post upgrade - Upgrade default router, default registry and examples
+###############################################################################
+- name: Upgrade default router and default registry
+ hosts: oo_first_master
+ vars:
+ openshift_deployment_type: "{{ deployment_type }}"
+ registry_image: "{{ openshift.master.registry_url | replace( '${component}', 'docker-registry' ) | replace ( '${version}', 'v' + g_new_version ) }}"
+ router_image: "{{ openshift.master.registry_url | replace( '${component}', 'haproxy-router' ) | replace ( '${version}', 'v' + g_new_version ) }}"
+ oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig"
+ roles:
+ - openshift_manageiq
+ # Create the new templates shipped in 3.2; existing templates are left
+ # unmodified. This prevents the subsequent openshift_examples role
+ # definition from failing when it tries to replace templates that do not
+ # already exist. We could potentially have done a replace --force to
+ # create and update in one step.
+ - openshift_examples
+ # Update the existing templates
+ - role: openshift_examples
+ registry_url: "{{ openshift.master.registry_url }}"
+ openshift_examples_import_command: replace
+ pre_tasks:
+ - name: Collect all routers
+ command: >
+ {{ oc_cmd }} get pods --all-namespaces -l 'router' -o json
+ register: all_routers
+ failed_when: false
+ changed_when: false
+
+ - set_fact: haproxy_routers="{{ (all_routers.stdout | from_json)['items'] | oo_pods_match_component(openshift_deployment_type, 'haproxy-router') | oo_select_keys_from_list(['metadata']) }}"
+ when: all_routers.rc == 0
+
+ - set_fact: haproxy_routers=[]
+ when: all_routers.rc != 0
+
+ - name: Update router image to current version
+ when: all_routers.rc == 0
+ command: >
+ {{ oc_cmd }} patch dc/{{ item['labels']['deploymentconfig'] }} -n {{ item['namespace'] }} -p
+ '{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}","livenessProbe":{"tcpSocket":null,"httpGet":{"path": "/healthz", "port": 1936, "host": "localhost", "scheme": "HTTP"},"initialDelaySeconds":10,"timeoutSeconds":1}}]}}}}'
+ --api-version=v1
+ with_items: haproxy_routers
+
+ - name: Check for default registry
+ command: >
+ {{ oc_cmd }} get -n default dc/docker-registry
+ register: _default_registry
+ failed_when: false
+ changed_when: false
+
+ - name: Update registry image to current version
+ when: _default_registry.rc == 0
+ command: >
+ {{ oc_cmd }} patch dc/docker-registry -n default -p
+ '{"spec":{"template":{"spec":{"containers":[{"name":"registry","image":"{{ registry_image }}"}]}}}}'
+ --api-version=v1
+
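
Both patch commands succeed as soon as the API accepts the patch, even if the rolled-out pods later fail to pull the new image. A follow-up check is cheap; a sketch assuming oc's jsonpath output support:

- name: Confirm the registry deployment references the new image (sketch)
  command: >
    {{ oc_cmd }} get dc/docker-registry -n default
    -o jsonpath='{.spec.template.spec.containers[0].image}'
  register: registry_image_check
  changed_when: false
  failed_when: registry_image_check.stdout != registry_image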
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml
new file mode 100644
index 000000000..f163cca86
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml
@@ -0,0 +1,345 @@
+---
+###############################################################################
+# Evaluate host groups and gather facts
+###############################################################################
+- name: Load openshift_facts and update repos
+ hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config:oo_lb_to_config
+ roles:
+ - openshift_facts
+ - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_config
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+ openshift_generate_no_proxy_hosts | default(True) | bool }}"
+
+- name: Evaluate additional groups for upgrade
+ hosts: localhost
+ connection: local
+ become: no
+ tasks:
+ - name: Evaluate etcd_hosts_to_backup
+ add_host:
+ name: "{{ item }}"
+ groups: etcd_hosts_to_backup
+ with_items: groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master
+
+###############################################################################
+# Pre-upgrade checks
+###############################################################################
+- name: Verify upgrade can proceed
+ hosts: oo_first_master
+ vars:
+ target_version: "{{ '1.2' if deployment_type == 'origin' else '3.1.1.900' }}"
+ g_pacemaker_upgrade_url_segment: "{{ 'org/latest' if deployment_type == 'origin' else '.com/enterprise/3.1' }}"
+ gather_facts: no
+ tasks:
+ - fail:
+ msg: >
+ This upgrade is only supported for atomic-enterprise, origin, openshift-enterprise, and online
+ deployment types
+ when: deployment_type not in ['atomic-enterprise', 'origin','openshift-enterprise', 'online']
+
+ - fail:
+ msg: >
+ This upgrade does not support Pacemaker:
+ https://docs.openshift.{{ g_pacemaker_upgrade_url_segment }}/install_config/upgrading/pacemaker_to_native_ha.html
+ when: openshift.master.cluster_method is defined and openshift.master.cluster_method == 'pacemaker'
+
+ - fail:
+ msg: >
+ openshift_pkg_version is {{ openshift_pkg_version }} which is not a
+ valid version for a {{ target_version }} upgrade
+ when: openshift_pkg_version is defined and openshift_pkg_version.split('-',1).1 | version_compare(target_version, '<')
+
+ - fail:
+ msg: >
+ openshift_image_tag is {{ openshift_image_tag }} which is not a
+ valid version for a {{ target_version }} upgrade
+ when: openshift_image_tag is defined and openshift_image_tag.split('v',1).1 | version_compare(target_version, '<')
+
+- name: Verify master processes
+ hosts: oo_masters_to_config
+ roles:
+ - openshift_facts
+ tasks:
+ - openshift_facts:
+ role: master
+ local_facts:
+ ha: "{{ groups.oo_masters_to_config | length > 1 }}"
+
+ - name: Ensure Master is running
+ service:
+ name: "{{ openshift.common.service_type }}-master"
+ state: started
+ enabled: yes
+ when: openshift.master.ha is defined and not openshift.master.ha | bool and openshift.common.is_containerized | bool
+
+ - name: Ensure HA Master is running
+ service:
+ name: "{{ openshift.common.service_type }}-master-api"
+ state: started
+ enabled: yes
+ when: openshift.master.ha is defined and openshift.master.ha | bool and openshift.common.is_containerized | bool
+
+ - name: Ensure HA Master is running
+ service:
+ name: "{{ openshift.common.service_type }}-master-controllers"
+ state: started
+ enabled: yes
+ when: openshift.master.ha is defined and openshift.master.ha | bool and openshift.common.is_containerized | bool
+
+- name: Verify node processes
+ hosts: oo_nodes_to_config
+ roles:
+ - openshift_facts
+ tasks:
+ - name: Ensure Node is running
+ service:
+ name: "{{ openshift.common.service_type }}-node"
+ state: started
+ enabled: yes
+ when: openshift.common.is_containerized | bool
+
+- name: Verify upgrade targets
+ hosts: oo_masters_to_config:oo_nodes_to_config
+ vars:
+ target_version: "{{ '1.2' if deployment_type == 'origin' else '3.1.1.900' }}"
+ openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
+ upgrading: True
+ handlers:
+ - include: ../../../../../roles/openshift_master/handlers/main.yml
+ - include: ../../../../../roles/openshift_node/handlers/main.yml
+ roles:
+ # We want the cli role to evaluate so that the containerized oc/oadm wrappers
+ # are modified to use the correct image tag. However, this can trigger a
+ # docker restart if new configuration is laid down which would immediately
+ # pull the latest image and defeat the purpose of these tasks.
+ - { role: openshift_cli }
+ pre_tasks:
+ - name: Clean package cache
+ command: "{{ ansible_pkg_mgr }} clean all"
+ when: not openshift.common.is_atomic | bool
+
+ - set_fact:
+ g_new_service_name: "{{ 'origin' if deployment_type =='origin' else 'atomic-openshift' }}"
+ when: not openshift.common.is_containerized | bool
+
+ - name: Determine available versions
+ script: ../files/rpm_versions.sh {{ g_new_service_name }}
+ register: g_rpm_versions_result
+ when: not openshift.common.is_containerized | bool
+
+ - set_fact:
+ g_aos_versions: "{{ g_rpm_versions_result.stdout | from_yaml }}"
+ when: not openshift.common.is_containerized | bool
+
+ - name: Determine available versions
+ script: ../files/openshift_container_versions.sh {{ openshift.common.service_type }}
+ register: g_containerized_versions_result
+ when: openshift.common.is_containerized | bool
+
+ - set_fact:
+ g_aos_versions: "{{ g_containerized_versions_result.stdout | from_yaml }}"
+ when: openshift.common.is_containerized | bool
+
+ - set_fact:
+ g_new_version: "{{ g_aos_versions.curr_version.split('-', 1).0 if g_aos_versions.avail_version is none else g_aos_versions.avail_version.split('-', 1).0 }}"
+ when: openshift_pkg_version is not defined
+
+ - set_fact:
+ g_new_version: "{{ openshift_pkg_version | replace('-','') }}"
+ when: openshift_pkg_version is defined
+
+ - set_fact:
+ g_new_version: "{{ openshift_image_tag | replace('v','') }}"
+ when: openshift_image_tag is defined
+
+ - fail:
+ msg: Unable to determine the currently installed version
+ when: g_aos_versions.curr_version == ""
+
+ - fail:
+ msg: Determined version {{ g_new_version }} does not match verify_upgrade_version
+ when: verify_upgrade_version is defined and g_new_version != verify_upgrade_version
+
+ - include_vars: ../../../../../roles/openshift_master/vars/main.yml
+ when: inventory_hostname in groups.oo_masters_to_config
+
+ - name: Update systemd units
+ include: ../../../../../roles/openshift_master/tasks/systemd_units.yml openshift_version=v{{ g_new_version }}
+ when: inventory_hostname in groups.oo_masters_to_config
+
+ - include_vars: ../../../../../roles/openshift_node/vars/main.yml
+ when: inventory_hostname in groups.oo_nodes_to_config
+
+ - name: Update systemd units
+ include: ../../../../../roles/openshift_node/tasks/systemd_units.yml openshift_version=v{{ g_new_version }}
+ when: inventory_hostname in groups.oo_nodes_to_config
+
+ # Note: the version number is hardcoded here in hopes of catching potential
+ # bugs in how g_aos_versions.curr_version is set
+ - name: Verifying the correct version is installed for upgrade
+ shell: grep 3.1.1.6 {{ item }}
+ with_items:
+ - /etc/sysconfig/openvswitch
+ - /etc/sysconfig/{{ openshift.common.service_type }}*
+ when: verify_upgrade_version is defined
+
+ - name: Verifying the image version is used in the systemd unit
+ shell: grep IMAGE_VERSION {{ item }}
+ with_items:
+ - /etc/systemd/system/openvswitch.service
+ - /etc/systemd/system/{{ openshift.common.service_type }}*.service
+ when: openshift.common.is_containerized | bool
+
+ - fail:
+ msg: This playbook requires Origin 1.1 or later
+ when: deployment_type == 'origin' and g_aos_versions.curr_version | version_compare('1.1','<')
+
+ - fail:
+ msg: This playbook requires Atomic Enterprise Platform/OpenShift Enterprise 3.1 or later
+ when: deployment_type == 'atomic-openshift' and g_aos_versions.curr_version | version_compare('3.1','<')
+
+ - fail:
+ msg: Upgrade packages not found
+ when: openshift_image_tag is not defined and (g_aos_versions.avail_version | default(g_aos_versions.curr_version, true) | version_compare(target_version, '<'))
+
+- name: Verify docker upgrade targets
+ hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config
+ tasks:
+ - name: Determine available Docker
+ script: ../files/rpm_versions.sh docker
+ register: g_docker_version_result
+ when: not openshift.common.is_atomic | bool
+
+ - name: Determine available Docker
+ shell: "rpm -q --queryformat '---\ncurr_version: %{VERSION}\navail_version: \n' docker"
+ register: g_atomic_docker_version_result
+ when: openshift.common.is_atomic | bool
+
+ - set_fact:
+ g_docker_version: "{{ g_docker_version_result.stdout | from_yaml }}"
+ when: not openshift.common.is_atomic | bool
+
+ - set_fact:
+ g_docker_version: "{{ g_atomic_docker_version_result.stdout | from_yaml }}"
+ when: openshift.common.is_atomic | bool
+
+ - fail:
+ msg: This playbook requires access to Docker 1.9 or later
+ when: g_docker_version.avail_version | default(g_docker_version.curr_version, true) | version_compare('1.9','<')
+
+ # TODO: add check to upgrade ostree to get latest Docker
+
+ - set_fact:
+ pre_upgrade_complete: True
+
+
+##############################################################################
+# Gate on pre-upgrade checks
+##############################################################################
+- name: Gate on pre-upgrade checks
+ hosts: localhost
+ connection: local
+ become: no
+ vars:
+ pre_upgrade_hosts: "{{ groups.oo_masters_to_config | union(groups.oo_nodes_to_config) }}"
+ tasks:
+ - set_fact:
+ pre_upgrade_completed: "{{ hostvars
+ | oo_select_keys(pre_upgrade_hosts)
+ | oo_collect('inventory_hostname', {'pre_upgrade_complete': true}) }}"
+ - set_fact:
+ pre_upgrade_failed: "{{ pre_upgrade_hosts | difference(pre_upgrade_completed) }}"
+ - fail:
+ msg: "Upgrade cannot continue. The following hosts did not complete pre-upgrade checks: {{ pre_upgrade_failed | join(',') }}"
+ when: pre_upgrade_failed | length > 0
+
+###############################################################################
+# Backup etcd
+###############################################################################
+- name: Backup etcd
+ hosts: etcd_hosts_to_backup
+ vars:
+ embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}"
+ timestamp: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
+ roles:
+ - openshift_facts
+ tasks:
+ # Ensure we persist the etcd role for this host in openshift_facts
+ - openshift_facts:
+ role: etcd
+ local_facts: {}
+ when: "'etcd' not in openshift"
+
+ - stat: path=/var/lib/openshift
+ register: var_lib_openshift
+
+ - stat: path=/var/lib/origin
+ register: var_lib_origin
+
+ - name: Create origin symlink if necessary
+ file: src=/var/lib/openshift/ dest=/var/lib/origin state=link
+ when: var_lib_openshift.stat.exists == True and var_lib_origin.stat.exists == False
+
+ # TODO: replace shell module with command and update later checks
+ # We assume to be using the data dir for all backups.
+ - name: Check available disk space for etcd backup
+ shell: df --output=avail -k {{ openshift.common.data_dir }} | tail -n 1
+ register: avail_disk
+
+ # TODO: replace shell module with command and update later checks
+ - name: Check current embedded etcd disk usage
+ shell: du -k {{ openshift.etcd.etcd_data_dir }} | tail -n 1 | cut -f1
+ register: etcd_disk_usage
+ when: embedded_etcd | bool
+
+ - name: Abort if insufficient disk space for etcd backup
+ fail:
+ msg: >
+ {{ etcd_disk_usage.stdout }} Kb disk space required for etcd backup,
+ {{ avail_disk.stdout }} Kb available.
+ when: (embedded_etcd | bool) and (etcd_disk_usage.stdout|int > avail_disk.stdout|int)
+
+ - name: Install etcd (for etcdctl)
+ action: "{{ ansible_pkg_mgr }} name=etcd state=latest"
+ when: not openshift.common.is_atomic | bool
+
+ - name: Generate etcd backup
+ command: >
+ etcdctl backup --data-dir={{ openshift.etcd.etcd_data_dir }}
+ --backup-dir={{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}
+
+ - set_fact:
+ etcd_backup_complete: True
+
+ - name: Display location of etcd backup
+ debug:
+ msg: "Etcd backup created in {{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}"
+
+
+##############################################################################
+# Gate on etcd backup
+##############################################################################
+- name: Gate on etcd backup
+ hosts: localhost
+ connection: local
+ become: no
+ tasks:
+ - set_fact:
+ etcd_backup_completed: "{{ hostvars
+ | oo_select_keys(groups.etcd_hosts_to_backup)
+ | oo_collect('inventory_hostname', {'etcd_backup_complete': true}) }}"
+ - set_fact:
+ etcd_backup_failed: "{{ groups.etcd_hosts_to_backup | difference(etcd_backup_completed) }}"
+ - fail:
+ msg: "Upgrade cannot continue. The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}"
+ when: etcd_backup_failed | length > 0
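
The gate pattern used twice above (a per-host set_fact flag collected on localhost) depends on the repo's oo_select_keys/oo_collect filter plugins. The same gate can be written with stock filters; a sketch assuming Ansible 2.1+ for map('extract', ...) and a placeholder `nodes` group:

- name: Gate on pre-upgrade checks (built-in filters only, sketch)
  hosts: localhost
  connection: local
  become: no
  tasks:
    - set_fact:
        gate_failed: "{{ groups['nodes']
                         | map('extract', hostvars)
                         | rejectattr('pre_upgrade_complete', 'defined')
                         | map(attribute='inventory_hostname') | list }}"
    - fail:
        msg: "Upgrade cannot continue. Hosts missing pre-upgrade checks: {{ gate_failed | join(',') }}"
      when: gate_failed | length > 0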
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/roles b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/roles
new file mode 120000
index 000000000..6bc1a7aef
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/roles
@@ -0,0 +1 @@
+../../../../../roles \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/rpm_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/rpm_upgrade.yml
new file mode 100644
index 000000000..5c96ad094
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/rpm_upgrade.yml
@@ -0,0 +1,10 @@
+- name: Upgrade packages
+ command: "{{ ansible_pkg_mgr}} update -y {{ openshift.common.service_type }}-{{ component }}-{{ g_new_version }}"
+
+- name: Ensure python-yaml present for config upgrade
+ action: "{{ ansible_pkg_mgr }} name=PyYAML state=present"
+ when: not openshift.common.is_atomic | bool
+
+- name: Restart node service
+ service: name="{{ openshift.common.service_type }}-node" state=restarted
+ when: component == "node"
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml
new file mode 100644
index 000000000..964257af5
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml
@@ -0,0 +1,192 @@
+---
+###############################################################################
+# The restart playbook should be run after this playbook completes.
+###############################################################################
+
+- name: Upgrade docker
+ hosts: oo_masters_to_config:oo_nodes_to_config
+ roles:
+ - openshift_facts
+ tasks:
+ - include: docker_upgrade.yml
+ when: not openshift.common.is_atomic | bool
+ - name: Set post docker install facts
+ openshift_facts:
+ role: "{{ item.role }}"
+ local_facts: "{{ item.local_facts }}"
+ with_items:
+ - role: docker
+ local_facts:
+ openshift_image_tag: "v{{ g_new_version }}"
+ openshift_version: "{{ g_new_version }}"
+
+- name: Upgrade docker on etcd hosts
+ hosts: oo_etcd_to_config
+ roles:
+ - openshift_facts
+ tasks:
+ # Upgrade Docker unless the host is Atomic or is a non-containerized etcd-only host
+ - include: docker_upgrade.yml
+ when: not openshift.common.is_atomic | bool and not ('oo_etcd_to_config' in group_names and not openshift.common.is_containerized)
+
+# The cli image is used by openshift_docker_facts to determine the currently installed
+# version. We need to explicitly pull the latest image to handle cases where
+# the locally cached 'latest' tag is older than g_new_version.
+- name: Download cli image
+ hosts: oo_masters_to_config:oo_nodes_to_config
+ roles:
+ - { role: openshift_docker_facts }
+ vars:
+ openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
+ tasks:
+ - name: Pull Images
+ command: >
+ docker pull {{ item }}:latest
+ with_items:
+ - "{{ openshift.common.cli_image }}"
+ when: openshift.common.is_containerized | bool
+
+###############################################################################
+# Upgrade Masters
+###############################################################################
+- name: Upgrade master
+ hosts: oo_masters_to_config
+ handlers:
+ - include: ../../../../../roles/openshift_master/handlers/main.yml
+ roles:
+ - openshift_facts
+ tasks:
+ - include: rpm_upgrade.yml component=master
+ when: not openshift.common.is_containerized | bool
+
+ - include_vars: ../../../../../roles/openshift_master/vars/main.yml
+
+ - name: Update systemd units
+ include: ../../../../../roles/openshift_master/tasks/systemd_units.yml openshift_version=v{{ g_new_version }}
+
+# - name: Upgrade master configuration
+# openshift_upgrade_config:
+# from_version: '3.1'
+# to_version: '3.2'
+# role: master
+# config_base: "{{ hostvars[inventory_hostname].openshift.common.config_base }}"
+
+- name: Set master update status to complete
+ hosts: oo_masters_to_config
+ tasks:
+ - set_fact:
+ master_update_complete: True
+
+##############################################################################
+# Gate on master update complete
+##############################################################################
+- name: Gate on master update
+ hosts: localhost
+ connection: local
+ become: no
+ tasks:
+ - set_fact:
+ master_update_completed: "{{ hostvars
+ | oo_select_keys(groups.oo_masters_to_config)
+ | oo_collect('inventory_hostname', {'master_update_complete': true}) }}"
+ - set_fact:
+ master_update_failed: "{{ groups.oo_masters_to_config | difference(master_update_completed) }}"
+ - fail:
+ msg: "Upgrade cannot continue. The following masters did not finish updating: {{ master_update_failed | join(',') }}"
+ when: master_update_failed | length > 0
+
+###############################################################################
+# Upgrade Nodes
+###############################################################################
+- name: Upgrade nodes
+ hosts: oo_nodes_to_config
+ serial: 1
+ roles:
+ - openshift_facts
+ handlers:
+ - include: ../../../../../roles/openshift_node/handlers/main.yml
+ tasks:
+ - include: node_upgrade.yml
+
+ - set_fact:
+ node_update_complete: True
+
+##############################################################################
+# Gate on nodes update
+##############################################################################
+- name: Gate on nodes update
+ hosts: localhost
+ connection: local
+ become: no
+ tasks:
+ - set_fact:
+ node_update_completed: "{{ hostvars
+ | oo_select_keys(groups.oo_nodes_to_config)
+ | oo_collect('inventory_hostname', {'node_update_complete': true}) }}"
+ - set_fact:
+ node_update_failed: "{{ groups.oo_nodes_to_config | difference(node_update_completed) }}"
+ - fail:
+ msg: "Upgrade cannot continue. The following nodes did not finish updating: {{ node_update_failed | join(',') }}"
+ when: node_update_failed | length > 0
+
+###############################################################################
+# Reconcile Cluster Roles, Cluster Role Bindings and Security Context Constraints
+###############################################################################
+
+- name: Reconcile Cluster Roles, Cluster Role Bindings, and Security Context Constraints
+ hosts: oo_masters_to_config
+ roles:
+ - { role: openshift_cli, openshift_image_tag: "v{{ g_new_version }}" }
+ vars:
+ origin_reconcile_bindings: "{{ deployment_type == 'origin' and g_new_version | version_compare('1.0.6', '>') }}"
+ ent_reconcile_bindings: true
+ openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
+ upgrading: True
+ tasks:
+ - name: Verifying the correct command-line tools are available
+ shell: grep {{ verify_upgrade_version }} {{ openshift.common.admin_binary }}
+ when: openshift.common.is_containerized | bool and verify_upgrade_version is defined
+
+ - name: Reconcile Cluster Roles
+ command: >
+ {{ openshift.common.admin_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ policy reconcile-cluster-roles --additive-only=true --confirm
+ run_once: true
+
+ - name: Reconcile Cluster Role Bindings
+ command: >
+ {{ openshift.common.admin_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ policy reconcile-cluster-role-bindings
+ --exclude-groups=system:authenticated
+ --exclude-groups=system:authenticated:oauth
+ --exclude-groups=system:unauthenticated
+ --exclude-users=system:anonymous
+ --additive-only=true --confirm
+ when: origin_reconcile_bindings | bool or ent_reconcile_bindings | bool
+ run_once: true
+
+ - name: Reconcile Security Context Constraints
+ command: >
+ {{ openshift.common.admin_binary }} policy reconcile-sccs --confirm --additive-only=true
+ run_once: true
+
+ - set_fact:
+ reconcile_complete: True
+
+##############################################################################
+# Gate on reconcile
+##############################################################################
+- name: Gate on reconcile
+ hosts: localhost
+ connection: local
+ become: no
+ tasks:
+ - set_fact:
+ reconcile_completed: "{{ hostvars
+ | oo_select_keys(groups.oo_masters_to_config)
+ | oo_collect('inventory_hostname', {'reconcile_complete': true}) }}"
+ - set_fact:
+ reconcile_failed: "{{ groups.oo_masters_to_config | difference(reconcile_completed) }}"
+ - fail:
+ msg: "Upgrade cannot continue. The following masters did not finish reconciling: {{ reconcile_failed | join(',') }}"
+ when: reconcile_failed | length > 0
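
All three reconcile commands run with --confirm and apply changes immediately; oadm prints the proposed changes when --confirm is omitted, which makes a preview pass easy to bolt on ahead of the real run. A sketch:

- name: Preview SCC reconciliation without applying it (sketch)
  command: >
    {{ openshift.common.admin_binary }} policy reconcile-sccs --additive-only=true
  register: scc_preview
  changed_when: false
  run_once: true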
diff --git a/playbooks/common/openshift-cluster/validate_hostnames.yml b/playbooks/common/openshift-cluster/validate_hostnames.yml
index fd82997b9..50e25984f 100644
--- a/playbooks/common/openshift-cluster/validate_hostnames.yml
+++ b/playbooks/common/openshift-cluster/validate_hostnames.yml
@@ -1,19 +1,9 @@
---
-- include: evaluate_groups.yml
-
- name: Gather and set facts for node hosts
hosts: oo_nodes_to_config
roles:
- openshift_facts
tasks:
- - openshift_facts:
- role: "{{ item.role }}"
- local_facts: "{{ item.local_facts }}"
- with_items:
- - role: common
- local_facts:
- hostname: "{{ openshift_hostname | default(None) }}"
- public_hostname: "{{ openshift_public_hostname | default(None) }}"
- shell:
getent ahostsv4 {{ openshift.common.hostname }} | head -n 1 | awk '{ print $1 }'
register: lookupip
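
The getent call only captures the first resolved IPv4 address; the check's value comes from comparing it to the host's facts afterwards. A sketch of the comparison step that follows the registered lookupip (a warning rather than a hard failure is assumed here):

- name: Warn when the resolved address does not match the configured IP (sketch)
  debug:
    msg: >
      {{ openshift.common.hostname }} resolved to {{ lookupip.stdout }},
      but openshift.common.ip is {{ openshift.common.ip }}
  when: lookupip.stdout != openshift.common.ip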
diff --git a/playbooks/common/openshift-docker/config.yml b/playbooks/common/openshift-docker/config.yml
deleted file mode 100644
index 092d5533c..000000000
--- a/playbooks/common/openshift-docker/config.yml
+++ /dev/null
@@ -1,9 +0,0 @@
-- name: Configure docker hosts
- hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config
- vars:
- docker_additional_registries: "{{ lookup('oo_option', 'docker_additional_registries') | oo_split }}"
- docker_insecure_registries: "{{ lookup('oo_option', 'docker_insecure_registries') | oo_split }}"
- docker_blocked_registries: "{{ lookup('oo_option', 'docker_blocked_registries') | oo_split }}"
- roles:
- - openshift_facts
- - openshift_docker
diff --git a/playbooks/common/openshift-docker/filter_plugins b/playbooks/common/openshift-docker/filter_plugins
deleted file mode 120000
index 99a95e4ca..000000000
--- a/playbooks/common/openshift-docker/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/common/openshift-docker/roles b/playbooks/common/openshift-docker/roles
deleted file mode 120000
index 20c4c58cf..000000000
--- a/playbooks/common/openshift-docker/roles
+++ /dev/null
@@ -1 +0,0 @@
-../../../roles \ No newline at end of file
diff --git a/playbooks/common/openshift-etcd/config.yml b/playbooks/common/openshift-etcd/config.yml
index 93eb157cb..a95de8cf3 100644
--- a/playbooks/common/openshift-etcd/config.yml
+++ b/playbooks/common/openshift-etcd/config.yml
@@ -1,21 +1,14 @@
---
- name: Set etcd facts needed for generating certs
hosts: oo_etcd_to_config
+ any_errors_fatal: true
roles:
- openshift_facts
tasks:
- openshift_facts:
- role: "{{ item.role }}"
- local_facts: "{{ item.local_facts }}"
- with_items:
- - role: common
- local_facts:
- hostname: "{{ openshift_hostname | default(None) }}"
- public_hostname: "{{ openshift_public_hostname | default(None) }}"
- deployment_type: "{{ openshift_deployment_type }}"
- - role: etcd
- local_facts:
- etcd_image: "{{ osm_etcd_image | default(None) }}"
+ role: etcd
+ local_facts:
+ etcd_image: "{{ osm_etcd_image | default(None) }}"
- name: Check status of etcd certificates
stat:
path: "{{ item }}"
@@ -30,6 +23,8 @@
etcd_cert_subdir: etcd-{{ openshift.common.hostname }}
etcd_cert_config_dir: /etc/etcd
etcd_cert_prefix:
+ etcd_hostname: "{{ openshift.common.hostname }}"
+ etcd_ip: "{{ openshift.common.ip }}"
- name: Create temp directory for syncing certs
hosts: localhost
@@ -51,7 +46,7 @@
| oo_filter_list(filter_attr='etcd_server_certs_missing') }}"
sync_tmpdir: "{{ hostvars.localhost.g_etcd_mktemp.stdout }}"
roles:
- - etcd_certificates
+ - openshift_etcd_certificates
post_tasks:
- name: Create a tarball of the etcd certs
command: >
@@ -59,7 +54,7 @@
-C {{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }} .
args:
creates: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}.tgz"
- with_items: etcd_needing_server_certs
+ with_items: "{{ etcd_needing_server_certs | default([]) }}"
- name: Retrieve the etcd cert tarballs
fetch:
src: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}.tgz"
@@ -67,7 +62,7 @@
flat: yes
fail_on_missing: yes
validate_checksum: yes
- with_items: etcd_needing_server_certs
+ with_items: "{{ etcd_needing_server_certs | default([]) }}"
# Configure a first etcd host to avoid conflicts in choosing a leader
# if other members come online too quickly.
@@ -77,7 +72,7 @@
sync_tmpdir: "{{ hostvars.localhost.g_etcd_mktemp.stdout }}"
etcd_url_scheme: https
etcd_peer_url_scheme: https
- etcd_peers_group: oo_etcd_to_config
+ etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}"
pre_tasks:
- name: Ensure certificate directory exists
file:
@@ -89,8 +84,8 @@
dest: "{{ etcd_cert_config_dir }}"
when: etcd_server_certs_missing
roles:
- - etcd
- - role: nickhammond.logrotate
+ - openshift_etcd
+ - nickhammond.logrotate
# Configure the remaining etcd hosts, skipping the first one we dealt with above.
- name: Configure remaining etcd hosts
@@ -99,7 +94,7 @@
sync_tmpdir: "{{ hostvars.localhost.g_etcd_mktemp.stdout }}"
etcd_url_scheme: https
etcd_peer_url_scheme: https
- etcd_peers_group: oo_etcd_to_config
+ etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}"
pre_tasks:
- name: Ensure certificate directory exists
file:
@@ -111,7 +106,7 @@
dest: "{{ etcd_cert_config_dir }}"
when: etcd_server_certs_missing
roles:
- - etcd
+ - openshift_etcd
- role: nickhammond.logrotate
- name: Delete temporary directory on localhost
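
The recurring change in this file from bare `with_items: etcd_needing_server_certs` to the quoted, defaulted form tracks Ansible's deprecation of bare variable references in loops, and also keeps the play valid on hosts where the fact was never registered. The pattern in isolation:

# Quoted Jinja plus a default keeps the loop valid even when the
# variable was never set on this host.
- name: Iterate over certs that may not exist (sketch)
  debug:
    msg: "{{ item.etcd_cert_subdir }}"
  with_items: "{{ etcd_needing_server_certs | default([]) }}"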
diff --git a/playbooks/common/openshift-loadbalancer/config.yml b/playbooks/common/openshift-loadbalancer/config.yml
new file mode 100644
index 000000000..f4392173a
--- /dev/null
+++ b/playbooks/common/openshift-loadbalancer/config.yml
@@ -0,0 +1,5 @@
+---
+- name: Configure load balancers
+ hosts: oo_lb_to_config
+ roles:
+ - role: openshift_loadbalancer
diff --git a/playbooks/adhoc/noc/filter_plugins b/playbooks/common/openshift-loadbalancer/filter_plugins
index 99a95e4ca..99a95e4ca 120000
--- a/playbooks/adhoc/noc/filter_plugins
+++ b/playbooks/common/openshift-loadbalancer/filter_plugins
diff --git a/playbooks/common/openshift-docker/lookup_plugins b/playbooks/common/openshift-loadbalancer/lookup_plugins
index ac79701db..ac79701db 120000
--- a/playbooks/common/openshift-docker/lookup_plugins
+++ b/playbooks/common/openshift-loadbalancer/lookup_plugins
diff --git a/playbooks/common/openshift-loadbalancer/roles b/playbooks/common/openshift-loadbalancer/roles
new file mode 120000
index 000000000..e2b799b9d
--- /dev/null
+++ b/playbooks/common/openshift-loadbalancer/roles
@@ -0,0 +1 @@
+../../../roles/ \ No newline at end of file
diff --git a/playbooks/common/openshift-loadbalancer/service.yml b/playbooks/common/openshift-loadbalancer/service.yml
new file mode 100644
index 000000000..e06a14c89
--- /dev/null
+++ b/playbooks/common/openshift-loadbalancer/service.yml
@@ -0,0 +1,20 @@
+---
+- name: Populate g_service_lb host group if needed
+ hosts: localhost
+ connection: local
+ become: no
+ gather_facts: no
+ tasks:
+ - fail: msg="new_cluster_state is required to be injected in this playbook"
+ when: new_cluster_state is not defined
+
+ - name: Evaluate g_service_lb
+ add_host: name={{ item }} groups=g_service_lb
+ with_items: "{{ oo_host_group_exp | default([]) }}"
+
+- name: Change state on lb instance(s)
+ hosts: g_service_lb
+ connection: ssh
+ gather_facts: no
+ tasks:
+ - service: name=haproxy state="{{ new_cluster_state }}"
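
new_cluster_state and oo_host_group_exp are expected to be injected by the caller. A sketch of a wrapper entry point supplying them (the `lb` group name is a placeholder):

- include: ../common/openshift-loadbalancer/service.yml
  vars:
    new_cluster_state: restarted
    oo_host_group_exp: "{{ groups['lb'] }}"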
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
index acd2f5b11..7a59f3ea3 100644
--- a/playbooks/common/openshift-master/config.yml
+++ b/playbooks/common/openshift-master/config.yml
@@ -1,6 +1,9 @@
---
- name: Set master facts and determine if external etcd certs need to be generated
hosts: oo_masters_to_config
+ vars:
+ t_oo_option_master_debug_level: "{{ lookup('oo_option', 'openshift_master_debug_level') }}"
+
pre_tasks:
- name: Check for RPM generated config marker file .config_managed
stat:
@@ -28,42 +31,43 @@
| default([]))
| oo_collect('openshift.common.hostname')
| default(none, true) }}"
-
+
- set_fact:
- openshift_master_debug_level: "{{ lookup('oo_option', 'openshift_master_debug_level') | default(openshift.common.debug_level, true) }}"
- when: openshift_master_debug_level is not defined
+ openshift_master_debug_level: "{{ t_oo_option_master_debug_level }}"
+ when: openshift_master_debug_level is not defined and t_oo_option_master_debug_level != ""
+ - set_fact:
+ openshift_master_default_subdomain: "{{ lookup('oo_option', 'openshift_master_default_subdomain') | default(None, true) }}"
+ when: openshift_master_default_subdomain is not defined
+ - set_fact:
+ openshift_hosted_metrics_deploy: "{{ lookup('oo_option', 'openshift_hosted_metrics_deploy') | default(false, true) }}"
+ when: openshift_hosted_metrics_deploy is not defined
+ - set_fact:
+ openshift_hosted_metrics_duration: "{{ lookup('oo_option', 'openshift_hosted_metrics_duration') | default(7) }}"
+ when: openshift_hosted_metrics_duration is not defined
+ - set_fact:
+ openshift_hosted_metrics_resolution: "{{ lookup('oo_option', 'openshift_hosted_metrics_resolution') | default('10s', true) }}"
+ when: openshift_hosted_metrics_resolution is not defined
roles:
- openshift_facts
post_tasks:
- openshift_facts:
- role: "{{ item.role }}"
- local_facts: "{{ item.local_facts }}"
- with_items:
- - role: common
- local_facts:
- hostname: "{{ openshift_hostname | default(None) }}"
- ip: "{{ openshift_ip | default(None) }}"
- public_hostname: "{{ openshift_public_hostname | default(None) }}"
- public_ip: "{{ openshift_public_ip | default(None) }}"
- deployment_type: "{{ openshift_deployment_type }}"
- - role: master
- local_facts:
- api_port: "{{ openshift_master_api_port | default(None) }}"
- api_url: "{{ openshift_master_api_url | default(None) }}"
- api_use_ssl: "{{ openshift_master_api_use_ssl | default(None) }}"
- controllers_port: "{{ openshift_master_controllers_port | default(None) }}"
- public_api_url: "{{ openshift_master_public_api_url | default(None) }}"
- cluster_hostname: "{{ openshift_master_cluster_hostname | default(None) }}"
- cluster_public_hostname: "{{ openshift_master_cluster_public_hostname | default(None) }}"
- console_path: "{{ openshift_master_console_path | default(None) }}"
- console_port: "{{ openshift_master_console_port | default(None) }}"
- console_url: "{{ openshift_master_console_url | default(None) }}"
- console_use_ssl: "{{ openshift_master_console_use_ssl | default(None) }}"
- public_console_url: "{{ openshift_master_public_console_url | default(None) }}"
- portal_net: "{{ openshift_master_portal_net | default(None) }}"
- ha: "{{ openshift_master_ha | default(groups.oo_masters | length > 1) }}"
- master_count: "{{ openshift_master_count | default(groups.oo_masters | length) }}"
+ role: master
+ local_facts:
+ api_port: "{{ openshift_master_api_port | default(None) }}"
+ api_url: "{{ openshift_master_api_url | default(None) }}"
+ api_use_ssl: "{{ openshift_master_api_use_ssl | default(None) }}"
+ controllers_port: "{{ openshift_master_controllers_port | default(None) }}"
+ public_api_url: "{{ openshift_master_public_api_url | default(None) }}"
+ cluster_hostname: "{{ openshift_master_cluster_hostname | default(None) }}"
+ cluster_public_hostname: "{{ openshift_master_cluster_public_hostname | default(None) }}"
+ console_path: "{{ openshift_master_console_path | default(None) }}"
+ console_port: "{{ openshift_master_console_port | default(None) }}"
+ console_url: "{{ openshift_master_console_url | default(None) }}"
+ console_use_ssl: "{{ openshift_master_console_use_ssl | default(None) }}"
+ public_console_url: "{{ openshift_master_public_console_url | default(None) }}"
+ ha: "{{ openshift_master_ha | default(groups.oo_masters | length > 1) }}"
+ master_count: "{{ openshift_master_count | default(groups.oo_masters | length) }}"
- openshift_facts:
role: hosted
openshift_env:
@@ -83,6 +87,8 @@
etcd_cert_subdir: openshift-master-{{ openshift.common.hostname }}
etcd_cert_config_dir: "{{ openshift.common.config_base }}/master"
etcd_cert_prefix: master.etcd-
+ etcd_hostname: "{{ openshift.common.hostname }}"
+ etcd_ip: "{{ openshift.common.ip }}"
when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config
- name: Create temp directory for syncing certs
@@ -106,7 +112,7 @@
| oo_filter_list(filter_attr='etcd_client_certs_missing') }}"
sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
roles:
- - etcd_certificates
+ - openshift_etcd_certificates
post_tasks:
- name: Create a tarball of the etcd certs
command: >
@@ -114,7 +120,7 @@
-C {{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }} .
args:
creates: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}.tgz"
- with_items: etcd_needing_client_certs
+ with_items: "{{ etcd_needing_client_certs | default([]) }}"
- name: Retrieve the etcd cert tarballs
fetch:
src: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}.tgz"
@@ -122,7 +128,7 @@
flat: yes
fail_on_missing: yes
validate_checksum: yes
- with_items: etcd_needing_client_certs
+ with_items: "{{ etcd_needing_client_certs | default([]) }}"
- name: Copy the external etcd certs to the masters
hosts: oo_masters_to_config
@@ -172,7 +178,7 @@
- name: Check status of master certificates
stat:
path: "{{ openshift.common.config_base }}/master/{{ item }}"
- with_items: openshift_master_certs
+ with_items: "{{ openshift_master_certs }}"
register: g_master_cert_stat_result
- set_fact:
master_certs_missing: "{{ False in (g_master_cert_stat_result.results
@@ -198,6 +204,7 @@
| oo_collect('openshift.common.all_hostnames')
| oo_flatten | unique }}"
sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
+ openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
roles:
- openshift_master_certificates
post_tasks:
@@ -207,7 +214,7 @@
state: absent
when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config
with_nested:
- - masters_needing_certs
+ - "{{ masters_needing_certs | default([]) }}"
- - master.etcd-client.crt
- master.etcd-client.key
@@ -217,7 +224,7 @@
-C {{ master_generated_certs_dir }}/{{ item.master_cert_subdir }} .
args:
creates: "{{ master_generated_certs_dir }}/{{ item.master_cert_subdir }}.tgz"
- with_items: masters_needing_certs
+ with_items: "{{ masters_needing_certs | default([]) }}"
- name: Retrieve the master cert tarball from the master
fetch:
@@ -226,31 +233,7 @@
flat: yes
fail_on_missing: yes
validate_checksum: yes
- with_items: masters_needing_certs
-
-- name: Configure load balancers
- hosts: oo_lb_to_config
- vars:
- sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
- haproxy_frontend_port: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_port }}"
- haproxy_frontends:
- - name: atomic-openshift-api
- mode: tcp
- options:
- - tcplog
- binds:
- - "*:{{ hostvars[groups.oo_first_master.0].openshift.master.api_port }}"
- default_backend: atomic-openshift-api
- haproxy_backends:
- - name: atomic-openshift-api
- mode: tcp
- option: tcplog
- balance: source
- servers: "{{ hostvars | oo_select_keys(groups['oo_masters']) | oo_haproxy_backend_masters }}"
- roles:
- - role: openshift_facts
- - role: haproxy
- when: hostvars[groups.oo_first_master.0].openshift.master.ha | bool
+ with_items: "{{ masters_needing_certs | default([]) }}"
- name: Check for cached session secrets
hosts: oo_first_master
@@ -316,13 +299,14 @@
file:
path: "{{ named_certs_dir }}"
state: directory
+ mode: 0700
when: named_certs_specified | bool
- name: Land named certificates
copy: src="{{ item.certfile }}" dest="{{ named_certs_dir }}"
with_items: openshift_master_named_certificates
when: named_certs_specified | bool
- name: Land named certificate keys
- copy: src="{{ item.keyfile }}" dest="{{ named_certs_dir }}"
+ copy: src="{{ item.keyfile }}" dest="{{ named_certs_dir }}" mode=0600
with_items: openshift_master_named_certificates
when: named_certs_specified | bool
@@ -336,6 +320,14 @@
openshift_master_count: "{{ openshift.master.master_count }}"
openshift_master_session_auth_secrets: "{{ hostvars[groups.oo_first_master.0].openshift.master.session_auth_secrets }}"
openshift_master_session_encryption_secrets: "{{ hostvars[groups.oo_first_master.0].openshift.master.session_encryption_secrets }}"
+ openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+ openshift_generate_no_proxy_hosts | default(True) | bool }}"
pre_tasks:
- name: Ensure certificate directory exists
file:
@@ -357,13 +349,6 @@
group_by: key=oo_masters_deployment_type_{{ openshift.common.deployment_type }}
changed_when: False
-# Additional instance config for online deployments
-- name: Additional instance config
- hosts: oo_masters_deployment_type_online
- roles:
- - pods
- - os_env_extras
-
- name: Delete temporary directory on localhost
hosts: localhost
connection: local
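
Hoisting the lookup into t_oo_option_master_debug_level evaluates the oo_option plugin once at the play level instead of inline inside a when clause, and makes the empty-string sentinel explicit. The pattern distilled (the option name here is hypothetical):

- hosts: all
  vars:
    # oo_option is this repository's lookup plugin
    t_opt_value: "{{ lookup('oo_option', 'some_option') }}"
  tasks:
    - set_fact:
        some_setting: "{{ t_opt_value }}"
      when: some_setting is not defined and t_opt_value != ""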
diff --git a/playbooks/common/openshift-master/library b/playbooks/common/openshift-master/library
new file mode 120000
index 000000000..d0b7393d3
--- /dev/null
+++ b/playbooks/common/openshift-master/library
@@ -0,0 +1 @@
+../../../library/ \ No newline at end of file
diff --git a/playbooks/common/openshift-master/library/modify_yaml.py b/playbooks/common/openshift-master/library/modify_yaml.py
deleted file mode 100755
index a4be10ca3..000000000
--- a/playbooks/common/openshift-master/library/modify_yaml.py
+++ /dev/null
@@ -1,95 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# vim: expandtab:tabstop=4:shiftwidth=4
-
-''' modify_yaml ansible module '''
-
-import yaml
-
-DOCUMENTATION = '''
----
-module: modify_yaml
-short_description: Modify yaml key value pairs
-author: Andrew Butcher
-requirements: [ ]
-'''
-EXAMPLES = '''
-- modify_yaml:
- dest: /etc/origin/master/master-config.yaml
- yaml_key: 'kubernetesMasterConfig.masterCount'
- yaml_value: 2
-'''
-
-def main():
- ''' Modify key (supplied in jinja2 dot notation) in yaml file, setting
- the key to the desired value.
- '''
-
- # disabling pylint errors for global-variable-undefined and invalid-name
- # for 'global module' usage, since it is required to use ansible_facts
- # pylint: disable=global-variable-undefined, invalid-name,
- # redefined-outer-name
- global module
-
- module = AnsibleModule(
- argument_spec=dict(
- dest=dict(required=True),
- yaml_key=dict(required=True),
- yaml_value=dict(required=True),
- backup=dict(required=False, default=True, type='bool'),
- ),
- supports_check_mode=True,
- )
-
- dest = module.params['dest']
- yaml_key = module.params['yaml_key']
- yaml_value = module.safe_eval(module.params['yaml_value'])
- backup = module.params['backup']
-
- # Represent null values as an empty string.
- # pylint: disable=missing-docstring, unused-argument
- def none_representer(dumper, data):
- return yaml.ScalarNode(tag=u'tag:yaml.org,2002:null', value=u'')
- yaml.add_representer(type(None), none_representer)
-
- try:
- changes = []
-
- yaml_file = open(dest)
- yaml_data = yaml.safe_load(yaml_file.read())
- yaml_file.close()
-
- ptr = yaml_data
- for key in yaml_key.split('.'):
- if key not in ptr and key != yaml_key.split('.')[-1]:
- ptr[key] = {}
- elif key == yaml_key.split('.')[-1]:
- if (key in ptr and module.safe_eval(ptr[key]) != yaml_value) or (key not in ptr):
- ptr[key] = yaml_value
- changes.append((yaml_key, yaml_value))
- else:
- ptr = ptr[key]
-
- if len(changes) > 0:
- if backup:
- module.backup_local(dest)
- yaml_file = open(dest, 'w')
- yaml_string = yaml.dump(yaml_data, default_flow_style=False)
- yaml_string = yaml_string.replace('\'\'', '""')
- yaml_file.write(yaml_string)
- yaml_file.close()
-
- return module.exit_json(changed=(len(changes) > 0), changes=changes)
-
- # ignore broad-except error to avoid stack trace to ansible user
- # pylint: disable=broad-except
- except Exception, e:
- return module.fail_json(msg=str(e))
-
-# ignore pylint errors related to the module_utils import
-# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
-# import module snippets
-from ansible.module_utils.basic import *
-
-if __name__ == '__main__':
- main()
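
modify_yaml is not gone: the library symlink added just above points these playbooks at the repository-level library/ directory, so the module continues to resolve. Usage is unchanged, per the example from the module's own DOCUMENTATION:

- modify_yaml:
    dest: /etc/origin/master/master-config.yaml
    yaml_key: 'kubernetesMasterConfig.masterCount'
    yaml_value: 2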
diff --git a/playbooks/common/openshift-master/restart.yml b/playbooks/common/openshift-master/restart.yml
index 02449e40d..57a63cfee 100644
--- a/playbooks/common/openshift-master/restart.yml
+++ b/playbooks/common/openshift-master/restart.yml
@@ -97,7 +97,7 @@
name: "{{ item }}"
groups: oo_active_masters
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_sudo: "{{ g_sudo | default(omit) }}"
+ ansible_become: "{{ g_sudo | default(omit) }}"
with_items: "{{ groups.oo_masters_to_config | default([]) }}"
when: (hostvars[item]['is_active'] | default(false)) | bool
- name: Evaluate oo_current_masters
@@ -105,7 +105,7 @@
name: "{{ item }}"
groups: oo_current_masters
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_sudo: "{{ g_sudo | default(omit) }}"
+ ansible_become: "{{ g_sudo | default(omit) }}"
with_items: "{{ groups.oo_masters_to_config | default([]) }}"
when: (hostvars[item]['current_host'] | default(false)) | bool
diff --git a/playbooks/common/openshift-master/scaleup.yml b/playbooks/common/openshift-master/scaleup.yml
index 6f8151d30..6e6cb3e01 100644
--- a/playbooks/common/openshift-master/scaleup.yml
+++ b/playbooks/common/openshift-master/scaleup.yml
@@ -33,9 +33,10 @@
service: name={{ openshift.common.service_type }}-master-controllers state=restarted
- name: verify api server
command: >
- curl -k --head --silent {{ openshift.master.api_url }}
+ curl --silent --cacert {{ openshift.common.config_base }}/master/ca.crt
+ {{ openshift.master.api_url }}/healthz/ready
register: api_available_output
- until: api_available_output.stdout.find("200 OK") != -1
+ until: api_available_output.stdout == 'ok'
retries: 120
delay: 1
changed_when: false
diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml
index 7edea9160..b3491ef8d 100644
--- a/playbooks/common/openshift-node/config.yml
+++ b/playbooks/common/openshift-node/config.yml
@@ -1,10 +1,12 @@
---
- name: Gather and set facts for node hosts
hosts: oo_nodes_to_config
+ vars:
+ t_oo_option_node_debug_level: "{{ lookup('oo_option', 'openshift_node_debug_level') }}"
pre_tasks:
- set_fact:
- openshift_node_debug_level: "{{ lookup('oo_option', 'openshift_node_debug_level') | default(openshift.common.debug_level, true) }}"
- when: openshift_node_debug_level is not defined
+ openshift_node_debug_level: "{{ t_oo_option_node_debug_level }}"
+ when: openshift_node_debug_level is not defined and t_oo_option_node_debug_level != ""
roles:
- openshift_facts
tasks:
@@ -12,20 +14,11 @@
# configured, we need to make sure to set the node properties beforehand if
# we do not want the defaults
- openshift_facts:
- role: "{{ item.role }}"
- local_facts: "{{ item.local_facts }}"
- with_items:
- - role: common
- local_facts:
- hostname: "{{ openshift_hostname | default(None) }}"
- public_hostname: "{{ openshift_public_hostname | default(None) }}"
- deployment_type: "{{ openshift_deployment_type }}"
- use_flannel: "{{ openshift_use_flannel | default(None) }}"
- - role: node
- local_facts:
- labels: "{{ openshift_node_labels | default(None) }}"
- annotations: "{{ openshift_node_annotations | default(None) }}"
- schedulable: "{{ openshift_schedulable | default(openshift_scheduleable) | default(None) }}"
+ role: node
+ local_facts:
+ labels: "{{ openshift_node_labels | default(None) }}"
+ annotations: "{{ openshift_node_annotations | default(None) }}"
+ schedulable: "{{ openshift_schedulable | default(openshift_scheduleable) | default(None) }}"
- name: Check status of node certificates
stat:
path: "{{ openshift.common.config_base }}/node/{{ item }}"
@@ -43,22 +36,6 @@
node_subdir: node-{{ openshift.common.hostname }}
config_dir: "{{ openshift.common.config_base }}/generated-configs/node-{{ openshift.common.hostname }}"
node_cert_dir: "{{ openshift.common.config_base }}/node"
- - name: Check status of flannel external etcd certificates
- stat:
- path: "{{ openshift.common.config_base }}/node/{{ item }}"
- with_items:
- - node.etcd-client.crt
- - node.etcd-ca.crt
- register: g_external_etcd_flannel_cert_stat_result
- when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config and (openshift.common.use_flannel | bool)
- - set_fact:
- etcd_client_flannel_certs_missing: "{{ g_external_etcd_flannel_cert_stat_result.results
- | oo_collect(attribute='stat.exists')
- | list | intersect([false])}}"
- etcd_cert_subdir: openshift-node-{{ openshift.common.hostname }}
- etcd_cert_config_dir: "{{ openshift.common.config_base }}/node"
- etcd_cert_prefix: node.etcd-
- when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config and (openshift.common.use_flannel | bool)
- name: Create temp directory for syncing certs
hosts: localhost
@@ -71,65 +48,6 @@
register: mktemp
changed_when: False
-- name: Configure flannel etcd certificates
- hosts: oo_first_etcd
- vars:
- etcd_generated_certs_dir: /etc/etcd/generated_certs
- sync_tmpdir: "{{ hostvars.localhost.mktemp.stdout }}"
- pre_tasks:
- - set_fact:
- etcd_needing_client_certs: "{{ hostvars
- | oo_select_keys(groups['oo_nodes_to_config'])
- | oo_filter_list(filter_attr='etcd_client_flannel_certs_missing') | default([]) }}"
- when: etcd_client_flannel_certs_missing is defined and etcd_client_flannel_certs_missing
- roles:
- - role: etcd_certificates
- when: openshift_use_flannel | default(false) | bool
- post_tasks:
- - name: Create a tarball of the etcd flannel certs
- command: >
- tar -czvf {{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}.tgz
- -C {{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }} .
- args:
- creates: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}.tgz"
- with_items: etcd_needing_client_certs
- when: etcd_client_flannel_certs_missing is defined and etcd_client_flannel_certs_missing
- - name: Retrieve the etcd cert tarballs
- fetch:
- src: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}.tgz"
- dest: "{{ sync_tmpdir }}/"
- flat: yes
- fail_on_missing: yes
- validate_checksum: yes
- with_items: etcd_needing_client_certs
- when: etcd_client_flannel_certs_missing is defined and etcd_client_flannel_certs_missing
-
-- name: Copy the external etcd flannel certs to the nodes
- hosts: oo_nodes_to_config
- vars:
- sync_tmpdir: "{{ hostvars.localhost.mktemp.stdout }}"
- tasks:
- - name: Ensure certificate directory exists
- file:
- path: "{{ openshift.common.config_base }}/node"
- state: directory
- when: etcd_client_flannel_certs_missing is defined and etcd_client_flannel_certs_missing
- - name: Unarchive the tarball on the master
- unarchive:
- src: "{{ sync_tmpdir }}/{{ etcd_cert_subdir }}.tgz"
- dest: "{{ etcd_cert_config_dir }}"
- when: etcd_client_flannel_certs_missing is defined and etcd_client_flannel_certs_missing
- - file:
- path: "{{ etcd_cert_config_dir }}/{{ item }}"
- owner: root
- group: root
- mode: 0600
- with_items:
- - node.etcd-client.crt
- - node.etcd-client.key
- - node.etcd-ca.crt
- when: etcd_client_flannel_certs_missing is defined and etcd_client_flannel_certs_missing
-
- name: Create node certificates
hosts: oo_first_master
vars:
@@ -148,7 +66,7 @@
-C {{ item.config_dir }} .
args:
creates: "{{ item.config_dir }}.tgz"
- with_items: nodes_needing_certs
+ with_items: "{{ nodes_needing_certs | default([]) }}"
- name: Retrieve the node config tarballs from the master
fetch:
@@ -157,7 +75,7 @@
flat: yes
fail_on_missing: yes
validate_checksum: yes
- with_items: nodes_needing_certs
+ with_items: "{{ nodes_needing_certs | default([]) }}"
- name: Deploy node certificates
hosts: oo_nodes_to_config
@@ -187,7 +105,7 @@
name: "{{ item }}"
groups: oo_containerized_master_nodes
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_sudo: "{{ g_sudo | default(omit) }}"
+ ansible_become: "{{ g_sudo | default(omit) }}"
with_items: "{{ groups.oo_nodes_to_config | default([]) }}"
when: hostvars[item].openshift.common.is_containerized | bool and (item in groups.oo_nodes_to_config and item in groups.oo_masters_to_config)
@@ -197,6 +115,14 @@
vars:
openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}"
openshift_node_first_master_ip: "{{ hostvars[groups.oo_first_master.0].openshift.common.ip }}"
+ openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+ openshift_generate_no_proxy_hosts | default(True) | bool }}"
roles:
- openshift_node
@@ -205,9 +131,96 @@
vars:
openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}"
openshift_node_first_master_ip: "{{ hostvars[groups.oo_first_master.0].openshift.common.ip }}"
+ openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+ openshift_generate_no_proxy_hosts | default(True) | bool }}"
roles:
- openshift_node
+- name: Gather and set facts for flannel certificates
+ hosts: oo_nodes_to_config
+ tasks:
+ - name: Check status of flannel external etcd certificates
+ stat:
+ path: "{{ openshift.common.config_base }}/node/{{ item }}"
+ with_items:
+ - node.etcd-client.crt
+ - node.etcd-ca.crt
+ register: g_external_etcd_flannel_cert_stat_result
+ when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config and (openshift.common.use_flannel | bool)
+ - set_fact:
+ etcd_client_flannel_certs_missing: "{{ False in g_external_etcd_flannel_cert_stat_result.results
+ | oo_collect(attribute='stat.exists')
+ | list }}"
+ etcd_cert_subdir: openshift-node-{{ openshift.common.hostname }}
+ etcd_cert_config_dir: "{{ openshift.common.config_base }}/node"
+ etcd_cert_prefix: node.etcd-
+ etcd_hostname: "{{ openshift.common.hostname }}"
+ etcd_ip: "{{ openshift.common.ip }}"
+ when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 and (openshift.common.use_flannel | bool)
+
+- name: Configure flannel etcd certificates
+ hosts: oo_first_etcd
+ vars:
+ etcd_generated_certs_dir: /etc/etcd/generated_certs
+ sync_tmpdir: "{{ hostvars.localhost.mktemp.stdout }}"
+ pre_tasks:
+ - set_fact:
+ etcd_needing_client_certs: "{{ hostvars
+ | oo_select_keys(groups['oo_nodes_to_config'])
+ | oo_filter_list('etcd_client_flannel_certs_missing') | default([]) }}"
+ roles:
+ - role: openshift_etcd_certificates
+ when: openshift_use_flannel | default(false) | bool
+ post_tasks:
+ - name: Create a tarball of the etcd flannel certs
+ command: >
+ tar -czvf {{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}.tgz
+ -C {{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }} .
+ args:
+ creates: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}.tgz"
+ with_items: etcd_needing_client_certs | default([])
+ - name: Retrieve the etcd cert tarballs
+ fetch:
+ src: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}.tgz"
+ dest: "{{ sync_tmpdir }}/"
+ flat: yes
+ fail_on_missing: yes
+ validate_checksum: yes
+ with_items: etcd_needing_client_certs | default([])
+
+- name: Copy the external etcd flannel certs to the nodes
+ hosts: oo_nodes_to_config
+ vars:
+ sync_tmpdir: "{{ hostvars.localhost.mktemp.stdout }}"
+ tasks:
+ - name: Ensure certificate directory exists
+ file:
+ path: "{{ openshift.common.config_base }}/node"
+ state: directory
+ when: etcd_client_flannel_certs_missing | default(false) | bool
+ - name: Unarchive the tarball on the master
+ unarchive:
+ src: "{{ sync_tmpdir }}/{{ etcd_cert_subdir }}.tgz"
+ dest: "{{ etcd_cert_config_dir }}"
+ when: etcd_client_flannel_certs_missing | default(false) | bool
+ - file:
+ path: "{{ etcd_cert_config_dir }}/{{ item }}"
+ owner: root
+ group: root
+ mode: 0600
+ with_items:
+ - node.etcd-client.crt
+ - node.etcd-client.key
+ - node.etcd-ca.crt
+ when: etcd_client_flannel_certs_missing | default(false) | bool
+
+
- name: Additional node config
hosts: oo_nodes_to_config
vars:
@@ -235,14 +248,6 @@
- file: name={{ mktemp.stdout }} state=absent
changed_when: False
-# Additional config for online type deployments
-- name: Additional instance config
- hosts: oo_nodes_deployment_type_online
- gather_facts: no
- roles:
- - os_env_extras
- - os_env_extras_node
-
- name: Set schedulability
hosts: oo_first_master
vars:
@@ -258,9 +263,10 @@
# Using curl here since the uri module requires python-httplib2 and
# wait_for port doesn't provide health information.
command: >
- curl -k --head --silent {{ openshift.master.api_url }}
+ curl --silent --cacert {{ openshift.common.config_base }}/master/ca.crt
+ {{ openshift.master.api_url }}/healthz/ready
register: api_available_output
- until: api_available_output.stdout.find("200 OK") != -1
+ until: api_available_output.stdout == 'ok'
retries: 120
delay: 1
changed_when: false
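
The readiness probe above stops scraping curl's HTTP status line and instead polls the dedicated /healthz/ready endpoint over TLS, validated against the cluster CA, until it returns the literal string 'ok'. A minimal standalone sketch of the same wait; the hostname and CA path below are placeholders, not values from this repository:

    - name: Wait for the master API to report ready
      command: >
        curl --silent --cacert /etc/origin/master/ca.crt
        https://master.example.com:8443/healthz/ready
      register: api_ready
      until: api_ready.stdout == 'ok'
      retries: 120
      delay: 1
      changed_when: false
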
diff --git a/playbooks/common/openshift-node/scaleup.yml b/playbooks/common/openshift-node/scaleup.yml
index d36f7acea..1d79db353 100644
--- a/playbooks/common/openshift-node/scaleup.yml
+++ b/playbooks/common/openshift-node/scaleup.yml
@@ -1,6 +1,11 @@
---
- include: ../openshift-cluster/evaluate_groups.yml
+- name: Gather facts
+ hosts: oo_etcd_to_config:oo_masters_to_config:oo_nodes_to_config
+ roles:
+ - openshift_facts
+
- name: Configure docker hosts
hosts: oo_nodes_to_config
vars:
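
The fact-gathering play added above runs the openshift_facts role across the etcd, master, and node groups before anything else touches them, so later plays can dereference openshift.* hostvars on a first run. A hedged sketch of a consumer of those facts (the debug task is illustrative only; the group and fact names come from the surrounding playbooks):

    - name: Example consumer of gathered facts
      hosts: oo_nodes_to_config
      tasks:
        - debug:
            msg: "First master IP: {{ hostvars[groups.oo_first_master.0].openshift.common.ip }}"
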
diff --git a/playbooks/gce/openshift-cluster/cluster_hosts.yml b/playbooks/gce/openshift-cluster/cluster_hosts.yml
index b989e15fa..a7baea915 100644
--- a/playbooks/gce/openshift-cluster/cluster_hosts.yml
+++ b/playbooks/gce/openshift-cluster/cluster_hosts.yml
@@ -16,6 +16,6 @@ g_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-node'] | defa
g_new_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-new-node'] | default([])) }}"
-g_infra_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type-infra']) | default([]) }}"
+g_infra_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type-infra'] | default([])) }}"
g_compute_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type-compute'] | default([])) }}"
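
The one-line fix above is about evaluation order: intersect() runs before a trailing default(), so when the tag group does not exist the filter fails on the undefined argument and the default never gets a chance to apply. Wrapping the possibly-undefined group itself is the working pattern:

    # broken: intersect() sees the undefined group before default() applies
    g_infra_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type-infra']) | default([]) }}"
    # fixed: the missing group collapses to [] before intersect() runs
    g_infra_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type-infra'] | default([])) }}"
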
diff --git a/playbooks/gce/openshift-cluster/config.yml b/playbooks/gce/openshift-cluster/config.yml
index ba37a3a1f..b973c513f 100644
--- a/playbooks/gce/openshift-cluster/config.yml
+++ b/playbooks/gce/openshift-cluster/config.yml
@@ -1,18 +1,33 @@
---
+- hosts: localhost
+ gather_facts: no
+ tasks:
+ - include_vars: vars.yml
+ - include_vars: cluster_hosts.yml
+ - add_host:
+ name: "{{ item }}"
+ groups: l_oo_all_hosts
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_become: "{{ deployment_vars[deployment_type].become }}"
+ with_items: g_all_hosts
+
+- hosts: l_oo_all_hosts
+ gather_facts: no
+ tasks:
+ - include_vars: vars.yml
+ - include_vars: cluster_hosts.yml
+
- include: ../../common/openshift-cluster/config.yml
- vars_files:
- - ../../gce/openshift-cluster/vars.yml
- - ../../gce/openshift-cluster/cluster_hosts.yml
vars:
g_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- g_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ g_sudo: "{{ deployment_vars[deployment_type].become }}"
g_nodeonmaster: true
openshift_cluster_id: "{{ cluster_id }}"
openshift_debug_level: "{{ debug_level }}"
openshift_deployment_type: "{{ deployment_type }}"
openshift_hostname: "{{ gce_private_ip }}"
openshift_registry_selector: 'type=infra'
- openshift_router_selector: 'type=infra'
+ openshift_hosted_router_selector: 'type=infra'
openshift_infra_nodes: "{{ g_infra_hosts }}"
openshift_master_cluster_method: 'native'
openshift_use_openshift_sdn: "{{ lookup('oo_option', 'use_openshift_sdn') }}"
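
The two bootstrap plays added here replace vars_files on the include: localhost loads vars.yml and cluster_hosts.yml, registers every host into a synthetic l_oo_all_hosts group with the right connection settings, and the same vars are then loaded host-side so later plays can reference deployment_vars everywhere. Alongside this, ansible_sudo and the sudo key give way to ansible_become/become for Ansible 2.x. The pattern, reduced to a skeleton (file names assumed to sit next to the playbook):

    - hosts: localhost
      gather_facts: no
      tasks:
        - include_vars: vars.yml
        - add_host:
            name: "{{ item }}"
            groups: l_oo_all_hosts
          with_items: "{{ g_all_hosts }}"

    - hosts: l_oo_all_hosts
      gather_facts: no
      tasks:
        - include_vars: vars.yml
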
diff --git a/playbooks/gce/openshift-cluster/library/gce.py b/playbooks/gce/openshift-cluster/library/gce.py
new file mode 100644
index 000000000..fcaa3b850
--- /dev/null
+++ b/playbooks/gce/openshift-cluster/library/gce.py
@@ -0,0 +1,543 @@
+#!/usr/bin/python
+# Copyright 2013 Google Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: gce
+version_added: "1.4"
+short_description: create or terminate GCE instances
+description:
+ - Creates or terminates Google Compute Engine (GCE) instances. See
+ U(https://cloud.google.com/products/compute-engine) for an overview.
+ Full install/configuration instructions for the gce* modules can
+ be found in the comments of ansible/test/gce_tests.py.
+options:
+ image:
+ description:
+ - image string to use for the instance
+ required: false
+ default: "debian-7"
+ instance_names:
+ description:
+ - a comma-separated list of instance names to create or destroy
+ required: false
+ default: null
+ machine_type:
+ description:
+ - machine type to use for the instance, use 'n1-standard-1' by default
+ required: false
+ default: "n1-standard-1"
+ metadata:
+ description:
+ - a hash/dictionary of custom data for the instance;
+ '{"key":"value", ...}'
+ required: false
+ default: null
+ service_account_email:
+ version_added: "1.5.1"
+ description:
+ - service account email
+ required: false
+ default: null
+ service_account_permissions:
+ version_added: "2.0"
+ description:
+ - service account permissions (see
+ U(https://cloud.google.com/sdk/gcloud/reference/compute/instances/create),
+ --scopes section for detailed information)
+ required: false
+ default: null
+ choices: [
+ "bigquery", "cloud-platform", "compute-ro", "compute-rw",
+ "computeaccounts-ro", "computeaccounts-rw", "datastore", "logging-write",
+ "monitoring", "sql", "sql-admin", "storage-full", "storage-ro",
+ "storage-rw", "taskqueue", "userinfo-email"
+ ]
+ pem_file:
+ version_added: "1.5.1"
+ description:
+ - path to the pem file associated with the service account email
+ required: false
+ default: null
+ project_id:
+ version_added: "1.5.1"
+ description:
+ - your GCE project ID
+ required: false
+ default: null
+ name:
+ description:
+ - identifier when working with a single instance
+ required: false
+ network:
+ description:
+ - name of the network, 'default' will be used if not specified
+ required: false
+ default: "default"
+ persistent_boot_disk:
+ description:
+ - if set, create the instance with a persistent boot disk
+ required: false
+ default: "false"
+ disks:
+ description:
+ - a list of persistent disks to attach to the instance; a string value
+ gives the name of the disk; alternatively, a dictionary value can
+ define 'name' and 'mode' ('READ_ONLY' or 'READ_WRITE'). The first entry
+ will be the boot disk (which must be READ_WRITE).
+ required: false
+ default: null
+ version_added: "1.7"
+ state:
+ description:
+ - desired state of the resource
+ required: false
+ default: "present"
+ choices: ["active", "present", "absent", "deleted"]
+ tags:
+ description:
+ - a comma-separated list of tags to associate with the instance
+ required: false
+ default: null
+ zone:
+ description:
+ - the GCE zone to use
+ required: true
+ default: "us-central1-a"
+ ip_forward:
+ version_added: "1.9"
+ description:
+ - set to true if the instance can forward ip packets (useful for
+ gateways)
+ required: false
+ default: "false"
+ external_ip:
+ version_added: "1.9"
+ description:
+ - type of external ip, ephemeral by default
+ required: false
+ default: "ephemeral"
+ disk_auto_delete:
+ version_added: "1.9"
+ description:
+ - if set boot disk will be removed after instance destruction
+ required: false
+ default: "true"
+
+requirements:
+ - "python >= 2.6"
+ - "apache-libcloud >= 0.13.3"
+notes:
+ - Either I(name) or I(instance_names) is required.
+author: "Eric Johnson (@erjohnso) <erjohnso@google.com>"
+'''
+
+EXAMPLES = '''
+# Basic provisioning example. Create a single Debian 7 instance in the
+# us-central1-a Zone of n1-standard-1 machine type.
+- local_action:
+ module: gce
+ name: test-instance
+ zone: us-central1-a
+ machine_type: n1-standard-1
+ image: debian-7
+
+# Example using defaults and with metadata to create a single 'foo' instance
+- local_action:
+ module: gce
+ name: foo
+ metadata: '{"db":"postgres", "group":"qa", "id":500}'
+
+
+# Launch instances from a control node, run some tasks on the new instances,
+# and then terminate them
+- name: Create a sandbox instance
+ hosts: localhost
+ vars:
+ names: foo,bar
+ machine_type: n1-standard-1
+ image: debian-6
+ zone: us-central1-a
+ service_account_email: unique-email@developer.gserviceaccount.com
+ pem_file: /path/to/pem_file
+ project_id: project-id
+ tasks:
+ - name: Launch instances
+ local_action: gce instance_names={{names}} machine_type={{machine_type}}
+ image={{image}} zone={{zone}}
+ service_account_email={{ service_account_email }}
+ pem_file={{ pem_file }} project_id={{ project_id }}
+ register: gce
+ - name: Wait for SSH to come up
+ local_action: wait_for host={{item.public_ip}} port=22 delay=10
+ timeout=60 state=started
+ with_items: "{{ gce.instance_data }}"
+
+- name: Configure instance(s)
+ hosts: launched
+ sudo: True
+ roles:
+ - my_awesome_role
+ - my_awesome_tasks
+
+- name: Terminate instances
+ hosts: localhost
+ connection: local
+ tasks:
+ - name: Terminate instances that were previously launched
+ local_action:
+ module: gce
+ state: 'absent'
+ instance_names: "{{ gce.instance_names }}"
+
+'''
+
+try:
+ import libcloud
+ from libcloud.compute.types import Provider
+ from libcloud.compute.providers import get_driver
+ from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
+ ResourceExistsError, ResourceInUseError, ResourceNotFoundError
+ _ = Provider.GCE
+ HAS_LIBCLOUD = True
+except ImportError:
+ HAS_LIBCLOUD = False
+
+try:
+ from ast import literal_eval
+ HAS_PYTHON26 = True
+except ImportError:
+ HAS_PYTHON26 = False
+
+
+def get_instance_info(inst):
+ """Retrieves instance information from an instance object and returns it
+ as a dictionary.
+
+ """
+ metadata = {}
+ if 'metadata' in inst.extra and 'items' in inst.extra['metadata']:
+ for md in inst.extra['metadata']['items']:
+ metadata[md['key']] = md['value']
+
+ try:
+ netname = inst.extra['networkInterfaces'][0]['network'].split('/')[-1]
+ except:
+ netname = None
+ if 'disks' in inst.extra:
+ disk_names = [disk_info['source'].split('/')[-1]
+ for disk_info
+ in sorted(inst.extra['disks'],
+ key=lambda disk_info: disk_info['index'])]
+ else:
+ disk_names = []
+
+ if len(inst.public_ips) == 0:
+ public_ip = None
+ else:
+ public_ip = inst.public_ips[0]
+
+ return({
+ 'image': inst.image is not None and inst.image.split('/')[-1] or None,
+ 'disks': disk_names,
+ 'machine_type': inst.size,
+ 'metadata': metadata,
+ 'name': inst.name,
+ 'network': netname,
+ 'private_ip': inst.private_ips[0],
+ 'public_ip': public_ip,
+ 'status': ('status' in inst.extra) and inst.extra['status'] or None,
+ 'tags': ('tags' in inst.extra) and inst.extra['tags'] or [],
+ 'zone': ('zone' in inst.extra) and inst.extra['zone'].name or None,
+ })
+
+
+def create_instances(module, gce, instance_names):
+ """Creates new instances. Attributes other than instance_names are picked
+ up from 'module'
+
+ module : AnsibleModule object
+ gce: authenticated GCE libcloud driver
+ instance_names: python list of instance names to create
+
+ Returns:
+ A list of dictionaries with instance information
+ about the instances that were launched.
+
+ """
+ image = module.params.get('image')
+ machine_type = module.params.get('machine_type')
+ metadata = module.params.get('metadata')
+ network = module.params.get('network')
+ persistent_boot_disk = module.params.get('persistent_boot_disk')
+ disks = module.params.get('disks')
+ state = module.params.get('state')
+ tags = module.params.get('tags')
+ zone = module.params.get('zone')
+ ip_forward = module.params.get('ip_forward')
+ external_ip = module.params.get('external_ip')
+ disk_auto_delete = module.params.get('disk_auto_delete')
+ service_account_permissions = module.params.get('service_account_permissions')
+ service_account_email = module.params.get('service_account_email')
+
+ if external_ip == "none":
+ external_ip = None
+
+ new_instances = []
+ changed = False
+
+ lc_image = gce.ex_get_image(image)
+ lc_disks = []
+ disk_modes = []
+ for i, disk in enumerate(disks or []):
+ if isinstance(disk, dict):
+ lc_disks.append(gce.ex_get_volume(disk['name']))
+ disk_modes.append(disk['mode'])
+ else:
+ lc_disks.append(gce.ex_get_volume(disk))
+ # boot disk is implicitly READ_WRITE
+ disk_modes.append('READ_ONLY' if i > 0 else 'READ_WRITE')
+ lc_network = gce.ex_get_network(network)
+ lc_machine_type = gce.ex_get_size(machine_type)
+ lc_zone = gce.ex_get_zone(zone)
+
+ # Try to convert the user's metadata value into the format expected
+ # by GCE. First try to ensure user has proper quoting of a
+ # dictionary-like syntax using 'literal_eval', then convert the python
+ # dict into a python list of 'key' / 'value' dicts. Should end up
+ # with:
+ # [ {'key': key1, 'value': value1}, {'key': key2, 'value': value2}, ...]
+ if metadata:
+ if isinstance(metadata, dict):
+ md = metadata
+ else:
+ try:
+ md = literal_eval(str(metadata))
+ if not isinstance(md, dict):
+ raise ValueError('metadata must be a dict')
+ except ValueError as e:
+ module.fail_json(msg='bad metadata: %s' % str(e))
+ except SyntaxError as e:
+ module.fail_json(msg='bad metadata syntax')
+
+ if hasattr(libcloud, '__version__') and libcloud.__version__ < '0.15':
+ items = []
+ for k, v in md.items():
+ items.append({"key": k, "value": v})
+ metadata = {'items': items}
+ else:
+ metadata = md
+
+ ex_sa_perms = []
+ bad_perms = []
+ if service_account_permissions:
+ for perm in service_account_permissions:
+ if perm not in gce.SA_SCOPES_MAP.keys():
+ bad_perms.append(perm)
+ if len(bad_perms) > 0:
+ module.fail_json(msg='bad permissions: %s' % str(bad_perms))
+ if service_account_email:
+ ex_sa_perms.append({'email': service_account_email})
+ else:
+ ex_sa_perms.append({'email': "default"})
+ ex_sa_perms[0]['scopes'] = service_account_permissions
+
+ # These variables all have default values but check just in case
+ if not lc_image or not lc_network or not lc_machine_type or not lc_zone:
+ module.fail_json(msg='Missing required create instance variable',
+ changed=False)
+
+ for name in instance_names:
+ pd = None
+ if lc_disks:
+ pd = lc_disks[0]
+ elif persistent_boot_disk:
+ try:
+ pd = gce.create_volume(None, "%s" % name, image=lc_image)
+ except ResourceExistsError:
+ pd = gce.ex_get_volume("%s" % name, lc_zone)
+ inst = None
+ try:
+ inst = gce.create_node(
+ name, lc_machine_type, lc_image, location=lc_zone,
+ ex_network=network, ex_tags=tags, ex_metadata=metadata,
+ ex_boot_disk=pd, ex_can_ip_forward=ip_forward,
+ external_ip=external_ip, ex_disk_auto_delete=disk_auto_delete,
+ ex_service_accounts=ex_sa_perms
+ )
+ changed = True
+ except ResourceExistsError:
+ inst = gce.ex_get_node(name, lc_zone)
+ except GoogleBaseError as e:
+ module.fail_json(msg='Unexpected error attempting to create ' +
+ 'instance %s, error: %s' % (name, e.value))
+
+ for i, lc_disk in enumerate(lc_disks):
+ # Check whether the disk is already attached
+ if (len(inst.extra['disks']) > i):
+ attached_disk = inst.extra['disks'][i]
+ if attached_disk['source'] != lc_disk.extra['selfLink']:
+ module.fail_json(
+ msg=("Disk at index %d does not match: requested=%s found=%s" % (
+ i, lc_disk.extra['selfLink'], attached_disk['source'])))
+ elif attached_disk['mode'] != disk_modes[i]:
+ module.fail_json(
+ msg=("Disk at index %d is in the wrong mode: requested=%s found=%s" % (
+ i, disk_modes[i], attached_disk['mode'])))
+ else:
+ continue
+ gce.attach_volume(inst, lc_disk, ex_mode=disk_modes[i])
+ # Work around libcloud bug: attached volumes don't get added
+ # to the instance metadata. get_instance_info() only cares about
+ # source and index.
+ if len(inst.extra['disks']) != i+1:
+ inst.extra['disks'].append(
+ {'source': lc_disk.extra['selfLink'], 'index': i})
+
+ if inst:
+ new_instances.append(inst)
+
+ instance_names = []
+ instance_json_data = []
+ for inst in new_instances:
+ d = get_instance_info(inst)
+ instance_names.append(d['name'])
+ instance_json_data.append(d)
+
+ return (changed, instance_json_data, instance_names)
+
+
+def terminate_instances(module, gce, instance_names, zone_name):
+ """Terminates a list of instances.
+
+ module: Ansible module object
+ gce: authenticated GCE connection object
+ instance_names: a list of instance names to terminate
+ zone_name: the zone where the instances reside prior to termination
+
+ Returns a dictionary of instance names that were terminated.
+
+ """
+ changed = False
+ terminated_instance_names = []
+ for name in instance_names:
+ inst = None
+ try:
+ inst = gce.ex_get_node(name, zone_name)
+ except ResourceNotFoundError:
+ pass
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+ if inst:
+ gce.destroy_node(inst)
+ terminated_instance_names.append(inst.name)
+ changed = True
+
+ return (changed, terminated_instance_names)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ image=dict(default='debian-7'),
+ instance_names=dict(),
+ machine_type=dict(default='n1-standard-1'),
+ metadata=dict(),
+ name=dict(),
+ network=dict(default='default'),
+ persistent_boot_disk=dict(type='bool', default=False),
+ disks=dict(type='list'),
+ state=dict(choices=['active', 'present', 'absent', 'deleted'],
+ default='present'),
+ tags=dict(type='list'),
+ zone=dict(default='us-central1-a'),
+ service_account_email=dict(),
+ service_account_permissions=dict(type='list'),
+ pem_file=dict(),
+ project_id=dict(),
+ ip_forward=dict(type='bool', default=False),
+ external_ip=dict(choices=['ephemeral', 'none'],
+ default='ephemeral'),
+ disk_auto_delete=dict(type='bool', default=True),
+ )
+ )
+
+ if not HAS_PYTHON26:
+ module.fail_json(msg="GCE module requires python's 'ast' module, python v2.6+")
+ if not HAS_LIBCLOUD:
+ module.fail_json(msg='libcloud with GCE support (0.13.3+) required for this module')
+
+ gce = gce_connect(module)
+
+ image = module.params.get('image')
+ instance_names = module.params.get('instance_names')
+ machine_type = module.params.get('machine_type')
+ metadata = module.params.get('metadata')
+ name = module.params.get('name')
+ network = module.params.get('network')
+ persistent_boot_disk = module.params.get('persistent_boot_disk')
+ state = module.params.get('state')
+ tags = module.params.get('tags')
+ zone = module.params.get('zone')
+ ip_forward = module.params.get('ip_forward')
+ changed = False
+
+ inames = []
+ if isinstance(instance_names, list):
+ inames = instance_names
+ elif isinstance(instance_names, str):
+ inames = instance_names.split(',')
+ if name:
+ inames.append(name)
+ if not inames:
+ module.fail_json(msg='Must specify a "name" or "instance_names"',
+ changed=False)
+ if not zone:
+ module.fail_json(msg='Must specify a "zone"', changed=False)
+
+ json_output = {'zone': zone}
+ if state in ['absent', 'deleted']:
+ json_output['state'] = 'absent'
+ (changed, terminated_instance_names) = terminate_instances(
+ module, gce, inames, zone)
+
+ # based on what user specified, return the same variable, although
+ # value could be different if an instance could not be destroyed
+ if instance_names:
+ json_output['instance_names'] = terminated_instance_names
+ elif name:
+ json_output['name'] = name
+
+ elif state in ['active', 'present']:
+ json_output['state'] = 'present'
+ (changed, instance_data, instance_name_list) = create_instances(
+ module, gce, inames)
+ json_output['instance_data'] = instance_data
+ if instance_names:
+ json_output['instance_names'] = instance_name_list
+ elif name:
+ json_output['name'] = name
+
+ json_output['changed'] = changed
+ module.exit_json(**json_output)
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.gce import *
+if __name__ == '__main__':
+ main()
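
The module vendored above is the stock Ansible gce module, presumably bundled so the playbooks get a known-good version regardless of which Ansible release is installed. A usage sketch assembled only from the options its DOCUMENTATION declares; every value below is a placeholder:

    - name: Launch one instance with the bundled gce module
      hosts: localhost
      connection: local
      tasks:
        - gce:
            name: test-node
            zone: us-central1-a
            machine_type: n1-standard-1
            image: centos-7
            service_account_email: svc-account@developer.gserviceaccount.com
            pem_file: /path/to/pem_file
            project_id: my-project-id
          register: gce_result
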
diff --git a/playbooks/gce/openshift-cluster/list.yml b/playbooks/gce/openshift-cluster/list.yml
index 992033d16..c29cac272 100644
--- a/playbooks/gce/openshift-cluster/list.yml
+++ b/playbooks/gce/openshift-cluster/list.yml
@@ -15,7 +15,7 @@
name: "{{ item }}"
groups: oo_list_hosts
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ ansible_become: "{{ deployment_vars[deployment_type].become }}"
with_items: groups[scratch_group] | default([], true) | difference(['localhost']) | difference(groups.status_terminated | default([], true))
- name: List Hosts
diff --git a/playbooks/gce/openshift-cluster/service.yml b/playbooks/gce/openshift-cluster/service.yml
index 914f38c1f..13b267976 100644
--- a/playbooks/gce/openshift-cluster/service.yml
+++ b/playbooks/gce/openshift-cluster/service.yml
@@ -15,14 +15,14 @@
name: "{{ item }}"
groups: g_service_nodes
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ ansible_become: "{{ deployment_vars[deployment_type].become }}"
with_items: "{{ node_hosts | default([]) | difference(['localhost']) | difference(groups.status_terminated) }}"
- add_host:
name: "{{ item }}"
groups: g_service_masters
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ ansible_become: "{{ deployment_vars[deployment_type].become }}"
with_items: "{{ master_hosts | default([]) | difference(['localhost']) | difference(groups.status_terminated) }}"
- include: ../../common/openshift-node/service.yml
diff --git a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml
index 8ebf71cd4..c5c479052 100644
--- a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml
+++ b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml
@@ -17,6 +17,11 @@
- clusterid-{{ cluster_id }}
- host-type-{{ type }}
- sub-host-type-{{ g_sub_host_type }}
+ metadata:
+ startup-script: |
+ #!/bin/bash
+ echo "Defaults:{{ deployment_vars[deployment_type].ssh_user }} !requiretty" > /etc/sudoers.d/99-{{ deployment_vars[deployment_type].ssh_user }}
+
when: instances |length > 0
register: gce
@@ -39,7 +44,7 @@
hostname: "{{ item.name }}"
ansible_ssh_host: "{{ item.public_ip }}"
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ ansible_become: "{{ deployment_vars[deployment_type].become }}"
groups: "{{ item.tags | oo_prepend_strings_in_list('tag_') | join(',') }}"
gce_public_ip: "{{ item.public_ip }}"
gce_private_ip: "{{ item.private_ip }}"
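
The startup-script metadata added above drops a sudoers fragment that disables requiretty for the SSH user; with requiretty in force, the non-interactive sudo that Ansible issues over SSH fails on RHEL-family images. Rendered with ssh_user=openshift (an assumed value, for illustration), the metadata comes out as:

    metadata:
      startup-script: |
        #!/bin/bash
        echo "Defaults:openshift !requiretty" > /etc/sudoers.d/99-openshift
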
diff --git a/playbooks/gce/openshift-cluster/terminate.yml b/playbooks/gce/openshift-cluster/terminate.yml
index d835c53ba..6a0ac088a 100644
--- a/playbooks/gce/openshift-cluster/terminate.yml
+++ b/playbooks/gce/openshift-cluster/terminate.yml
@@ -11,7 +11,7 @@
name: "{{ item }}"
groups: oo_hosts_to_terminate
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ ansible_become: "{{ deployment_vars[deployment_type].become }}"
with_items: (groups['tag_clusterid-' ~ cluster_id] | default([])) | difference(['localhost'])
- name: Unsubscribe VMs
diff --git a/playbooks/gce/openshift-cluster/update.yml b/playbooks/gce/openshift-cluster/update.yml
index 2dc540978..332f27da7 100644
--- a/playbooks/gce/openshift-cluster/update.yml
+++ b/playbooks/gce/openshift-cluster/update.yml
@@ -1,20 +1,33 @@
---
+- hosts: localhost
+ gather_facts: no
+ tasks:
+ - include_vars: vars.yml
+ - include_vars: cluster_hosts.yml
+ - add_host:
+ name: "{{ item }}"
+ groups: l_oo_all_hosts
+ with_items: g_all_hosts
+
+- hosts: l_oo_all_hosts
+ gather_facts: no
+ tasks:
+ - include_vars: vars.yml
+ - include_vars: cluster_hosts.yml
+
- name: Populate oo_hosts_to_update group
hosts: localhost
connection: local
become: no
gather_facts: no
- vars_files:
- - vars.yml
- - cluster_hosts.yml
tasks:
- name: Evaluate oo_hosts_to_update
add_host:
name: "{{ item }}"
groups: oo_hosts_to_update
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
- with_items: "{{ g_all_hosts | default([]) }}"
+ ansible_become: "{{ deployment_vars[deployment_type].become }}"
+ with_items: g_all_hosts | default([])
- include: ../../common/openshift-cluster/update_repos_and_packages.yml
diff --git a/playbooks/gce/openshift-cluster/vars.yml b/playbooks/gce/openshift-cluster/vars.yml
index d173213fc..13c754c1e 100644
--- a/playbooks/gce/openshift-cluster/vars.yml
+++ b/playbooks/gce/openshift-cluster/vars.yml
@@ -5,19 +5,14 @@ deployment_rhel7_ent_base:
image: "{{ lookup('oo_option', 'image_name') | default('rhel-7', True) }}"
machine_type: "{{ lookup('oo_option', 'machine_type') | default('n1-standard-1', True) }}"
ssh_user: "{{ lookup('env', 'gce_ssh_user') | default(ansible_ssh_user, true) }}"
- sudo: yes
+ become: yes
deployment_vars:
origin:
image: "{{ lookup('oo_option', 'image_name') | default('centos-7', True) }}"
machine_type: "{{ lookup('oo_option', 'machine_type') | default('n1-standard-1', True) }}"
ssh_user: "{{ lookup('env', 'gce_ssh_user') | default(ansible_ssh_user, true) }}"
- sudo: yes
- online:
- image: libra-rhel7
- machine_type: n1-standard-1
- ssh_user: root
- sudo: no
+ become: yes
enterprise: "{{ deployment_rhel7_ent_base }}"
openshift-enterprise: "{{ deployment_rhel7_ent_base }}"
atomic-enterprise: "{{ deployment_rhel7_ent_base }}"
diff --git a/playbooks/libvirt/openshift-cluster/cluster_hosts.yml b/playbooks/libvirt/openshift-cluster/cluster_hosts.yml
index b989e15fa..a7baea915 100644
--- a/playbooks/libvirt/openshift-cluster/cluster_hosts.yml
+++ b/playbooks/libvirt/openshift-cluster/cluster_hosts.yml
@@ -16,6 +16,6 @@ g_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-node'] | defa
g_new_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-new-node'] | default([])) }}"
-g_infra_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type-infra']) | default([]) }}"
+g_infra_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type-infra'] | default([])) }}"
g_compute_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type-compute'] | default([])) }}"
diff --git a/playbooks/libvirt/openshift-cluster/config.yml b/playbooks/libvirt/openshift-cluster/config.yml
index 0e003ef67..032d4cf68 100644
--- a/playbooks/libvirt/openshift-cluster/config.yml
+++ b/playbooks/libvirt/openshift-cluster/config.yml
@@ -2,22 +2,36 @@
# TODO: need to figure out a plan for setting hostname, currently the default
# is localhost, so no hostname (or public_hostname) value is getting
# assigned
+- hosts: localhost
+ gather_facts: no
+ tasks:
+ - include_vars: vars.yml
+ - include_vars: cluster_hosts.yml
+ - add_host:
+ name: "{{ item }}"
+ groups: l_oo_all_hosts
+ with_items: g_all_hosts
+
+- hosts: l_oo_all_hosts
+ gather_facts: no
+ tasks:
+ - include_vars: vars.yml
+ - include_vars: cluster_hosts.yml
+
- include: ../../common/openshift-cluster/config.yml
- vars_files:
- - ../../libvirt/openshift-cluster/vars.yml
- - ../../libvirt/openshift-cluster/cluster_hosts.yml
vars:
g_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- g_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ g_sudo: "{{ deployment_vars[deployment_type].become }}"
g_nodeonmaster: true
openshift_cluster_id: "{{ cluster_id }}"
openshift_debug_level: "{{ debug_level }}"
openshift_deployment_type: "{{ deployment_type }}"
openshift_registry_selector: 'type=infra'
- openshift_router_selector: 'type=infra'
+ openshift_hosted_router_selector: 'type=infra'
openshift_infra_nodes: "{{ g_infra_hosts }}"
openshift_master_cluster_method: 'native'
openshift_use_openshift_sdn: "{{ lookup('oo_option', 'use_openshift_sdn') }}"
os_sdn_network_plugin_name: "{{ lookup('oo_option', 'sdn_network_plugin_name') }}"
openshift_use_flannel: "{{ lookup('oo_option', 'use_flannel') }}"
openshift_use_fluentd: "{{ lookup('oo_option', 'use_fluentd') }}"
+ openshift_use_dnsmasq: false
diff --git a/playbooks/libvirt/openshift-cluster/launch.yml b/playbooks/libvirt/openshift-cluster/launch.yml
index 3a48c82bc..2475b9d6b 100644
--- a/playbooks/libvirt/openshift-cluster/launch.yml
+++ b/playbooks/libvirt/openshift-cluster/launch.yml
@@ -7,17 +7,11 @@
vars_files:
- vars.yml
vars:
- os_libvirt_storage_pool: "{{ libvirt_storage_pool | default('images') }}"
- os_libvirt_storage_pool_path: "{{ libvirt_storage_pool_path | default('/var/lib/libvirt/images') }}"
- os_libvirt_network: "{{ libvirt_network | default('default') }}"
image_url: "{{ deployment_vars[deployment_type].image.url }}"
image_sha256: "{{ deployment_vars[deployment_type].image.sha256 }}"
image_name: "{{ deployment_vars[deployment_type].image.name }}"
image_compression: "{{ deployment_vars[deployment_type].image.compression }}"
tasks:
- - fail: msg="Deployment type not supported for libvirt provider yet"
- when: deployment_type == 'online'
-
- include: tasks/configure_libvirt.yml
- include: ../../common/openshift-cluster/tasks/set_etcd_launch_facts.yml
diff --git a/playbooks/libvirt/openshift-cluster/list.yml b/playbooks/libvirt/openshift-cluster/list.yml
index 6cb81ee79..eb64544db 100644
--- a/playbooks/libvirt/openshift-cluster/list.yml
+++ b/playbooks/libvirt/openshift-cluster/list.yml
@@ -15,7 +15,7 @@
name: "{{ item }}"
groups: oo_list_hosts
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ ansible_become: "{{ deployment_vars[deployment_type].become }}"
with_items: groups[scratch_group] | default([]) | difference(['localhost'])
- name: List Hosts
diff --git a/playbooks/libvirt/openshift-cluster/service.yml b/playbooks/libvirt/openshift-cluster/service.yml
index cd07c8701..8bd24a8cf 100644
--- a/playbooks/libvirt/openshift-cluster/service.yml
+++ b/playbooks/libvirt/openshift-cluster/service.yml
@@ -18,7 +18,7 @@
add_host:
name: "{{ item }}"
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ ansible_become: "{{ deployment_vars[deployment_type].become }}"
groups: g_service_masters
with_items: "{{ g_master_hosts | default([]) }}"
@@ -26,7 +26,7 @@
add_host:
name: "{{ item }}"
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ ansible_become: "{{ deployment_vars[deployment_type].become }}"
groups: g_service_nodes
with_items: "{{ g_node_hosts | default([]) }}"
diff --git a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
index b00352539..833586ffa 100644
--- a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
+++ b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
@@ -1,7 +1,7 @@
---
# TODO: Add support for choosing base image based on deployment_type and os
# wanted (os wanted needs support added in bin/cluster with sane defaults:
-# fedora/centos for origin, rhel for online/enterprise)
+# fedora/centos for origin, rhel for enterprise)
# TODO: create a role to encapsulate some of this complexity, possibly also
# create a module to manage the storage tasks, network tasks, and possibly
@@ -13,54 +13,58 @@
get_url:
url: '{{ image_url }}'
sha256sum: '{{ image_sha256 }}'
- dest: '{{ os_libvirt_storage_pool_path }}/{{ [image_name, image_compression] | reject("equalto", "") | join(".") }}'
+ dest: '{{ libvirt_storage_pool_path }}/{{ [image_name, image_compression] | difference([""]) | join(".") }}'
when: '{{ ( lookup("oo_option", "skip_image_download") | default("no", True) | lower ) in ["false", "no"] }}'
register: downloaded_image
- name: Uncompress xz compressed base cloud image
- command: 'unxz -kf {{ os_libvirt_storage_pool_path }}/{{ [image_name, image_compression] | join(".") }}'
+ command: 'unxz -kf {{ libvirt_storage_pool_path }}/{{ [image_name, image_compression] | join(".") }}'
args:
- creates: '{{ os_libvirt_storage_pool_path }}/{{ image_name }}'
+ creates: '{{ libvirt_storage_pool_path }}/{{ image_name }}'
when: image_compression in ["xz"] and downloaded_image.changed
- name: Uncompress tgz compressed base cloud image
- command: 'tar zxvf {{ os_libvirt_storage_pool_path }}/{{ [image_name, image_compression] | join(".") }}'
+ command: 'tar zxvf {{ libvirt_storage_pool_path }}/{{ [image_name, image_compression] | join(".") }}'
args:
- creates: '{{ os_libvirt_storage_pool_path }}/{{ image_name }}'
+ creates: '{{ libvirt_storage_pool_path }}/{{ image_name }}'
when: image_compression in ["tgz"] and downloaded_image.changed
- name: Uncompress gzip compressed base cloud image
- command: 'gunzip {{ os_libvirt_storage_pool_path }}/{{ [image_name, image_compression] | join(".") }}'
+ command: 'gunzip {{ libvirt_storage_pool_path }}/{{ [image_name, image_compression] | join(".") }}'
args:
- creates: '{{ os_libvirt_storage_pool_path }}/{{ image_name }}'
+ creates: '{{ libvirt_storage_pool_path }}/{{ image_name }}'
when: image_compression in ["gz"] and downloaded_image.changed
- name: Create the cloud-init config drive path
file:
- dest: '{{ os_libvirt_storage_pool_path }}/{{ item }}_configdrive/'
+ dest: '{{ libvirt_storage_pool_path }}/{{ item }}_configdrive/'
state: directory
with_items: instances
- name: Create the cloud-init config drive files
template:
src: '{{ item[1] }}'
- dest: '{{ os_libvirt_storage_pool_path }}/{{ item[0] }}_configdrive/{{ item[1] }}'
+ dest: '{{ libvirt_storage_pool_path }}/{{ item[0] }}_configdrive/{{ item[1] }}'
with_nested:
- instances
- [ user-data, meta-data ]
- name: Create the cloud-init config drive
- command: 'genisoimage -output {{ os_libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso -volid cidata -joliet -rock user-data meta-data'
+ command: 'genisoimage -output {{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso -volid cidata -joliet -rock user-data meta-data'
args:
- chdir: '{{ os_libvirt_storage_pool_path }}/{{ item }}_configdrive/'
- creates: '{{ os_libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso'
+ chdir: '{{ libvirt_storage_pool_path }}/{{ item }}_configdrive/'
+ creates: '{{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso'
with_items: instances
- name: Refresh the libvirt storage pool for openshift
command: 'virsh -c {{ libvirt_uri }} pool-refresh {{ libvirt_storage_pool }}'
-- name: Create VMs drives
- command: 'virsh -c {{ libvirt_uri }} vol-create-as {{ os_libvirt_storage_pool }} {{ item }}.qcow2 10G --format qcow2 --backing-vol {{ image_name }} --backing-vol-format qcow2'
+- name: Create VM drives
+ command: 'virsh -c {{ libvirt_uri }} vol-create-as {{ libvirt_storage_pool }} {{ item }}.qcow2 10G --format qcow2 --backing-vol {{ image_name }} --backing-vol-format qcow2'
+ with_items: instances
+
+- name: Create VM docker drives
+ command: 'virsh -c {{ libvirt_uri }} vol-create-as {{ libvirt_storage_pool }} {{ item }}-docker.qcow2 10G --format qcow2 --allocation 0'
with_items: instances
- name: Create VMs
@@ -79,7 +83,7 @@
with_items: instances
- name: Wait for the VMs to get an IP
- shell: 'virsh -c {{ libvirt_uri }} net-dhcp-leases openshift-ansible | egrep -c ''{{ instances | join("|") }}'''
+ shell: 'virsh -c {{ libvirt_uri }} net-dhcp-leases {{ libvirt_network }} | egrep -c ''{{ instances | join("|") }}'''
register: nb_allocated_ips
until: nb_allocated_ips.stdout == '{{ instances | length }}'
retries: 60
@@ -87,7 +91,7 @@
when: instances | length != 0
- name: Collect IP addresses of the VMs
- shell: 'virsh -c {{ libvirt_uri }} net-dhcp-leases openshift-ansible | awk ''$6 == "{{ item }}" {gsub(/\/.*/, "", $5); print $5}'''
+ shell: 'virsh -c {{ libvirt_uri }} net-dhcp-leases {{ libvirt_network }} | awk ''$6 == "{{ item }}" {gsub(/\/.*/, "", $5); print $5}'''
register: scratch_ip
with_items: instances
@@ -109,7 +113,7 @@
hostname: '{{ item.0 }}'
ansible_ssh_host: '{{ item.1 }}'
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ ansible_become: "{{ deployment_vars[deployment_type].become }}"
groups: "tag_environment-{{ cluster_env }}, tag_host-type-{{ type }}, tag_sub-host-type-{{ g_sub_host_type }}, tag_clusterid-{{ cluster_id }}"
openshift_node_labels: "{{ node_label }}"
with_together:
diff --git a/playbooks/libvirt/openshift-cluster/templates/domain.xml b/playbooks/libvirt/openshift-cluster/templates/domain.xml
index 0ca8e0974..8e96cec8d 100644
--- a/playbooks/libvirt/openshift-cluster/templates/domain.xml
+++ b/playbooks/libvirt/openshift-cluster/templates/domain.xml
@@ -1,6 +1,6 @@
<domain type='kvm' id='8'>
<name>{{ item }}</name>
- <memory unit='GiB'>1</memory>
+ <memory unit='MiB'>{{ libvirt_instance_memory_mib }}</memory>
<metadata xmlns:ansible="https://github.com/ansible/ansible">
<ansible:tags>
<ansible:tag>environment-{{ cluster_env }}</ansible:tag>
@@ -9,8 +9,7 @@
<ansible:tag>sub-host-type-{{ g_sub_host_type }}</ansible:tag>
</ansible:tags>
</metadata>
- <currentMemory unit='GiB'>1</currentMemory>
- <vcpu placement='static'>2</vcpu>
+ <vcpu placement='static'>{{ libvirt_instance_vcpu }}</vcpu>
<os>
<type arch='x86_64' machine='pc'>hvm</type>
<boot dev='hd'/>
@@ -32,18 +31,23 @@
<emulator>/usr/bin/qemu-system-x86_64</emulator>
<disk type='file' device='disk'>
<driver name='qemu' type='qcow2'/>
- <source file='{{ os_libvirt_storage_pool_path }}/{{ item }}.qcow2'/>
+ <source file='{{ libvirt_storage_pool_path }}/{{ item }}.qcow2'/>
<target dev='vda' bus='virtio'/>
</disk>
+ <disk type='file' device='disk'>
+ <driver name='qemu' type='qcow2'/>
+ <source file='{{ libvirt_storage_pool_path }}/{{ item }}-docker.qcow2'/>
+ <target dev='vdb' bus='virtio'/>
+ </disk>
<disk type='file' device='cdrom'>
<driver name='qemu' type='raw'/>
- <source file='{{ os_libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso'/>
- <target dev='vdb' bus='virtio'/>
+ <source file='{{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso'/>
+ <target dev='vdc' bus='virtio'/>
<readonly/>
</disk>
<controller type='usb' index='0' />
<interface type='network'>
- <source network='{{ os_libvirt_network }}'/>
+ <source network='{{ libvirt_network }}'/>
<model type='virtio'/>
</interface>
<serial type='pty'>
diff --git a/playbooks/libvirt/openshift-cluster/templates/user-data b/playbooks/libvirt/openshift-cluster/templates/user-data
index ead881f78..8b79940f4 100644
--- a/playbooks/libvirt/openshift-cluster/templates/user-data
+++ b/playbooks/libvirt/openshift-cluster/templates/user-data
@@ -4,6 +4,9 @@ disable_root: true
hostname: {{ item[0] }}
fqdn: {{ item[0] }}.example.com
+mounts:
+- [ vdb ]
+
users:
- default
- name: root
@@ -23,6 +26,12 @@ write_files:
permissions: 440
content: |
Defaults:openshift !requiretty
+ - content: |
+ DEVS=/dev/vdb
+ VG=docker_vg
+ path: /etc/sysconfig/docker-storage-setup
+ owner: root:root
+ permissions: '0644'
runcmd:
- NETWORK_CONFIG=/etc/sysconfig/network-scripts/ifcfg-eth0; if ! grep DHCP_HOSTNAME ${NETWORK_CONFIG}; then echo 'DHCP_HOSTNAME="{{ item[0] }}.example.com"' >> ${NETWORK_CONFIG}; fi; pkill -9 dhclient; service network restart
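
Together with the new vdb entry under mounts, the docker-storage-setup fragment points Docker's storage at the dedicated second disk: docker-storage-setup reads DEVS and VG from /etc/sysconfig/docker-storage-setup and builds the docker_vg volume group on /dev/vdb. The same configuration as a plain Ansible task, sketched for hosts not provisioned through cloud-init:

    - name: Point docker-storage-setup at the dedicated disk
      copy:
        dest: /etc/sysconfig/docker-storage-setup
        content: |
          DEVS=/dev/vdb
          VG=docker_vg
        owner: root
        group: root
        mode: '0644'
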
diff --git a/playbooks/libvirt/openshift-cluster/terminate.yml b/playbooks/libvirt/openshift-cluster/terminate.yml
index f4749c28d..baef911f9 100644
--- a/playbooks/libvirt/openshift-cluster/terminate.yml
+++ b/playbooks/libvirt/openshift-cluster/terminate.yml
@@ -14,7 +14,7 @@
name: "{{ item }}"
groups: oo_hosts_to_terminate
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ ansible_become: "{{ deployment_vars[deployment_type].become }}"
with_items: groups[cluster_group] | default([])
- name: Unsubscribe VMs
@@ -45,12 +45,18 @@
- groups['oo_hosts_to_terminate']
- [ destroy, undefine ]
- - name: Delete VMs drives
+ - name: Delete VM drives
command: 'virsh -c {{ libvirt_uri }} vol-delete --pool {{ libvirt_storage_pool }} {{ item }}.qcow2'
args:
removes: '{{ libvirt_storage_pool_path }}/{{ item }}.qcow2'
with_items: groups['oo_hosts_to_terminate']
+ - name: Delete VM docker drives
+ command: 'virsh -c {{ libvirt_uri }} vol-delete --pool {{ libvirt_storage_pool }} {{ item }}-docker.qcow2'
+ args:
+ removes: '{{ libvirt_storage_pool_path }}/{{ item }}-docker.qcow2'
+ with_items: groups['oo_hosts_to_terminate']
+
- name: Delete the VM cloud-init image
file:
path: '{{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso'
diff --git a/playbooks/libvirt/openshift-cluster/update.yml b/playbooks/libvirt/openshift-cluster/update.yml
index 2dc540978..28362c984 100644
--- a/playbooks/libvirt/openshift-cluster/update.yml
+++ b/playbooks/libvirt/openshift-cluster/update.yml
@@ -1,4 +1,20 @@
---
+- hosts: localhost
+ gather_facts: no
+ tasks:
+ - include_vars: vars.yml
+ - include_vars: cluster_hosts.yml
+ - add_host:
+ name: "{{ item }}"
+ groups: l_oo_all_hosts
+ with_items: g_all_hosts
+
+- hosts: l_oo_all_hosts
+ gather_facts: no
+ tasks:
+ - include_vars: vars.yml
+ - include_vars: cluster_hosts.yml
+
- name: Populate oo_hosts_to_update group
hosts: localhost
connection: local
@@ -13,8 +29,8 @@
name: "{{ item }}"
groups: oo_hosts_to_update
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
- with_items: "{{ g_all_hosts | default([]) }}"
+ ansible_become: "{{ deployment_vars[deployment_type].become }}"
+ with_items: g_all_hosts | default([])
- include: ../../common/openshift-cluster/update_repos_and_packages.yml
diff --git a/playbooks/libvirt/openshift-cluster/vars.yml b/playbooks/libvirt/openshift-cluster/vars.yml
index f28245f88..4daaf1c91 100644
--- a/playbooks/libvirt/openshift-cluster/vars.yml
+++ b/playbooks/libvirt/openshift-cluster/vars.yml
@@ -1,8 +1,11 @@
---
-libvirt_storage_pool_path: "{{ lookup('env','HOME') }}/libvirt-storage-pool-openshift-ansible"
-libvirt_storage_pool: 'openshift-ansible'
-libvirt_network: openshift-ansible
-libvirt_uri: 'qemu:///system'
+default_pool_path: "{{ lookup('env','HOME') }}/libvirt-storage-pool-openshift-ansible"
+libvirt_storage_pool_path: "{{ lookup('oo_option', 'libvirt_storage_pool_path') | default(default_pool_path, True) }}"
+libvirt_storage_pool: "{{ lookup('oo_option', 'libvirt_storage_pool') | default('openshift-ansible', True) }}"
+libvirt_network: "{{ lookup('oo_option', 'libvirt_network') | default('openshift-ansible', True) }}"
+libvirt_instance_memory_mib: "{{ lookup('oo_option', 'libvirt_instance_memory_mib') | default(1024, True) }}"
+libvirt_instance_vcpu: "{{ lookup('oo_option', 'libvirt_instance_vcpu') | default(2, True) }}"
+libvirt_uri: "{{ lookup('oo_option', 'libvirt_uri') | default('qemu:///system', True) }}"
debug_level: 2
# Automatic download of the qcow2 image for RHEL cannot be done directly from the RedHat portal because it requires authentication.
@@ -17,7 +20,7 @@ deployment_rhel7_ent_base:
default('25f880767ec6bf71beb532e17f1c45231640bbfdfbbb1dffb79d2c1b328388e0', True) }}"
compression: ""
ssh_user: openshift
- sudo: yes
+ become: yes
deployment_vars:
origin:
@@ -31,14 +34,7 @@ deployment_vars:
sha256: "{{ lookup('oo_option', 'image_sha256') |
default('dd0f5e610e7c5ffacaca35ed7a78a19142a588f4543da77b61c1fb0d74400471', True) }}"
ssh_user: openshift
- sudo: yes
- online:
- image:
- url:
- name:
- sha256:
- ssh_user: root
- sudo: no
+ become: yes
enterprise: "{{ deployment_rhel7_ent_base }}"
openshift-enterprise: "{{ deployment_rhel7_ent_base }}"
atomic-enterprise: "{{ deployment_rhel7_ent_base }}"
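
The hard-coded libvirt settings become oo_option lookups with the old values as defaults, so the storage pool, network, instance memory, and vCPU count can be tuned per run without editing the playbook. The pattern in isolation (the variable name is illustrative):

    some_tunable: "{{ lookup('oo_option', 'some_tunable') | default('fallback', True) }}"

The second argument to default() makes the fallback apply to empty values as well as undefined ones, which matters if an unset oo_option comes back as an empty string rather than as undefined.
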
diff --git a/playbooks/openstack/openshift-cluster/cluster_hosts.yml b/playbooks/openstack/openshift-cluster/cluster_hosts.yml
index 9a3361919..119b376aa 100644
--- a/playbooks/openstack/openshift-cluster/cluster_hosts.yml
+++ b/playbooks/openstack/openshift-cluster/cluster_hosts.yml
@@ -16,6 +16,6 @@ g_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_node'] | defa
g_new_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_new_node'] | default([])) }}"
-g_infra_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type_infra']) | default([]) }}"
+g_infra_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type_infra'] | default([])) }}"
g_compute_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type_compute'] | default([])) }}"
diff --git a/playbooks/openstack/openshift-cluster/config.yml b/playbooks/openstack/openshift-cluster/config.yml
index 093beaf03..6e4f414d6 100644
--- a/playbooks/openstack/openshift-cluster/config.yml
+++ b/playbooks/openstack/openshift-cluster/config.yml
@@ -1,17 +1,30 @@
---
+- hosts: localhost
+ gather_facts: no
+ tasks:
+ - include_vars: vars.yml
+ - include_vars: cluster_hosts.yml
+ - add_host:
+ name: "{{ item }}"
+ groups: l_oo_all_hosts
+ with_items: g_all_hosts
+
+- hosts: l_oo_all_hosts
+ gather_facts: no
+ tasks:
+ - include_vars: vars.yml
+ - include_vars: cluster_hosts.yml
+
- include: ../../common/openshift-cluster/config.yml
- vars_files:
- - ../../openstack/openshift-cluster/vars.yml
- - ../../openstack/openshift-cluster/cluster_hosts.yml
vars:
g_nodeonmaster: true
g_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- g_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ g_sudo: "{{ deployment_vars[deployment_type].become }}"
openshift_cluster_id: "{{ cluster_id }}"
openshift_debug_level: "{{ debug_level }}"
openshift_deployment_type: "{{ deployment_type }}"
openshift_registry_selector: 'type=infra'
- openshift_router_selector: 'type=infra'
+ openshift_hosted_router_selector: 'type=infra'
openshift_infra_nodes: "{{ g_infra_hosts }}"
openshift_master_cluster_method: 'native'
openshift_use_openshift_sdn: "{{ lookup('oo_option', 'use_openshift_sdn') }}"
diff --git a/playbooks/openstack/openshift-cluster/dns.yml b/playbooks/openstack/openshift-cluster/dns.yml
index 5e7671a48..31113d5f0 100644
--- a/playbooks/openstack/openshift-cluster/dns.yml
+++ b/playbooks/openstack/openshift-cluster/dns.yml
@@ -12,7 +12,7 @@
name: "{{ item }}"
groups: oo_dns_hosts_to_update
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ ansible_become: "{{ deployment_vars[deployment_type].become }}"
with_items: "{{ groups[cluster_id ~ '-dns'] }}"
- name: Evaluate oo_hosts_to_add_in_dns
@@ -20,7 +20,7 @@
name: "{{ item }}"
groups: oo_hosts_to_add_in_dns
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ ansible_become: "{{ deployment_vars[deployment_type].become }}"
with_items: "{{ groups['tag_clusterid_' ~ cluster_id] }}"
- name: Gather facts
diff --git a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml b/playbooks/openstack/openshift-cluster/files/heat_stack.yaml
index af774aa32..422e6dafe 100644
--- a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml
+++ b/playbooks/openstack/openshift-cluster/files/heat_stack.yaml
@@ -288,6 +288,14 @@ resources:
port_range_max: 53
- direction: ingress
protocol: tcp
+ port_range_min: 8053
+ port_range_max: 8053
+ - direction: ingress
+ protocol: udp
+ port_range_min: 8053
+ port_range_max: 8053
+ - direction: ingress
+ protocol: tcp
port_range_min: 24224
port_range_max: 24224
- direction: ingress
@@ -350,7 +358,6 @@ resources:
port_range_min: 10250
port_range_max: 10250
remote_mode: remote_group_id
- remote_group_id: { get_resource: master-secgrp }
- direction: ingress
protocol: udp
port_range_min: 4789
@@ -592,12 +599,22 @@ resources:
type: OS::Heat::MultipartMime
properties:
parts:
- - config: { get_file: user-data }
- config:
str_replace:
template: |
#cloud-config
+ disable_root: true
+
+ system_info:
+ default_user:
+ name: openshift
+ sudo: ["ALL=(ALL) NOPASSWD: ALL"]
+
write_files:
+ - path: /etc/sudoers.d/00-openshift-no-requiretty
+ permissions: 440
+ content: |
+ Defaults:openshift !requiretty
- path: /etc/sysconfig/network-scripts/ifcfg-eth0
content: |
DEVICE="eth0"
diff --git a/playbooks/openstack/openshift-cluster/files/user-data b/playbooks/openstack/openshift-cluster/files/user-data
index e789a5b69..eb65f7cec 100644
--- a/playbooks/openstack/openshift-cluster/files/user-data
+++ b/playbooks/openstack/openshift-cluster/files/user-data
@@ -5,3 +5,9 @@ system_info:
default_user:
name: openshift
sudo: ["ALL=(ALL) NOPASSWD: ALL"]
+
+write_files:
+ - path: /etc/sudoers.d/00-openshift-no-requiretty
+ permissions: 440
+ content: |
+ Defaults:openshift !requiretty
diff --git a/playbooks/openstack/openshift-cluster/launch.yml b/playbooks/openstack/openshift-cluster/launch.yml
index 0afcad72e..b9aae2f4c 100644
--- a/playbooks/openstack/openshift-cluster/launch.yml
+++ b/playbooks/openstack/openshift-cluster/launch.yml
@@ -7,10 +7,6 @@
vars_files:
- vars.yml
tasks:
- - fail:
- msg: "Deployment type not supported for OpenStack provider yet"
- when: deployment_type == 'online'
-
# TODO: Write an Ansible module for dealing with HEAT stacks
# Dealing with the outputs is currently terrible
@@ -50,7 +46,7 @@
-P master_flavor={{ openstack_flavor["master"] }}
-P node_flavor={{ openstack_flavor["node"] }}
-P infra_flavor={{ openstack_flavor["infra"] }}
- -P dns_flavor=m1.small
+ -P dns_flavor={{ openstack_flavor["dns"] }}
openshift-ansible-{{ cluster_id }}-stack'
- name: Wait for OpenStack Stack readiness
@@ -106,7 +102,7 @@
hostname: '{{ item[0] }}'
ansible_ssh_host: '{{ item[2] }}'
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ ansible_become: "{{ deployment_vars[deployment_type].become }}"
groups: 'tag_environment_{{ cluster_env }}, tag_host-type_etcd, tag_sub-host-type_default, tag_clusterid_{{ cluster_id }}'
openshift_node_labels:
type: "etcd"
@@ -120,7 +116,7 @@
hostname: '{{ item[0] }}'
ansible_ssh_host: '{{ item[2] }}'
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ ansible_become: "{{ deployment_vars[deployment_type].become }}"
groups: 'tag_environment_{{ cluster_env }}, tag_host-type_master, tag_sub-host-type_default, tag_clusterid_{{ cluster_id }}'
openshift_node_labels:
type: "master"
@@ -134,7 +130,7 @@
hostname: '{{ item[0] }}'
ansible_ssh_host: '{{ item[2] }}'
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ ansible_become: "{{ deployment_vars[deployment_type].become }}"
groups: 'tag_environment_{{ cluster_env }}, tag_host-type_node, tag_sub-host-type_compute, tag_clusterid_{{ cluster_id }}'
openshift_node_labels:
type: "compute"
@@ -148,7 +144,7 @@
hostname: '{{ item[0] }}'
ansible_ssh_host: '{{ item[2] }}'
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ ansible_become: "{{ deployment_vars[deployment_type].become }}"
groups: 'tag_environment_{{ cluster_env }}, tag_host-type_node, tag_sub-host-type_infra, tag_clusterid_{{ cluster_id }}'
openshift_node_labels:
type: "infra"
@@ -162,7 +158,7 @@
hostname: '{{ parsed_outputs.dns_name }}'
ansible_ssh_host: '{{ parsed_outputs.dns_floating_ip }}'
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ ansible_become: "{{ deployment_vars[deployment_type].become }}"
groups: '{{ cluster_id }}-dns'
- name: Wait for ssh
diff --git a/playbooks/openstack/openshift-cluster/list.yml b/playbooks/openstack/openshift-cluster/list.yml
index 123ebd323..ba9c6bf9c 100644
--- a/playbooks/openstack/openshift-cluster/list.yml
+++ b/playbooks/openstack/openshift-cluster/list.yml
@@ -16,7 +16,7 @@
groups: oo_list_hosts
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
ansible_ssh_host: "{{ hostvars[item].ansible_ssh_host | default(item) }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ ansible_become: "{{ deployment_vars[deployment_type].become }}"
with_items: groups[scratch_group] | default([]) | difference(['localhost'])
- name: List Hosts
diff --git a/playbooks/openstack/openshift-cluster/terminate.yml b/playbooks/openstack/openshift-cluster/terminate.yml
index a1fb41b53..5bd8476f1 100644
--- a/playbooks/openstack/openshift-cluster/terminate.yml
+++ b/playbooks/openstack/openshift-cluster/terminate.yml
@@ -10,7 +10,7 @@
name: "{{ item }}"
groups: oo_hosts_to_terminate
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ ansible_become: "{{ deployment_vars[deployment_type].become }}"
with_items: (groups['tag_environment_' ~ cluster_env]|default([])) | intersect(groups['tag_clusterid_' ~ cluster_id ]|default([]))
- name: Unsubscribe VMs
diff --git a/playbooks/openstack/openshift-cluster/update.yml b/playbooks/openstack/openshift-cluster/update.yml
index 16027b15c..6d4d23963 100644
--- a/playbooks/openstack/openshift-cluster/update.yml
+++ b/playbooks/openstack/openshift-cluster/update.yml
@@ -1,4 +1,20 @@
---
+- hosts: localhost
+ gather_facts: no
+ tasks:
+ - include_vars: vars.yml
+ - include_vars: cluster_hosts.yml
+ - add_host:
+ name: "{{ item }}"
+ groups: l_oo_all_hosts
+ with_items: g_all_hosts
+
+- hosts: l_oo_all_hosts
+ gather_facts: no
+ tasks:
+ - include_vars: vars.yml
+ - include_vars: cluster_hosts.yml
+
- include: dns.yml
- name: Populate oo_hosts_to_update group
@@ -6,17 +22,14 @@
connection: local
become: no
gather_facts: no
- vars_files:
- - vars.yml
- - cluster_hosts.yml
tasks:
- name: Evaluate oo_hosts_to_update
add_host:
name: "{{ item }}"
groups: oo_hosts_to_update
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
- with_items: "{{ g_all_hosts | default([]) }}"
+ ansible_become: "{{ deployment_vars[deployment_type].become }}"
+ with_items: g_all_hosts | default([])
- include: ../../common/openshift-cluster/update_repos_and_packages.yml
diff --git a/playbooks/openstack/openshift-cluster/vars.yml b/playbooks/openstack/openshift-cluster/vars.yml
index ee26d223e..bc53a51b0 100644
--- a/playbooks/openstack/openshift-cluster/vars.yml
+++ b/playbooks/openstack/openshift-cluster/vars.yml
@@ -13,6 +13,7 @@ openstack_ssh_public_key: "{{ lookup('file', lookup('oo_option', 'public_k
openstack_ssh_access_from: "{{ lookup('oo_option', 'ssh_from') |
default('0.0.0.0/0', True) }}"
openstack_flavor:
+ dns: "{{ lookup('oo_option', 'dns_flavor' ) | default('m1.small', True) }}"
etcd: "{{ lookup('oo_option', 'etcd_flavor' ) | default('m1.small', True) }}"
master: "{{ lookup('oo_option', 'master_flavor' ) | default('m1.small', True) }}"
infra: "{{ lookup('oo_option', 'infra_flavor' ) | default('m1.small', True) }}"
@@ -21,17 +22,13 @@ openstack_flavor:
deployment_rhel7_ent_base:
image: "{{ lookup('oo_option', 'image_name') | default('rhel-guest-image-7.2-20151102.0.x86_64', True) }}"
ssh_user: openshift
- sudo: yes
+ become: yes
deployment_vars:
origin:
image: "{{ lookup('oo_option', 'image_name') | default('centos-70-raw', True) }}"
ssh_user: openshift
- sudo: yes
- online:
- image:
- ssh_user: root
- sudo: no
+ become: yes
enterprise: "{{ deployment_rhel7_ent_base }}"
openshift-enterprise: "{{ deployment_rhel7_ent_base }}"
atomic-enterprise: "{{ deployment_rhel7_ent_base }}"
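
With dns added to openshift_flavor, the DNS node's flavor flows through the same oo_option lookup as the other tiers instead of the m1.small hard-coded into launch.yml, which the -P dns_flavor={{ openstack_flavor["dns"] }} change above now consumes. The lookup shape, for reference:

    openstack_flavor:
      dns: "{{ lookup('oo_option', 'dns_flavor') | default('m1.small', True) }}"
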