Diffstat (limited to 'playbooks')
-rw-r--r--   playbooks/adhoc/atomic_openshift_tutorial_reset.yml | 29
-rw-r--r--   playbooks/adhoc/create_pv/create_pv.yaml | 17
-rw-r--r--   playbooks/adhoc/docker_loopback_to_lvm/docker-storage-setup | 2
-rw-r--r--   playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml | 142
-rwxr-xr-x   playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml | 104
-rw-r--r--   playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml | 69
-rw-r--r--   playbooks/adhoc/grow_docker_vg/filter_plugins/oo_filters.py | 41
-rw-r--r--   playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml | 206
-rw-r--r--   playbooks/adhoc/s3_registry/s3_registry.j2 | 20
-rw-r--r--   playbooks/adhoc/s3_registry/s3_registry.yml | 71
-rw-r--r--   playbooks/adhoc/uninstall.yml | 134
-rw-r--r--   playbooks/adhoc/upgrades/README.md | 21
l---------   playbooks/adhoc/upgrades/filter_plugins | 1
l---------   playbooks/adhoc/upgrades/lookup_plugins | 1
l---------   playbooks/adhoc/upgrades/roles | 1
-rw-r--r--   playbooks/adhoc/upgrades/upgrade.yml | 128
-rw-r--r--   playbooks/adhoc/zabbix_setup/clean_zabbix.yml | 57
-rw-r--r--   playbooks/adhoc/zabbix_setup/create_template.yml | 57
l---------   playbooks/adhoc/zabbix_setup/filter_plugins | 2
-rwxr-xr-x   playbooks/adhoc/zabbix_setup/oo-clean-zaio.yml | 7
-rwxr-xr-x   playbooks/adhoc/zabbix_setup/oo-config-zaio.yml | 13
l---------   playbooks/adhoc/zabbix_setup/roles | 2
-rw-r--r--   playbooks/adhoc/zabbix_setup/setup_zabbix.yml | 38
-rw-r--r--   playbooks/adhoc/zabbix_setup/vars/template_heartbeat.yml | 11
-rw-r--r--   playbooks/adhoc/zabbix_setup/vars/template_host.yml | 27
-rw-r--r--   playbooks/adhoc/zabbix_setup/vars/template_master.yml | 27
-rw-r--r--   playbooks/adhoc/zabbix_setup/vars/template_node.yml | 27
-rw-r--r--   playbooks/adhoc/zabbix_setup/vars/template_os_linux.yml | 90
-rw-r--r--   playbooks/adhoc/zabbix_setup/vars/template_router.yml | 27
-rw-r--r--   playbooks/aws/openshift-cluster/config.yml | 2
-rw-r--r--   playbooks/aws/openshift-cluster/launch.yml | 5
-rw-r--r--   playbooks/aws/openshift-cluster/tasks/launch_instances.yml | 31
-rw-r--r--   playbooks/aws/openshift-cluster/vars.online.int.yml | 2
-rw-r--r--   playbooks/aws/openshift-cluster/vars.online.prod.yml | 2
-rw-r--r--   playbooks/aws/openshift-cluster/vars.online.stage.yml | 2
-rw-r--r--   playbooks/byo/openshift-cluster/config.yml | 2
-rw-r--r--   playbooks/byo/openshift_facts.yml | 2
-rw-r--r--   playbooks/common/openshift-cluster/create_services.yml | 8
-rw-r--r--   playbooks/common/openshift-etcd/config.yml | 1
-rw-r--r--   playbooks/common/openshift-master/config.yml | 43
-rw-r--r--   playbooks/common/openshift-master/service.yml | 4
-rw-r--r--   playbooks/common/openshift-node/config.yml | 21
-rw-r--r--   playbooks/common/openshift-node/service.yml | 4
-rw-r--r--   playbooks/gce/openshift-cluster/config.yml | 7
-rw-r--r--   playbooks/gce/openshift-cluster/join_node.yml | 49
-rw-r--r--   playbooks/gce/openshift-cluster/launch.yml | 33
-rw-r--r--   playbooks/gce/openshift-cluster/list.yml | 4
-rw-r--r--   playbooks/gce/openshift-cluster/tasks/launch_instances.yml | 32
-rw-r--r--   playbooks/gce/openshift-cluster/terminate.yml | 55
-rw-r--r--   playbooks/gce/openshift-cluster/vars.yml | 8
-rw-r--r--   playbooks/libvirt/openshift-cluster/config.yml | 2
-rw-r--r--   playbooks/libvirt/openshift-cluster/launch.yml | 8
-rw-r--r--   playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml | 7
-rw-r--r--   playbooks/libvirt/openshift-cluster/templates/network.xml | 2
-rw-r--r--   playbooks/libvirt/openshift-cluster/templates/user-data | 2
-rw-r--r--   playbooks/openstack/openshift-cluster/config.yml | 2
-rw-r--r--   playbooks/openstack/openshift-cluster/files/heat_stack.yaml | 42
-rw-r--r--   playbooks/openstack/openshift-cluster/launch.yml | 33
-rw-r--r--   playbooks/openstack/openshift-cluster/tasks/configure_openstack.yml | 27
-rw-r--r--   playbooks/openstack/openshift-cluster/vars.yml | 8
60 files changed, 1319 insertions, 503 deletions
diff --git a/playbooks/adhoc/atomic_openshift_tutorial_reset.yml b/playbooks/adhoc/atomic_openshift_tutorial_reset.yml
new file mode 100644
index 000000000..c14d08e87
--- /dev/null
+++ b/playbooks/adhoc/atomic_openshift_tutorial_reset.yml
@@ -0,0 +1,29 @@
+# This deletes *ALL* Docker images, and uninstalls OpenShift and
+# Atomic Enterprise RPMs. It is primarily intended for use
+# with the tutorial as well as for developers to reset state.
+#
+---
+- include: uninstall.yml
+
+- hosts:
+ - OSEv3:children
+
+ sudo: yes
+
+ tasks:
+ - shell: docker ps -a -q | xargs docker stop
+ changed_when: False
+ failed_when: False
+
+ - shell: docker ps -a -q| xargs docker rm
+ changed_when: False
+ failed_when: False
+
+ - shell: docker images -q |xargs docker rmi
+ changed_when: False
+ failed_when: False
+
+ - user: name={{ item }} state=absent remove=yes
+ with_items:
+ - alice
+ - joe
diff --git a/playbooks/adhoc/create_pv/create_pv.yaml b/playbooks/adhoc/create_pv/create_pv.yaml
index 684a0ca72..4f0ef7a75 100644
--- a/playbooks/adhoc/create_pv/create_pv.yaml
+++ b/playbooks/adhoc/create_pv/create_pv.yaml
@@ -50,6 +50,16 @@
- debug: var=vol
+ - name: tag the vol with a name
+ ec2_tag: region={{ hostvars[oo_name]['ec2_region'] }} resource={{vol.volume_id}}
+ args:
+ tags:
+ Name: "pv-{{ hostvars[oo_name]['ec2_tag_Name'] }}"
+ env: "{{cli_environment}}"
+ register: voltags
+
+ - debug: var=voltags
+
- name: Configure the drive
gather_facts: no
hosts: oo_master
@@ -118,6 +128,13 @@
state: unmounted
fstype: ext4
+ - name: remove from fstab
+ mount:
+ name: "{{ pv_mntdir }}"
+ src: "{{ cli_device_name }}"
+ state: absent
+ fstype: ext4
+
- name: detach drive
delegate_to: localhost
ec2_vol:
diff --git a/playbooks/adhoc/docker_loopback_to_lvm/docker-storage-setup b/playbooks/adhoc/docker_loopback_to_lvm/docker-storage-setup
new file mode 100644
index 000000000..059058823
--- /dev/null
+++ b/playbooks/adhoc/docker_loopback_to_lvm/docker-storage-setup
@@ -0,0 +1,2 @@
+DEVS=/dev/xvdb
+VG=docker_vg
diff --git a/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml b/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml
new file mode 100644
index 000000000..b6a2d2f26
--- /dev/null
+++ b/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml
@@ -0,0 +1,142 @@
+---
+# This playbook converts docker from loopback to direct-lvm (the Red Hat recommended way to run docker)
+# in AWS. It adds an additional EBS volume and creates the volume group for docker on that volume.
+#
+# To run:
+# 1. Source your AWS credentials (make sure it's the corresponding AWS account) into your environment
+# export AWS_ACCESS_KEY_ID='XXXXX'
+# export AWS_SECRET_ACCESS_KEY='XXXXXX'
+#
+# 2. run the playbook:
+# ansible-playbook -e 'cli_tag_name=<tag-name>' -e "cli_volume_size=30" docker_loopback_to_direct_lvm.yml
+#
+# Example:
+# ansible-playbook -e 'cli_tag_name=ops-master-12345' -e "cli_volume_size=30" docker_loopback_to_direct_lvm.yml
+#
+# Notes:
+# * By default this will do a 30GB volume.
+# * iops are calculated as Disk Size * 30, e.g. (30GB * 30) = 900 iops
+# * This will remove /var/lib/docker!
+# * You may need to re-deploy docker images after this is run (like monitoring)
+#
+
+- name: Fix docker to have a provisioned iops drive
+ hosts: "tag_Name_{{ cli_tag_name }}"
+ user: root
+ connection: ssh
+ gather_facts: no
+
+ vars:
+ cli_volume_type: gp2
+ cli_volume_size: 30
+
+ pre_tasks:
+ - fail:
+ msg: "This playbook requires {{item}} to be set."
+ when: "{{ item }} is not defined or {{ item }} == ''"
+ with_items:
+ - cli_tag_name
+ - cli_volume_size
+
+ - debug:
+ var: hosts
+
+ - name: start docker
+ service:
+ name: docker
+ state: started
+
+ - name: Determine if loopback
+ shell: docker info | grep 'Data file:.*loop'
+ register: loop_device_check
+ ignore_errors: yes
+
+ - debug:
+ var: loop_device_check
+
+ - name: fail if we don't detect loopback
+ fail:
+ msg: loopback not detected! Please investigate manually.
+ when: loop_device_check.rc == 1
+
+ - name: stop zagg client monitoring container
+ service:
+ name: oso-rhel7-zagg-client
+ state: stopped
+ ignore_errors: yes
+
+ - name: stop pcp client monitoring container
+ service:
+ name: oso-f22-host-monitoring
+ state: stopped
+ ignore_errors: yes
+
+ - name: stop docker
+ service:
+ name: docker
+ state: stopped
+
+ - name: delete /var/lib/docker
+ command: rm -rf /var/lib/docker
+
+ - name: remove /var/lib/docker
+ command: rm -rf /var/lib/docker
+
+ - name: check to see if /dev/xvdb exists
+ command: test -e /dev/xvdb
+ register: xvdb_check
+ ignore_errors: yes
+
+ - debug: var=xvdb_check
+
+ - name: fail if /dev/xvdb already exists
+ fail:
+ msg: /dev/xvdb already exists. Please investigate
+ when: xvdb_check.rc == 0
+
+ - name: Create a volume and attach it
+ delegate_to: localhost
+ ec2_vol:
+ state: present
+ instance: "{{ ec2_id }}"
+ region: "{{ ec2_region }}"
+ volume_size: "{{ cli_volume_size | default(30, True)}}"
+ volume_type: "{{ cli_volume_type }}"
+ device_name: /dev/xvdb
+ register: vol
+
+ - debug: var=vol
+
+ - name: tag the vol with a name
+ delegate_to: localhost
+ ec2_tag: region={{ ec2_region }} resource={{ vol.volume_id }}
+ args:
+ tags:
+ Name: "{{ ec2_tag_Name }}"
+ env: "{{ ec2_tag_environment }}"
+ register: voltags
+
+ - name: Wait for volume to attach
+ pause:
+ seconds: 30
+
+ - name: copy the docker-storage-setup config file
+ copy:
+ src: docker-storage-setup
+ dest: /etc/sysconfig/docker-storage-setup
+ owner: root
+ group: root
+ mode: 0664
+
+ - name: docker storage setup
+ command: docker-storage-setup
+ register: setup_output
+
+ - debug: var=setup_output
+
+ - name: start docker
+ command: systemctl start docker.service
+ register: dockerstart
+
+ - debug: var=dockerstart
+
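A minimal follow-up check one could append to the play above, assuming docker-storage-setup succeeded with the bundled DEVS/VG settings; it simply inverts the playbook's own loopback probe:

  - name: Confirm docker is no longer using a loopback data file (illustrative sketch)
    shell: docker info | grep 'Data file:.*loop'
    register: direct_lvm_check
    # grep finding no match (rc != 0) is the desired outcome after the conversion
    failed_when: direct_lvm_check.rc == 0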
diff --git a/playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml b/playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml
new file mode 100755
index 000000000..614b2537a
--- /dev/null
+++ b/playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml
@@ -0,0 +1,104 @@
+#!/usr/bin/ansible-playbook
+---
+# This playbook converts docker from loopback to direct-lvm (the Red Hat recommended way to run docker).
+#
+# It requires the block device to be already provisioned and attached to the host. This is a generic playbook,
+# meant to be used for manual conversion. For AWS specific conversions, use the other playbook in this directory.
+#
+# To run:
+# ./ops-docker-loopback-to-direct-lvm.yml -e cli_host=<host to run on> -e cli_docker_device=<path to device>
+#
+# Example:
+# ./ops-docker-loopback-to-direct-lvm.yml -e cli_host=twiesttest-master-fd32 -e cli_docker_device=/dev/sdb
+#
+# Notes:
+# * This will remove /var/lib/docker!
+# * You may need to re-deploy docker images after this is run (like monitoring)
+
+- name: Fix docker to have a provisioned iops drive
+ hosts: "{{ cli_name }}"
+ user: root
+ connection: ssh
+ gather_facts: no
+
+ pre_tasks:
+ - fail:
+ msg: "This playbook requires {{item}} to be set."
+ when: "{{ item }} is not defined or {{ item }} == ''"
+ with_items:
+ - cli_docker_device
+
+ - name: start docker
+ service:
+ name: docker
+ state: started
+
+ - name: Determine if loopback
+ shell: docker info | grep 'Data file:.*loop'
+ register: loop_device_check
+ ignore_errors: yes
+
+ - debug:
+ var: loop_device_check
+
+ - name: fail if we don't detect loopback
+ fail:
+ msg: loopback not detected! Please investigate manually.
+ when: loop_device_check.rc == 1
+
+ - name: stop zagg client monitoring container
+ service:
+ name: oso-rhel7-zagg-client
+ state: stopped
+ ignore_errors: yes
+
+ - name: stop pcp client monitoring container
+ service:
+ name: oso-f22-host-monitoring
+ state: stopped
+ ignore_errors: yes
+
+ - name: "check to see if {{ cli_docker_device }} exists"
+ command: "test -e {{ cli_docker_device }}"
+ register: docker_dev_check
+ ignore_errors: yes
+
+ - debug: var=docker_dev_check
+
+ - name: "fail if {{ cli_docker_device }} doesn't exist"
+ fail:
+ msg: "{{ cli_docker_device }} doesn't exist. Please investigate"
+ when: docker_dev_check.rc != 0
+
+ - name: stop docker
+ service:
+ name: docker
+ state: stopped
+
+ - name: delete /var/lib/docker
+ command: rm -rf /var/lib/docker
+
+ - name: remove /var/lib/docker
+ command: rm -rf /var/lib/docker
+
+ - name: copy the docker-storage-setup config file
+ copy:
+ content: >
+ DEVS={{ cli_docker_device }}
+ VG=docker_vg
+ dest: /etc/sysconfig/docker-storage-setup
+ owner: root
+ group: root
+ mode: 0664
+
+ - name: docker storage setup
+ command: docker-storage-setup
+ register: setup_output
+
+ - debug: var=setup_output
+
+ - name: start docker
+ command: systemctl start docker.service
+ register: dockerstart
+
+ - debug: var=dockerstart
diff --git a/playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml b/playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml
new file mode 100644
index 000000000..a19291a9f
--- /dev/null
+++ b/playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml
@@ -0,0 +1,69 @@
+---
+# This playbook attempts to cleanup unwanted docker files to help alleviate docker disk space issues.
+#
+# To run:
+#
+# 1. run the playbook:
+#
+# ansible-playbook -e 'cli_tag_name=<tag-name>' docker_storage_cleanup.yml
+#
+# Example:
+#
+# ansible-playbook -e 'cli_tag_name=ops-node-compute-12345' docker_storage_cleanup.yml
+#
+# Notes:
+# * This *should* not interfere with running docker images
+#
+
+- name: Clean up Docker Storage
+ gather_facts: no
+ hosts: "tag_Name_{{ cli_tag_name }}"
+ user: root
+ connection: ssh
+
+ pre_tasks:
+
+ - fail:
+ msg: "This playbook requires {{item}} to be set."
+ when: "{{ item }} is not defined or {{ item }} == ''"
+ with_items:
+ - cli_tag_name
+
+ - name: Ensure docker is running
+ service:
+ name: docker
+ state: started
+ enabled: yes
+
+ - name: Get docker info
+ command: docker info
+ register: docker_info
+
+ - name: Show docker info
+ debug:
+ var: docker_info.stdout_lines
+
+ - name: Remove exited and dead containers
+ shell: "docker ps -a | awk '/Exited|Dead/ {print $1}' | xargs --no-run-if-empty docker rm"
+ ignore_errors: yes
+
+ - name: Remove dangling docker images
+ shell: "docker images -q -f dangling=true | xargs --no-run-if-empty docker rmi"
+ ignore_errors: yes
+
+ - name: Remove non-running docker images
+ shell: "docker images | grep -v -e registry.access.redhat.com -e docker-registry.usersys.redhat.com -e docker-registry.ops.rhcloud.com | awk '{print $3}' | xargs --no-run-if-empty docker rmi 2>/dev/null"
+ ignore_errors: yes
+
+ # leaving off the '-t' for docker exec. With it, it doesn't work with ansible and tty support
+ - name: update zabbix docker items
+ command: docker exec -i oso-rhel7-zagg-client /usr/local/bin/cron-send-docker-metrics.py
+
+ # Get and show docker info again.
+ - name: Get docker info
+ command: docker info
+ register: docker_info
+
+ - name: Show docker info
+ debug:
+ var: docker_info.stdout_lines
diff --git a/playbooks/adhoc/grow_docker_vg/filter_plugins/oo_filters.py b/playbooks/adhoc/grow_docker_vg/filter_plugins/oo_filters.py
new file mode 100644
index 000000000..d0264cde9
--- /dev/null
+++ b/playbooks/adhoc/grow_docker_vg/filter_plugins/oo_filters.py
@@ -0,0 +1,41 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# vim: expandtab:tabstop=4:shiftwidth=4
+'''
+Custom filters for use in openshift-ansible
+'''
+
+import pdb
+
+
+class FilterModule(object):
+ ''' Custom ansible filters '''
+
+ @staticmethod
+ def oo_pdb(arg):
+ ''' This pops you into a pdb instance where arg is the data passed in
+ from the filter.
+ Ex: "{{ hostvars | oo_pdb }}"
+ '''
+ pdb.set_trace()
+ return arg
+
+ @staticmethod
+ def translate_volume_name(volumes, target_volume):
+ '''
+ This filter matches a device string /dev/sdX to /dev/xvdX
+ It will then return the AWS volume ID
+ '''
+ for vol in volumes:
+ translated_name = vol["attachment_set"]["device"].replace("/dev/sd", "/dev/xvd")
+ if target_volume.startswith(translated_name):
+ return vol["id"]
+
+ return None
+
+
+ def filters(self):
+ ''' returns a mapping of filters to methods '''
+ return {
+ "translate_volume_name": self.translate_volume_name,
+ }
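The translate_volume_name filter above is what grow_docker_vg.yml (next in this diff) uses to map the locally visible block device back to its AWS volume id. A minimal sketch of the call site, assuming a registered variable holding ec2_vol output in list mode and a hypothetical device path:

  - name: Resolve the AWS volume id behind /dev/xvdb1 (illustrative values)
    set_fact:
      old_docker_volume_id: "{{ attached_volumes.volumes | translate_volume_name('/dev/xvdb1') }}"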
diff --git a/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml b/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml
new file mode 100644
index 000000000..63d473146
--- /dev/null
+++ b/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml
@@ -0,0 +1,206 @@
+---
+# This playbook grows the docker VG on a node by:
+# * add a new volume
+# * add volume to the existing VG.
+# * pv move to the new volume.
+# * remove old volume
+# * detach volume
+# * mark old volume in AWS with "REMOVE ME" tag
+# * grow docker LVM to 90% of the VG
+#
+# To run:
+# 1. Source your AWS credentials (make sure it's the corresponding AWS account) into your environment
+# export AWS_ACCESS_KEY_ID='XXXXX'
+# export AWS_SECRET_ACCESS_KEY='XXXXXX'
+#
+# 2. run the playbook:
+# ansible-playbook -e 'cli_tag_name=<tag-name>' grow_docker_vg.yml
+#
+# Example:
+# ansible-playbook -e 'cli_tag_name=ops-compute-12345' grow_docker_vg.yml
+#
+# Notes:
+# * By default this will create a 55GB GP2 volume. This can be overridden with the "-e 'cli_volume_size=100'" variable
+# * This does a GP2 by default. Support for Provisioned IOPS has not been added
+# * This will assign the new volume to /dev/xvdc. This is not variablized, yet.
+# * This can be done with NO downtime on the host
+# * This playbook assumes that there is a Logical Volume that is installed and called "docker-pool". This is
+# the LV that gets created via the "docker-storage-setup" command
+#
+
+- name: Grow the docker volume group
+ hosts: "tag_Name_{{ cli_tag_name }}"
+ user: root
+ connection: ssh
+ gather_facts: no
+
+ vars:
+ cli_volume_type: gp2
+ cli_volume_size: 55
+# cli_volume_iops: "{{ 30 * cli_volume_size }}"
+
+ pre_tasks:
+ - fail:
+ msg: "This playbook requires {{item}} to be set."
+ when: "{{ item }} is not defined or {{ item }} == ''"
+ with_items:
+ - cli_tag_name
+ - cli_volume_size
+
+ - debug:
+ var: hosts
+
+ - name: start docker
+ service:
+ name: docker
+ state: started
+
+ - name: Determine if Storage Driver (docker info) is devicemapper
+ shell: docker info | grep 'Storage Driver:.*devicemapper'
+ register: device_mapper_check
+ ignore_errors: yes
+
+ - debug:
+ var: device_mapper_check
+
+ - name: fail if we don't detect devicemapper
+ fail:
+ msg: The "Storage Driver" in "docker info" is not set to "devicemapper"! Please investigate manually.
+ when: device_mapper_check.rc == 1
+
+ # docker-storage-setup creates a docker-pool as the lvm. I am using docker-pool lvm to test
+ # and find the volume group.
+ - name: Attempt to find the Volume Group that docker is using
+ shell: lvs | grep docker-pool | awk '{print $2}'
+ register: docker_vg_name
+ ignore_errors: yes
+
+ - debug:
+ var: docker_vg_name
+
+ - name: fail if we don't find a docker volume group
+ fail:
+ msg: Unable to find docker volume group. Please investigate manually.
+ when: docker_vg_name.stdout_lines|length != 1
+
+ # docker-storage-setup creates a docker-pool as the lvm. I am using docker-pool lvm to test
+ # and find the physical volume.
+  - name: Attempt to find the Physical Volume that docker is using
+ shell: "pvs | grep {{ docker_vg_name.stdout }} | awk '{print $1}'"
+ register: docker_pv_name
+ ignore_errors: yes
+
+ - debug:
+ var: docker_pv_name
+
+ - name: fail if we don't find a docker physical volume
+ fail:
+ msg: Unable to find docker physical volume. Please investigate manually.
+ when: docker_pv_name.stdout_lines|length != 1
+
+
+ - name: get list of volumes from AWS
+ delegate_to: localhost
+ ec2_vol:
+ state: list
+ instance: "{{ ec2_id }}"
+ region: "{{ ec2_region }}"
+ register: attached_volumes
+
+ - debug: var=attached_volumes
+
+ - name: get volume id of current docker volume
+ set_fact:
+ old_docker_volume_id: "{{ attached_volumes.volumes | translate_volume_name(docker_pv_name.stdout) }}"
+
+ - debug: var=old_docker_volume_id
+
+ - name: check to see if /dev/xvdc exists
+ command: test -e /dev/xvdc
+ register: xvdc_check
+ ignore_errors: yes
+
+ - debug: var=xvdc_check
+
+ - name: fail if /dev/xvdc already exists
+ fail:
+ msg: /dev/xvdc already exists. Please investigate
+ when: xvdc_check.rc == 0
+
+ - name: Create a volume and attach it
+ delegate_to: localhost
+ ec2_vol:
+ state: present
+ instance: "{{ ec2_id }}"
+ region: "{{ ec2_region }}"
+ volume_size: "{{ cli_volume_size | default(30, True)}}"
+ volume_type: "{{ cli_volume_type }}"
+ device_name: /dev/xvdc
+ register: create_volume
+
+ - debug: var=create_volume
+
+ - name: Fail when problems creating volumes and attaching
+ fail:
+ msg: "Failed to create or attach volume msg: {{ create_volume.msg }}"
+ when: create_volume.msg is defined
+
+ - name: tag the vol with a name
+ delegate_to: localhost
+ ec2_tag: region={{ ec2_region }} resource={{ create_volume.volume_id }}
+ args:
+ tags:
+ Name: "{{ ec2_tag_Name }}"
+ env: "{{ ec2_tag_environment }}"
+ register: voltags
+
+ - name: check for attached drive
+ command: test -b /dev/xvdc
+ register: attachment_check
+ until: attachment_check.rc == 0
+ retries: 30
+ delay: 2
+
+ - name: partition the new drive and make it lvm
+ command: parted /dev/xvdc --script -- mklabel msdos mkpart primary 0% 100% set 1 lvm
+
+ - name: pvcreate /dev/xvdc
+ command: pvcreate /dev/xvdc1
+
+ - name: Extend the docker volume group
+ command: vgextend "{{ docker_vg_name.stdout }}" /dev/xvdc1
+
+ - name: pvmove onto new volume
+ command: "pvmove {{ docker_pv_name.stdout }} /dev/xvdc1"
+ async: 43200
+ poll: 10
+
+ - name: Remove the old docker drive from the volume group
+ command: "vgreduce {{ docker_vg_name.stdout }} {{ docker_pv_name.stdout }}"
+
+ - name: Remove the pv from the old drive
+ command: "pvremove {{ docker_pv_name.stdout }}"
+
+ - name: Extend the docker lvm
+ command: "lvextend -l '90%VG' /dev/{{ docker_vg_name.stdout }}/docker-pool"
+
+ - name: detach old docker volume
+ delegate_to: localhost
+ ec2_vol:
+ region: "{{ ec2_region }}"
+ id: "{{ old_docker_volume_id }}"
+ instance: None
+
+  - name: tag the old vol with a "REMOVE ME" label
+ delegate_to: localhost
+ ec2_tag: region={{ ec2_region }} resource={{old_docker_volume_id}}
+ args:
+ tags:
+ Name: "{{ ec2_tag_Name }} REMOVE ME"
+ register: voltags
+
+ - name: Update the /etc/sysconfig/docker-storage-setup with new device
+ lineinfile:
+ dest: /etc/sysconfig/docker-storage-setup
+ regexp: ^DEVS=
+ line: DEVS=/dev/xvdc
diff --git a/playbooks/adhoc/s3_registry/s3_registry.j2 b/playbooks/adhoc/s3_registry/s3_registry.j2
new file mode 100644
index 000000000..acfa89515
--- /dev/null
+++ b/playbooks/adhoc/s3_registry/s3_registry.j2
@@ -0,0 +1,20 @@
+version: 0.1
+log:
+ level: debug
+http:
+ addr: :5000
+storage:
+ cache:
+ layerinfo: inmemory
+ s3:
+ accesskey: {{ aws_access_key }}
+ secretkey: {{ aws_secret_key }}
+ region: us-east-1
+ bucket: {{ clusterid }}-docker
+ encrypt: true
+ secure: true
+ v4auth: true
+ rootdirectory: /registry
+middleware:
+ repository:
+ - name: openshift
diff --git a/playbooks/adhoc/s3_registry/s3_registry.yml b/playbooks/adhoc/s3_registry/s3_registry.yml
new file mode 100644
index 000000000..4dcef1a42
--- /dev/null
+++ b/playbooks/adhoc/s3_registry/s3_registry.yml
@@ -0,0 +1,71 @@
+---
+# This playbook creates an S3 bucket named after your cluster and configures the docker-registry service to use the bucket as its backend storage.
+# Usage:
+# ansible-playbook s3_registry.yml -e clusterid="mycluster"
+#
+# The AWS access/secret keys should be the keys of a separate user (not your main user), containing only the necessary S3 access role.
+# The 'clusterid' is the short name of your cluster.
+
+- hosts: tag_env-host-type_{{ clusterid }}-openshift-master
+ remote_user: root
+ gather_facts: False
+
+ vars:
+ aws_access_key: "{{ lookup('env', 'S3_ACCESS_KEY_ID') }}"
+ aws_secret_key: "{{ lookup('env', 'S3_SECRET_ACCESS_KEY') }}"
+
+ tasks:
+
+ - name: Check for AWS creds
+ fail:
+ msg: "Couldn't find {{ item }} creds in ENV"
+ when: "{{ item }} == ''"
+ with_items:
+ - aws_access_key
+ - aws_secret_key
+
+ - name: Scale down registry
+ command: oc scale --replicas=0 dc/docker-registry
+
+ - name: Create S3 bucket
+ local_action:
+ module: s3 bucket="{{ clusterid }}-docker" mode=create
+
+ - name: Set up registry environment variable
+ command: oc env dc/docker-registry REGISTRY_CONFIGURATION_PATH=/etc/registryconfig/config.yml
+
+ - name: Generate docker registry config
+ template: src="s3_registry.j2" dest="/root/config.yml" owner=root mode=0600
+
+ - name: Determine if new secrets are needed
+ command: oc get secrets
+ register: secrets
+
+ - name: Create registry secrets
+ command: oc secrets new dockerregistry /root/config.yml
+ when: "'dockerregistry' not in secrets.stdout"
+
+ - name: Determine if service account contains secrets
+ command: oc describe serviceaccount/registry
+ register: serviceaccount
+
+ - name: Add secrets to registry service account
+ command: oc secrets add serviceaccount/registry secrets/dockerregistry
+ when: "'dockerregistry' not in serviceaccount.stdout"
+
+ - name: Determine if deployment config contains secrets
+ command: oc volume dc/docker-registry --list
+ register: dc
+
+ - name: Add secrets to registry deployment config
+ command: oc volume dc/docker-registry --add --name=dockersecrets -m /etc/registryconfig --type=secret --secret-name=dockerregistry
+ when: "'dockersecrets' not in dc.stdout"
+
+ - name: Wait for deployment config to take effect before scaling up
+ pause: seconds=30
+
+ - name: Scale up registry
+ command: oc scale --replicas=1 dc/docker-registry
+
+ - name: Delete temporary config file
+ file: path=/root/config.yml state=absent
diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml
new file mode 100644
index 000000000..40db668da
--- /dev/null
+++ b/playbooks/adhoc/uninstall.yml
@@ -0,0 +1,134 @@
+# This deletes *ALL* Origin, Atomic Enterprise Platform and OpenShift
+# Enterprise content installed by ansible. This includes:
+#
+# configuration
+# containers
+# example templates and imagestreams
+# images
+# RPMs
+---
+- hosts:
+ - OSEv3:children
+
+ sudo: yes
+
+ tasks:
+ - service: name={{ item }} state=stopped
+ with_items:
+ - atomic-enterprise-master
+ - atomic-enterprise-node
+ - atomic-openshift-master
+ - atomic-openshift-master-api
+ - atomic-openshift-master-controllers
+ - atomic-openshift-node
+ - etcd
+ - openshift-master
+ - openshift-master-api
+ - openshift-master-controllers
+ - openshift-node
+ - openvswitch
+ - origin-master
+ - origin-master-api
+ - origin-master-controllers
+ - origin-node
+
+ - yum: name={{ item }} state=absent
+ with_items:
+ - atomic-enterprise
+ - atomic-enterprise-master
+ - atomic-enterprise-node
+ - atomic-enterprise-sdn-ovs
+ - atomic-openshift
+ - atomic-openshift-clients
+ - atomic-openshift-master
+ - atomic-openshift-node
+ - atomic-openshift-sdn-ovs
+ - etcd
+ - openshift
+ - openshift-master
+ - openshift-node
+ - openshift-sdn
+ - openshift-sdn-ovs
+ - openvswitch
+ - origin
+ - origin-master
+ - origin-node
+ - origin-sdn-ovs
+ - tuned-profiles-atomic-enterprise-node
+ - tuned-profiles-atomic-openshift-node
+ - tuned-profiles-openshift-node
+ - tuned-profiles-origin-node
+
+ - shell: systemctl reset-failed
+ changed_when: False
+
+ - shell: systemctl daemon-reload
+ changed_when: False
+
+ - shell: find /var/lib/origin/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true
+ changed_when: False
+
+ - shell: find /var/lib/atomic-enterprise/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true
+ changed_when: False
+
+ - shell: find /var/lib/openshift/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true
+ changed_when: False
+
+ - shell: docker rm -f "{{ item }}"-master "{{ item }}"-node
+ changed_when: False
+ failed_when: False
+ with_items:
+ - openshift-enterprise
+ - atomic-enterprise
+ - origin
+
+ - shell: docker ps -a | grep Exited | grep "{{ item }}" | awk '{print $1}'
+ changed_when: False
+ failed_when: False
+ register: exited_containers_to_delete
+ with_items:
+ - aep3/aep
+ - openshift3/ose
+ - openshift/origin
+
+ - shell: "docker rm {{ item.stdout_lines | join(' ') }}"
+ changed_when: False
+ failed_when: False
+ with_items: "{{ exited_containers_to_delete.results }}"
+
+ - shell: docker images | grep {{ item }} | awk '{ print $3 }'
+ changed_when: False
+ failed_when: False
+ register: images_to_delete
+ with_items:
+ - registry.access.redhat.com/openshift3
+ - registry.access.redhat.com/aep3
+ - docker.io/openshift
+
+ - shell: "docker rmi -f {{ item.stdout_lines | join(' ') }}"
+ changed_when: False
+ failed_when: False
+ with_items: "{{ images_to_delete.results }}"
+
+ - file: path={{ item }} state=absent
+ with_items:
+ - /etc/ansible/facts.d/openshift.fact
+ - /etc/atomic-enterprise
+ - /etc/etcd
+ - /etc/openshift
+ - /etc/openshift-sdn
+ - /etc/origin
+ - /etc/sysconfig/atomic-enterprise-master
+ - /etc/sysconfig/atomic-enterprise-node
+ - /etc/sysconfig/atomic-openshift-master
+ - /etc/sysconfig/atomic-openshift-node
+ - /etc/sysconfig/openshift-master
+ - /etc/sysconfig/openshift-node
+ - /etc/sysconfig/origin-master
+ - /etc/sysconfig/origin-node
+ - /root/.kube
+ - /usr/share/openshift/examples
+ - /var/lib/atomic-enterprise
+ - /var/lib/etcd
+ - /var/lib/openshift
+ - /var/lib/origin
diff --git a/playbooks/adhoc/upgrades/README.md b/playbooks/adhoc/upgrades/README.md
new file mode 100644
index 000000000..6de8a970f
--- /dev/null
+++ b/playbooks/adhoc/upgrades/README.md
@@ -0,0 +1,21 @@
+# [NOTE]
+This playbook will re-run installation steps overwriting any local
+modifications. You should ensure that your inventory has been updated with any
+modifications you've made after your initial installation. If you find any items
+that cannot be configured via ansible please open an issue at
+https://github.com/openshift/openshift-ansible
+
+# Overview
+This playbook is available as a technical preview. It currently performs the
+following steps.
+
+ * Upgrades and restarts master services
+ * Upgrades and restarts node services
+ * Applies latest configuration by re-running the installation playbook
+ * Applies the latest cluster policies
+ * Updates the default router if one exists
+ * Updates the default registry if one exists
+ * Updates image streams and quickstarts
+
+# Usage
+ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/adhoc/upgrades/upgrade.yml
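Since upgrade.yml (below) appends openshift_pkg_version verbatim to the package name it passes to yum, pinning the upgrade to a specific build is a matter of an extra var; a hedged example with a hypothetical version string:

ansible-playbook -i ~/ansible-inventory \
  -e openshift_pkg_version='-3.0.2.0' \
  openshift-ansible/playbooks/adhoc/upgrades/upgrade.yml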
diff --git a/playbooks/adhoc/upgrades/filter_plugins b/playbooks/adhoc/upgrades/filter_plugins
new file mode 120000
index 000000000..b0b7a3414
--- /dev/null
+++ b/playbooks/adhoc/upgrades/filter_plugins
@@ -0,0 +1 @@
+../../../filter_plugins/ \ No newline at end of file
diff --git a/playbooks/adhoc/upgrades/lookup_plugins b/playbooks/adhoc/upgrades/lookup_plugins
new file mode 120000
index 000000000..73cafffe5
--- /dev/null
+++ b/playbooks/adhoc/upgrades/lookup_plugins
@@ -0,0 +1 @@
+../../../lookup_plugins/ \ No newline at end of file
diff --git a/playbooks/adhoc/upgrades/roles b/playbooks/adhoc/upgrades/roles
new file mode 120000
index 000000000..e2b799b9d
--- /dev/null
+++ b/playbooks/adhoc/upgrades/roles
@@ -0,0 +1 @@
+../../../roles/ \ No newline at end of file
diff --git a/playbooks/adhoc/upgrades/upgrade.yml b/playbooks/adhoc/upgrades/upgrade.yml
new file mode 100644
index 000000000..56a1df860
--- /dev/null
+++ b/playbooks/adhoc/upgrades/upgrade.yml
@@ -0,0 +1,128 @@
+---
+- name: Re-Run cluster configuration to apply latest configuration changes
+ include: ../../common/openshift-cluster/config.yml
+ vars:
+ g_etcd_group: "{{ 'etcd' }}"
+ g_masters_group: "{{ 'masters' }}"
+ g_nodes_group: "{{ 'nodes' }}"
+ openshift_cluster_id: "{{ cluster_id | default('default') }}"
+ openshift_deployment_type: "{{ deployment_type }}"
+
+- name: Upgrade masters
+ hosts: masters
+ vars:
+ openshift_version: "{{ openshift_pkg_version | default('') }}"
+ tasks:
+ - name: Upgrade master packages
+ yum: pkg={{ openshift.common.service_type }}-master{{ openshift_version }} state=latest
+ - name: Restart master services
+ service: name="{{ openshift.common.service_type}}-master" state=restarted
+
+- name: Upgrade nodes
+ hosts: nodes
+ vars:
+ openshift_version: "{{ openshift_pkg_version | default('') }}"
+ tasks:
+ - name: Upgrade node packages
+ yum: pkg={{ openshift.common.service_type }}-node{{ openshift_version }} state=latest
+ - name: Restart node services
+ service: name="{{ openshift.common.service_type }}-node" state=restarted
+
+- name: Determine new master version
+ hosts: oo_first_master
+ tasks:
+ - name: Determine new version
+ command: >
+ rpm -q --queryformat '%{version}' {{ openshift.common.service_type }}-master
+ register: _new_version
+
+- name: Ensure AOS 3.0.2 or Origin 1.0.6
+ hosts: oo_first_master
+ tasks:
+  - fail:
+      msg: This playbook requires Origin 1.0.6 or Atomic OpenShift 3.0.2 or later
+    when: _new_version.stdout | version_compare('1.0.6','<') or ( _new_version.stdout | version_compare('3.0','>=') and _new_version.stdout | version_compare('3.0.2','<') )
+
+- name: Update cluster policy
+ hosts: oo_first_master
+ tasks:
+ - name: oadm policy reconcile-cluster-roles --confirm
+ command: >
+ {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ policy reconcile-cluster-roles --confirm
+
+- name: Update cluster policy bindings
+ hosts: oo_first_master
+ tasks:
+ - name: oadm policy reconcile-cluster-role-bindings --confirm
+ command: >
+ {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ policy reconcile-cluster-role-bindings
+ --exclude-groups=system:authenticated
+ --exclude-groups=system:unauthenticated
+ --exclude-users=system:anonymous
+ --additive-only=true --confirm
+ when: ( _new_version.stdout | version_compare('1.0.6', '>') and _new_version.stdout | version_compare('3.0','<') ) or _new_version.stdout | version_compare('3.0.2','>')
+
+- name: Upgrade default router
+ hosts: oo_first_master
+ vars:
+ - router_image: "{{ openshift.master.registry_url | replace( '${component}', 'haproxy-router' ) | replace ( '${version}', 'v' + _new_version.stdout ) }}"
+ - oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig"
+ tasks:
+ - name: Check for default router
+ command: >
+ {{ oc_cmd }} get -n default dc/router
+ register: _default_router
+ failed_when: false
+ changed_when: false
+ - name: Check for allowHostNetwork and allowHostPorts
+ when: _default_router.rc == 0
+ shell: >
+ {{ oc_cmd }} get -o yaml scc/privileged | /usr/bin/grep -e allowHostPorts -e allowHostNetwork
+ register: _scc
+ - name: Grant allowHostNetwork and allowHostPorts
+ when:
+ - _default_router.rc == 0
+ - "'false' in _scc.stdout"
+ command: >
+ {{ oc_cmd }} patch scc/privileged -p '{"allowHostPorts":true,"allowHostNetwork":true}' --loglevel=9
+ - name: Update deployment config to 1.0.4/3.0.1 spec
+ when: _default_router.rc == 0
+ command: >
+ {{ oc_cmd }} patch dc/router -p
+ '{"spec":{"strategy":{"rollingParams":{"updatePercent":-10},"spec":{"serviceAccount":"router","serviceAccountName":"router"}}}}'
+ - name: Switch to hostNetwork=true
+ when: _default_router.rc == 0
+ command: >
+ {{ oc_cmd }} patch dc/router -p '{"spec":{"template":{"spec":{"hostNetwork":true}}}}'
+ - name: Update router image to current version
+ when: _default_router.rc == 0
+ command: >
+ {{ oc_cmd }} patch dc/router -p
+ '{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}"}]}}}}'
+
+- name: Upgrade default registry
+ hosts: oo_first_master
+ vars:
+ - registry_image: "{{ openshift.master.registry_url | replace( '${component}', 'docker-registry' ) | replace ( '${version}', 'v' + _new_version.stdout ) }}"
+ - oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig"
+ tasks:
+ - name: Check for default registry
+ command: >
+ {{ oc_cmd }} get -n default dc/docker-registry
+ register: _default_registry
+ failed_when: false
+ changed_when: false
+ - name: Update registry image to current version
+ when: _default_registry.rc == 0
+ command: >
+ {{ oc_cmd }} patch dc/docker-registry -p
+ '{"spec":{"template":{"spec":{"containers":[{"name":"registry","image":"{{ registry_image }}"}]}}}}'
+
+- name: Update image streams and templates
+ hosts: oo_first_master
+ vars:
+ openshift_examples_import_command: "update"
+ openshift_deployment_type: "{{ deployment_type }}"
+ roles:
+ - openshift_examples
diff --git a/playbooks/adhoc/zabbix_setup/clean_zabbix.yml b/playbooks/adhoc/zabbix_setup/clean_zabbix.yml
index a31cbef65..1e884240a 100644
--- a/playbooks/adhoc/zabbix_setup/clean_zabbix.yml
+++ b/playbooks/adhoc/zabbix_setup/clean_zabbix.yml
@@ -2,50 +2,57 @@
- hosts: localhost
gather_facts: no
vars:
- g_zserver: http://localhost/zabbix/api_jsonrpc.php
- g_zuser: Admin
- g_zpassword: zabbix
+ g_server: http://localhost:8080/zabbix/api_jsonrpc.php
+ g_user: ''
+ g_password: ''
+
roles:
- - ../../../roles/os_zabbix
- post_tasks:
+ - lib_zabbix
- - zbx_template:
- server: "{{ g_zserver }}"
- user: "{{ g_zuser }}"
- password: "{{ g_zpassword }}"
+ post_tasks:
+ - name: CLEAN List template for heartbeat
+ zbx_template:
+ zbx_server: "{{ g_server }}"
+ zbx_user: "{{ g_user }}"
+ zbx_password: "{{ g_password }}"
state: list
name: 'Template Heartbeat'
register: templ_heartbeat
- - zbx_template:
- server: "{{ g_zserver }}"
- user: "{{ g_zuser }}"
- password: "{{ g_zpassword }}"
+ - name: CLEAN List template app zabbix server
+ zbx_template:
+ zbx_server: "{{ g_server }}"
+ zbx_user: "{{ g_user }}"
+ zbx_password: "{{ g_password }}"
state: list
name: 'Template App Zabbix Server'
register: templ_zabbix_server
- - zbx_template:
- server: "{{ g_zserver }}"
- user: "{{ g_zuser }}"
- password: "{{ g_zpassword }}"
+ - name: CLEAN List template app zabbix server
+ zbx_template:
+ zbx_server: "{{ g_server }}"
+ zbx_user: "{{ g_user }}"
+ zbx_password: "{{ g_password }}"
state: list
name: 'Template App Zabbix Agent'
register: templ_zabbix_agent
- - zbx_template:
- server: "{{ g_zserver }}"
- user: "{{ g_zuser }}"
- password: "{{ g_zpassword }}"
+ - name: CLEAN List all templates
+ zbx_template:
+ zbx_server: "{{ g_server }}"
+ zbx_user: "{{ g_user }}"
+ zbx_password: "{{ g_password }}"
state: list
register: templates
- debug: var=templ_heartbeat.results
- - zbx_template:
- server: "{{ g_zserver }}"
- user: "{{ g_zuser }}"
- password: "{{ g_zpassword }}"
+ - name: Remove templates if heartbeat template is missing
+ zbx_template:
+ zbx_server: "{{ g_server }}"
+ zbx_user: "{{ g_user }}"
+ zbx_password: "{{ g_password }}"
+ name: "{{ item }}"
state: absent
with_items: "{{ templates.results | difference(templ_zabbix_agent.results) | difference(templ_zabbix_server.results) | oo_collect('host') }}"
when: templ_heartbeat.results | length == 0
diff --git a/playbooks/adhoc/zabbix_setup/create_template.yml b/playbooks/adhoc/zabbix_setup/create_template.yml
deleted file mode 100644
index 50fff53b2..000000000
--- a/playbooks/adhoc/zabbix_setup/create_template.yml
+++ /dev/null
@@ -1,57 +0,0 @@
----
-- debug: var=ctp_template
-
-- name: Create Template
- zbx_template:
- server: "{{ ctp_zserver }}"
- user: "{{ ctp_zuser }}"
- password: "{{ ctp_zpassword }}"
- name: "{{ ctp_template.name }}"
- register: ctp_created_template
-
-- debug: var=ctp_created_template
-
-#- name: Create Application
-# zbxapi:
-# server: "{{ ctp_zserver }}"
-# user: "{{ ctp_zuser }}"
-# password: "{{ ctp_zpassword }}"
-# zbx_class: Application
-# state: present
-# params:
-# name: "{{ ctp_template.application.name}}"
-# hostid: "{{ ctp_created_template.results[0].templateid }}"
-# search:
-# name: "{{ ctp_template.application.name}}"
-# register: ctp_created_application
-
-#- debug: var=ctp_created_application
-
-- name: Create Items
- zbx_item:
- server: "{{ ctp_zserver }}"
- user: "{{ ctp_zuser }}"
- password: "{{ ctp_zpassword }}"
- key: "{{ item.key }}"
- name: "{{ item.name | default(item.key, true) }}"
- value_type: "{{ item.value_type | default('int') }}"
- template_name: "{{ ctp_template.name }}"
- with_items: ctp_template.zitems
- register: ctp_created_items
-
-#- debug: var=ctp_created_items
-
-- name: Create Triggers
- zbx_trigger:
- server: "{{ ctp_zserver }}"
- user: "{{ ctp_zuser }}"
- password: "{{ ctp_zpassword }}"
- description: "{{ item.description }}"
- expression: "{{ item.expression }}"
- priority: "{{ item.priority }}"
- with_items: ctp_template.ztriggers
- when: ctp_template.ztriggers is defined
-
-#- debug: var=ctp_created_triggers
-
-
diff --git a/playbooks/adhoc/zabbix_setup/filter_plugins b/playbooks/adhoc/zabbix_setup/filter_plugins
index 99a95e4ca..b0b7a3414 120000
--- a/playbooks/adhoc/zabbix_setup/filter_plugins
+++ b/playbooks/adhoc/zabbix_setup/filter_plugins
@@ -1 +1 @@
-../../../filter_plugins \ No newline at end of file
+../../../filter_plugins/ \ No newline at end of file
diff --git a/playbooks/adhoc/zabbix_setup/oo-clean-zaio.yml b/playbooks/adhoc/zabbix_setup/oo-clean-zaio.yml
new file mode 100755
index 000000000..0fe65b338
--- /dev/null
+++ b/playbooks/adhoc/zabbix_setup/oo-clean-zaio.yml
@@ -0,0 +1,7 @@
+#!/usr/bin/env ansible-playbook
+---
+- include: clean_zabbix.yml
+ vars:
+ g_server: http://localhost/zabbix/api_jsonrpc.php
+ g_user: Admin
+ g_password: zabbix
diff --git a/playbooks/adhoc/zabbix_setup/oo-config-zaio.yml b/playbooks/adhoc/zabbix_setup/oo-config-zaio.yml
new file mode 100755
index 000000000..e2b8150c6
--- /dev/null
+++ b/playbooks/adhoc/zabbix_setup/oo-config-zaio.yml
@@ -0,0 +1,13 @@
+#!/usr/bin/ansible-playbook
+---
+- hosts: localhost
+ gather_facts: no
+ vars:
+ g_server: http://localhost/zabbix/api_jsonrpc.php
+ g_user: Admin
+ g_password: zabbix
+ roles:
+ - role: os_zabbix
+ ozb_server: "{{ g_server }}"
+ ozb_user: "{{ g_user }}"
+ ozb_password: "{{ g_password }}"
diff --git a/playbooks/adhoc/zabbix_setup/roles b/playbooks/adhoc/zabbix_setup/roles
index e2b799b9d..20c4c58cf 120000
--- a/playbooks/adhoc/zabbix_setup/roles
+++ b/playbooks/adhoc/zabbix_setup/roles
@@ -1 +1 @@
-../../../roles/ \ No newline at end of file
+../../../roles \ No newline at end of file
diff --git a/playbooks/adhoc/zabbix_setup/setup_zabbix.yml b/playbooks/adhoc/zabbix_setup/setup_zabbix.yml
deleted file mode 100644
index 1729194b5..000000000
--- a/playbooks/adhoc/zabbix_setup/setup_zabbix.yml
+++ /dev/null
@@ -1,38 +0,0 @@
----
-- hosts: localhost
- gather_facts: no
- vars_files:
- - vars/template_heartbeat.yml
- - vars/template_os_linux.yml
- vars:
- g_zserver: http://localhost/zabbix/api_jsonrpc.php
- g_zuser: Admin
- g_zpassword: zabbix
- roles:
- - ../../../roles/os_zabbix
- post_tasks:
- - zbx_template:
- server: "{{ g_zserver }}"
- user: "{{ g_zuser }}"
- password: "{{ g_zpassword }}"
- state: list
- register: templates
-
- - debug: var=templates
-
- - name: Include Template
- include: create_template.yml
- vars:
- ctp_template: "{{ g_template_heartbeat }}"
- ctp_zserver: "{{ g_zserver }}"
- ctp_zuser: "{{ g_zuser }}"
- ctp_zpassword: "{{ g_zpassword }}"
-
- - name: Include Template
- include: create_template.yml
- vars:
- ctp_template: "{{ g_template_os_linux }}"
- ctp_zserver: "{{ g_zserver }}"
- ctp_zuser: "{{ g_zuser }}"
- ctp_zpassword: "{{ g_zpassword }}"
-
diff --git a/playbooks/adhoc/zabbix_setup/vars/template_heartbeat.yml b/playbooks/adhoc/zabbix_setup/vars/template_heartbeat.yml
deleted file mode 100644
index 22cc75554..000000000
--- a/playbooks/adhoc/zabbix_setup/vars/template_heartbeat.yml
+++ /dev/null
@@ -1,11 +0,0 @@
----
-g_template_heartbeat:
- name: Template Heartbeat
- zitems:
- - name: Heartbeat Ping
- hostid:
- key: heartbeat.ping
- ztriggers:
- - description: 'Heartbeat.ping has failed on {HOST.NAME}'
- expression: '{Template Heartbeat:heartbeat.ping.last()}<>0'
- priority: avg
diff --git a/playbooks/adhoc/zabbix_setup/vars/template_host.yml b/playbooks/adhoc/zabbix_setup/vars/template_host.yml
deleted file mode 100644
index e7cc667cb..000000000
--- a/playbooks/adhoc/zabbix_setup/vars/template_host.yml
+++ /dev/null
@@ -1,27 +0,0 @@
----
-g_template_host:
- params:
- name: Template Host
- host: Template Host
- groups:
- - groupid: 1 # FIXME (not real)
- output: extend
- search:
- name: Template Host
- zitems:
- - name: Host Ping
- hostid:
- key_: host.ping
- type: 2
- value_type: 0
- output: extend
- search:
- key_: host.ping
- ztriggers:
- - description: 'Host ping has failed on {HOST.NAME}'
- expression: '{Template Host:host.ping.last()}<>0'
- priority: 3
- searchWildcardsEnabled: True
- search:
- description: 'Host ping has failed on*'
- expandExpression: True
diff --git a/playbooks/adhoc/zabbix_setup/vars/template_master.yml b/playbooks/adhoc/zabbix_setup/vars/template_master.yml
deleted file mode 100644
index 5f9b41a4f..000000000
--- a/playbooks/adhoc/zabbix_setup/vars/template_master.yml
+++ /dev/null
@@ -1,27 +0,0 @@
----
-g_template_master:
- params:
- name: Template Master
- host: Template Master
- groups:
- - groupid: 1 # FIXME (not real)
- output: extend
- search:
- name: Template Master
- zitems:
- - name: Master Etcd Ping
- hostid:
- key_: master.etcd.ping
- type: 2
- value_type: 0
- output: extend
- search:
- key_: master.etcd.ping
- ztriggers:
- - description: 'Master Etcd ping has failed on {HOST.NAME}'
- expression: '{Template Master:master.etcd.ping.last()}<>0'
- priority: 3
- searchWildcardsEnabled: True
- search:
- description: 'Master Etcd ping has failed on*'
- expandExpression: True
diff --git a/playbooks/adhoc/zabbix_setup/vars/template_node.yml b/playbooks/adhoc/zabbix_setup/vars/template_node.yml
deleted file mode 100644
index 98c343a24..000000000
--- a/playbooks/adhoc/zabbix_setup/vars/template_node.yml
+++ /dev/null
@@ -1,27 +0,0 @@
----
-g_template_node:
- params:
- name: Template Node
- host: Template Node
- groups:
- - groupid: 1 # FIXME (not real)
- output: extend
- search:
- name: Template Node
- zitems:
- - name: Kubelet Ping
- hostid:
- key_: kubelet.ping
- type: 2
- value_type: 0
- output: extend
- search:
- key_: kubelet.ping
- ztriggers:
- - description: 'Kubelet ping has failed on {HOST.NAME}'
- expression: '{Template Node:kubelet.ping.last()}<>0'
- priority: 3
- searchWildcardsEnabled: True
- search:
- description: 'Kubelet ping has failed on*'
- expandExpression: True
diff --git a/playbooks/adhoc/zabbix_setup/vars/template_os_linux.yml b/playbooks/adhoc/zabbix_setup/vars/template_os_linux.yml
deleted file mode 100644
index 9cc038ffa..000000000
--- a/playbooks/adhoc/zabbix_setup/vars/template_os_linux.yml
+++ /dev/null
@@ -1,90 +0,0 @@
----
-g_template_os_linux:
- name: Template OS Linux
- zitems:
- - key: kernel.uname.sysname
- value_type: string
-
- - key: kernel.all.cpu.wait.total
- value_type: int
-
- - key: kernel.all.cpu.irq.hard
- value_type: int
-
- - key: kernel.all.cpu.idle
- value_type: int
-
- - key: kernel.uname.distro
- value_type: string
-
- - key: kernel.uname.nodename
- value_type: string
-
- - key: kernel.all.cpu.irq.soft
- value_type: int
-
- - key: kernel.all.load.15_minute
- value_type: float
-
- - key: kernel.all.cpu.sys
- value_type: int
-
- - key: kernel.all.load.5_minute
- value_type: float
-
- - key: mem.freemem
- value_type: int
-
- - key: kernel.all.cpu.nice
- value_type: int
-
- - key: mem.util.bufmem
- value_type: int
-
- - key: swap.used
- value_type: int
-
- - key: kernel.all.load.1_minute
- value_type: float
-
- - key: kernel.uname.version
- value_type: string
-
- - key: swap.length
- value_type: int
-
- - key: mem.physmem
- value_type: int
-
- - key: kernel.all.uptime
- value_type: int
-
- - key: swap.free
- value_type: int
-
- - key: mem.util.used
- value_type: int
-
- - key: kernel.all.cpu.user
- value_type: int
-
- - key: kernel.uname.machine
- value_type: string
-
- - key: hinv.ncpu
- value_type: int
-
- - key: mem.util.cached
- value_type: int
-
- - key: kernel.all.cpu.steal
- value_type: int
-
- - key: kernel.all.pswitch
- value_type: int
-
- - key: kernel.uname.release
- value_type: string
-
- - key: proc.nprocs
- value_type: int
diff --git a/playbooks/adhoc/zabbix_setup/vars/template_router.yml b/playbooks/adhoc/zabbix_setup/vars/template_router.yml
deleted file mode 100644
index 4dae7da1e..000000000
--- a/playbooks/adhoc/zabbix_setup/vars/template_router.yml
+++ /dev/null
@@ -1,27 +0,0 @@
----
-g_template_router:
- params:
- name: Template Router
- host: Template Router
- groups:
- - groupid: 1 # FIXME (not real)
- output: extend
- search:
- name: Template Router
- zitems:
- - name: Router Backends down
- hostid:
- key_: router.backends.down
- type: 2
- value_type: 0
- output: extend
- search:
- key_: router.backends.down
- ztriggers:
- - description: 'Number of router backends down on {HOST.NAME}'
- expression: '{Template Router:router.backends.down.last()}<>0'
- priority: 3
- searchWildcardsEnabled: True
- search:
- description: 'Number of router backends down on {HOST.NAME}'
- expandExpression: True
diff --git a/playbooks/aws/openshift-cluster/config.yml b/playbooks/aws/openshift-cluster/config.yml
index 8106d5da9..a8e3e27bb 100644
--- a/playbooks/aws/openshift-cluster/config.yml
+++ b/playbooks/aws/openshift-cluster/config.yml
@@ -17,7 +17,7 @@
g_sudo: "{{ hostvars.localhost.g_sudo_tmp }}"
g_nodeonmaster: true
openshift_cluster_id: "{{ cluster_id }}"
- openshift_debug_level: 4
+ openshift_debug_level: 2
openshift_deployment_type: "{{ deployment_type }}"
openshift_hostname: "{{ ec2_private_ip_address }}"
openshift_public_hostname: "{{ ec2_ip_address }}"
diff --git a/playbooks/aws/openshift-cluster/launch.yml b/playbooks/aws/openshift-cluster/launch.yml
index a89275597..786918929 100644
--- a/playbooks/aws/openshift-cluster/launch.yml
+++ b/playbooks/aws/openshift-cluster/launch.yml
@@ -55,9 +55,4 @@
when: master_names is defined and master_names.0 is defined
- include: update.yml
-
-- include: ../../common/openshift-cluster/create_services.yml
- vars:
- g_svc_master: "{{ service_master }}"
-
- include: list.yml
diff --git a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml
index 236d84e74..9c699120b 100644
--- a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml
+++ b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml
@@ -147,6 +147,35 @@
tag_host-type_{{ host_type }}, tag_env-host-type_{{ env_host_type }},
tag_sub-host-type_{{ sub_host_type }}"
+- set_fact:
+ node_label:
+ region: "{{ec2_region}}"
+ type: "{{sub_host_type}}"
+ when: host_type == "node"
+
+- set_fact:
+ node_label:
+ region: "{{ec2_region}}"
+ type: "{{host_type}}"
+ when: host_type != "node"
+
+- set_fact:
+ logrotate:
+ - name: syslog
+ path: "/var/log/cron
+ \n/var/log/maillog
+ \n/var/log/messages
+ \n/var/log/secure
+ \n/var/log/spooler \n"
+ options:
+ - daily
+ - rotate 7
+ - compress
+ - sharedscripts
+ - missingok
+ scripts:
+ postrotate: "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true"
+
- name: Add new instances groups and variables
add_host:
hostname: "{{ item.0 }}"
@@ -156,6 +185,8 @@
groups: "{{ instance_groups }}"
ec2_private_ip_address: "{{ item.1.private_ip }}"
ec2_ip_address: "{{ item.1.public_ip }}"
+ openshift_node_labels: "{{ node_label }}"
+ logrotate_scripts: "{{ logrotate }}"
with_together:
- instances
- ec2.instances
diff --git a/playbooks/aws/openshift-cluster/vars.online.int.yml b/playbooks/aws/openshift-cluster/vars.online.int.yml
index bb18e13b0..2e2f25ccd 100644
--- a/playbooks/aws/openshift-cluster/vars.online.int.yml
+++ b/playbooks/aws/openshift-cluster/vars.online.int.yml
@@ -3,7 +3,7 @@ ec2_image: ami-9101c8fa
ec2_image_name: libra-ops-rhel7*
ec2_region: us-east-1
ec2_keypair: mmcgrath_libra
-ec2_master_instance_type: t2.small
+ec2_master_instance_type: t2.medium
ec2_master_security_groups: [ 'integration', 'integration-master' ]
ec2_infra_instance_type: c4.large
ec2_infra_security_groups: [ 'integration', 'integration-infra' ]
diff --git a/playbooks/aws/openshift-cluster/vars.online.prod.yml b/playbooks/aws/openshift-cluster/vars.online.prod.yml
index bbef9cc56..18a53e12e 100644
--- a/playbooks/aws/openshift-cluster/vars.online.prod.yml
+++ b/playbooks/aws/openshift-cluster/vars.online.prod.yml
@@ -3,7 +3,7 @@ ec2_image: ami-9101c8fa
ec2_image_name: libra-ops-rhel7*
ec2_region: us-east-1
ec2_keypair: mmcgrath_libra
-ec2_master_instance_type: t2.small
+ec2_master_instance_type: t2.medium
ec2_master_security_groups: [ 'production', 'production-master' ]
ec2_infra_instance_type: c4.large
ec2_infra_security_groups: [ 'production', 'production-infra' ]
diff --git a/playbooks/aws/openshift-cluster/vars.online.stage.yml b/playbooks/aws/openshift-cluster/vars.online.stage.yml
index 9008a55ba..1f9ac4252 100644
--- a/playbooks/aws/openshift-cluster/vars.online.stage.yml
+++ b/playbooks/aws/openshift-cluster/vars.online.stage.yml
@@ -3,7 +3,7 @@ ec2_image: ami-9101c8fa
ec2_image_name: libra-ops-rhel7*
ec2_region: us-east-1
ec2_keypair: mmcgrath_libra
-ec2_master_instance_type: t2.small
+ec2_master_instance_type: t2.medium
ec2_master_security_groups: [ 'stage', 'stage-master' ]
ec2_infra_instance_type: c4.large
ec2_infra_security_groups: [ 'stage', 'stage-infra' ]
diff --git a/playbooks/byo/openshift-cluster/config.yml b/playbooks/byo/openshift-cluster/config.yml
index 2ee1d50a7..9e50a4a18 100644
--- a/playbooks/byo/openshift-cluster/config.yml
+++ b/playbooks/byo/openshift-cluster/config.yml
@@ -5,5 +5,5 @@
g_masters_group: "{{ 'masters' }}"
g_nodes_group: "{{ 'nodes' }}"
openshift_cluster_id: "{{ cluster_id | default('default') }}"
- openshift_debug_level: 4
+ openshift_debug_level: 2
openshift_deployment_type: "{{ deployment_type }}"
diff --git a/playbooks/byo/openshift_facts.yml b/playbooks/byo/openshift_facts.yml
index cd282270f..6d7c12fd4 100644
--- a/playbooks/byo/openshift_facts.yml
+++ b/playbooks/byo/openshift_facts.yml
@@ -1,5 +1,5 @@
---
-- name: Gather OpenShift facts
+- name: Gather Cluster facts
hosts: all
gather_facts: no
roles:
diff --git a/playbooks/common/openshift-cluster/create_services.yml b/playbooks/common/openshift-cluster/create_services.yml
deleted file mode 100644
index e70709d19..000000000
--- a/playbooks/common/openshift-cluster/create_services.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-- name: Deploy OpenShift Services
- hosts: "{{ g_svc_master }}"
- connection: ssh
- gather_facts: yes
- roles:
- - openshift_registry
- - openshift_router
diff --git a/playbooks/common/openshift-etcd/config.yml b/playbooks/common/openshift-etcd/config.yml
index 3cc561ba0..952960652 100644
--- a/playbooks/common/openshift-etcd/config.yml
+++ b/playbooks/common/openshift-etcd/config.yml
@@ -85,6 +85,7 @@
when: etcd_server_certs_missing
roles:
- etcd
+ - role: nickhammond.logrotate
- name: Delete temporary directory on localhost
hosts: localhost
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
index 904ad2dab..0d78eca30 100644
--- a/playbooks/common/openshift-master/config.yml
+++ b/playbooks/common/openshift-master/config.yml
@@ -37,7 +37,7 @@
public_console_url: "{{ openshift_master_public_console_url | default(None) }}"
- name: Check status of external etcd certificatees
stat:
- path: "/etc/openshift/master/{{ item }}"
+ path: "{{ openshift.common.config_base }}/master/{{ item }}"
with_items:
- master.etcd-client.crt
- master.etcd-ca.crt
@@ -47,7 +47,7 @@
| map(attribute='stat.exists')
| list | intersect([false])}}"
etcd_cert_subdir: openshift-master-{{ openshift.common.hostname }}
- etcd_cert_config_dir: /etc/openshift/master
+ etcd_cert_config_dir: "{{ openshift.common.config_base }}/master"
etcd_cert_prefix: master.etcd-
when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config
@@ -96,7 +96,7 @@
tasks:
- name: Ensure certificate directory exists
file:
- path: /etc/openshift/master
+ path: "{{ openshift.common.config_base }}/master"
state: directory
when: etcd_client_certs_missing is defined and etcd_client_certs_missing
- name: Unarchive the tarball on the master
@@ -134,7 +134,7 @@
- name: Check status of master certificates
stat:
- path: "/etc/openshift/master/{{ item }}"
+ path: "{{ openshift.common.config_base }}/master/{{ item }}"
with_items: openshift_master_certs
register: g_master_cert_stat_result
- set_fact:
@@ -142,12 +142,12 @@
| map(attribute='stat.exists')
| list | intersect([false])}}"
master_cert_subdir: master-{{ openshift.common.hostname }}
- master_cert_config_dir: /etc/openshift/master
+ master_cert_config_dir: "{{ openshift.common.config_base }}/master"
- name: Configure master certificates
hosts: oo_first_master
vars:
- master_generated_certs_dir: /etc/openshift/generated-configs
+ master_generated_certs_dir: "{{ openshift.common.config_base }}/generated-configs"
masters_needing_certs: "{{ hostvars
| oo_select_keys(groups['oo_masters_to_config'] | difference(groups['oo_first_master']))
| oo_filter_list(filter_attr='master_certs_missing') }}"
@@ -186,10 +186,11 @@
vars:
sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
+ embedded_etcd: "{{ openshift.master.embedded_etcd }}"
pre_tasks:
- name: Ensure certificate directory exists
file:
- path: /etc/openshift/master
+ path: "{{ openshift.common.config_base }}/master"
state: directory
when: master_certs_missing and 'oo_first_master' not in group_names
- name: Unarchive the tarball on the master
@@ -199,6 +200,7 @@
when: master_certs_missing and 'oo_first_master' not in group_names
roles:
- openshift_master
+ - role: nickhammond.logrotate
- role: fluentd_master
when: openshift.common.use_fluentd | bool
post_tasks:
@@ -215,6 +217,17 @@
- role: openshift_master_cluster
when: openshift_master_ha | bool
- openshift_examples
+ - role: openshift_cluster_metrics
+ when: openshift.common.use_cluster_metrics | bool
+
+- name: Enable cockpit
+ hosts: oo_first_master
+ vars:
+ cockpit_plugins: "{{ osm_cockpit_plugins | default(['cockpit-kubernetes']) }}"
+ roles:
+ - role: cockpit
+ when: ( deployment_type in ['atomic-enterprise','openshift-enterprise'] ) and
+ (osm_use_cockpit | bool or osm_use_cockpit is undefined )
# Additional instance config for online deployments
- name: Additional instance config
@@ -231,3 +244,19 @@
tasks:
- file: name={{ g_master_mktemp.stdout }} state=absent
changed_when: False
+
+- name: Configure service accounts
+ hosts: oo_first_master
+
+ vars:
+ accounts: ["router", "registry"]
+
+ roles:
+ - openshift_serviceaccounts
+
+- name: Create services
+ hosts: oo_first_master
+ roles:
+ - role: openshift_router
+ when: openshift.master.infra_nodes is defined
+ #- role: openshift_registry
diff --git a/playbooks/common/openshift-master/service.yml b/playbooks/common/openshift-master/service.yml
index 5636ad156..27e1e66f9 100644
--- a/playbooks/common/openshift-master/service.yml
+++ b/playbooks/common/openshift-master/service.yml
@@ -10,9 +10,9 @@
add_host: name={{ item }} groups=g_service_masters
with_items: oo_host_group_exp | default([])
-- name: Change openshift-master state on master instance(s)
+- name: Change state on master instance(s)
hosts: g_service_masters
connection: ssh
gather_facts: no
tasks:
- - service: name=openshift-master state="{{ new_cluster_state }}"
+ - service: name={{ openshift.common.service_type }}-master state="{{ new_cluster_state }}"
diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml
index 705f7f223..a14ca8e11 100644
--- a/playbooks/common/openshift-node/config.yml
+++ b/playbooks/common/openshift-node/config.yml
@@ -20,9 +20,10 @@
local_facts:
labels: "{{ openshift_node_labels | default(None) }}"
annotations: "{{ openshift_node_annotations | default(None) }}"
+ schedulable: "{{ openshift_schedulable | default(openshift_scheduleable) | default(None) }}"
- name: Check status of node certificates
stat:
- path: "/etc/openshift/node/{{ item }}"
+ path: "{{ openshift.common.config_base }}/node/{{ item }}"
with_items:
- "system:node:{{ openshift.common.hostname }}.crt"
- "system:node:{{ openshift.common.hostname }}.key"
@@ -35,8 +36,8 @@
certs_missing: "{{ stat_result.results | map(attribute='stat.exists')
| list | intersect([false])}}"
node_subdir: node-{{ openshift.common.hostname }}
- config_dir: /etc/openshift/generated-configs/node-{{ openshift.common.hostname }}
- node_cert_dir: /etc/openshift/node
+ config_dir: "{{ openshift.common.config_base }}/generated-configs/node-{{ openshift.common.hostname }}"
+ node_cert_dir: "{{ openshift.common.config_base }}/node"
- name: Create temp directory for syncing certs
hosts: localhost
@@ -89,9 +90,9 @@
path: "{{ node_cert_dir }}"
state: directory
- # TODO: notify restart openshift-node
+ # TODO: notify restart node
# possibly test service started time against certificate/config file
- # timestamps in openshift-node to trigger notify
+ # timestamps in node to trigger notify
- name: Unarchive the tarball on the node
unarchive:
src: "{{ sync_tmpdir }}/{{ node_subdir }}.tgz"
@@ -99,6 +100,7 @@
when: certs_missing
roles:
- openshift_node
+ - role: nickhammond.logrotate
- role: fluentd_node
when: openshift.common.use_fluentd | bool
tasks:
@@ -123,21 +125,14 @@
- os_env_extras
- os_env_extras_node
-- name: Set scheduleability
+- name: Set schedulability
hosts: oo_first_master
vars:
openshift_nodes: "{{ hostvars
| oo_select_keys(groups['oo_nodes_to_config'])
| oo_collect('openshift.common.hostname') }}"
- openshift_unscheduleable_nodes: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] | default([]))
- | oo_collect('openshift.common.hostname', {'openshift_scheduleable': False}) }}"
openshift_node_vars: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']) }}"
pre_tasks:
- - set_fact:
- openshift_scheduleable_nodes: "{{ hostvars
- | oo_select_keys(groups['oo_nodes_to_config'] | default([]))
- | oo_collect('openshift.common.hostname')
- | difference(openshift_unscheduleable_nodes) }}"
roles:
- openshift_manage_node
diff --git a/playbooks/common/openshift-node/service.yml b/playbooks/common/openshift-node/service.yml
index f76df089f..5cf83e186 100644
--- a/playbooks/common/openshift-node/service.yml
+++ b/playbooks/common/openshift-node/service.yml
@@ -10,9 +10,9 @@
add_host: name={{ item }} groups=g_service_nodes
with_items: oo_host_group_exp | default([])
-- name: Change openshift-node state on node instance(s)
+- name: Change state on node instance(s)
hosts: g_service_nodes
connection: ssh
gather_facts: no
tasks:
- - service: name=openshift-node state="{{ new_cluster_state }}"
+ - service: name={{ service_type }}-node state="{{ new_cluster_state }}"
diff --git a/playbooks/gce/openshift-cluster/config.yml b/playbooks/gce/openshift-cluster/config.yml
index 219ebe6a0..6ca4f7395 100644
--- a/playbooks/gce/openshift-cluster/config.yml
+++ b/playbooks/gce/openshift-cluster/config.yml
@@ -10,6 +10,8 @@
- set_fact:
g_ssh_user_tmp: "{{ deployment_vars[deployment_type].ssh_user }}"
g_sudo_tmp: "{{ deployment_vars[deployment_type].sudo }}"
+ use_sdn: "{{ do_we_use_openshift_sdn }}"
+ sdn_plugin: "{{ sdn_network_plugin }}"
- include: ../../common/openshift-cluster/config.yml
vars:
@@ -18,7 +20,10 @@
g_nodes_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-node' }}"
g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}"
g_sudo: "{{ hostvars.localhost.g_sudo_tmp }}"
+ g_nodeonmaster: true
openshift_cluster_id: "{{ cluster_id }}"
- openshift_debug_level: 4
+ openshift_debug_level: 2
openshift_deployment_type: "{{ deployment_type }}"
openshift_hostname: "{{ gce_private_ip }}"
+ openshift_use_openshift_sdn: "{{ hostvars.localhost.use_sdn }}"
+ os_sdn_network_plugin_name: "{{ hostvars.localhost.sdn_plugin }}"
diff --git a/playbooks/gce/openshift-cluster/join_node.yml b/playbooks/gce/openshift-cluster/join_node.yml
new file mode 100644
index 000000000..0dfa3e9d7
--- /dev/null
+++ b/playbooks/gce/openshift-cluster/join_node.yml
@@ -0,0 +1,49 @@
+---
+- name: Populate oo_hosts_to_update group
+ hosts: localhost
+ gather_facts: no
+ vars_files:
+ - vars.yml
+ tasks:
+ - name: Evaluate oo_hosts_to_update
+ add_host:
+ name: "{{ node_ip }}"
+ groups: oo_hosts_to_update
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+
+- include: ../../common/openshift-cluster/update_repos_and_packages.yml
+
+- name: Populate oo_nodes_to_config and oo_first_master host groups
+ hosts: localhost
+ gather_facts: no
+ vars_files:
+ - vars.yml
+ tasks:
+ - name: Evaluate oo_nodes_to_config
+ add_host:
+ name: "{{ node_ip }}"
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ groups: oo_nodes_to_config
+
+ - name: Evaluate oo_first_master
+ add_host:
+ name: "{{ groups['tag_env-host-type-' ~ cluster_id ~ '-openshift-master'][0] }}"
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ groups: oo_first_master
+ when: "'tag_env-host-type-{{ cluster_id }}-openshift-master' in groups"
+
+#- include: config.yml
+- include: ../../common/openshift-node/config.yml
+ vars:
+ openshift_cluster_id: "{{ cluster_id }}"
+ openshift_debug_level: 4
+ openshift_deployment_type: "{{ deployment_type }}"
+ openshift_hostname: "{{ ansible_default_ipv4.address }}"
+ openshift_use_openshift_sdn: true
+ openshift_node_labels: "{{ lookup('oo_option', 'openshift_node_labels') }} "
+ os_sdn_network_plugin_name: "redhat/openshift-ovs-subnet"
+ osn_cluster_dns_domain: "{{ hostvars[groups.oo_first_master.0].openshift.dns.domain }}"
+ osn_cluster_dns_ip: "{{ hostvars[groups.oo_first_master.0].openshift.dns.ip }}"
diff --git a/playbooks/gce/openshift-cluster/launch.yml b/playbooks/gce/openshift-cluster/launch.yml
index 7a3b80da0..c22b897d5 100644
--- a/playbooks/gce/openshift-cluster/launch.yml
+++ b/playbooks/gce/openshift-cluster/launch.yml
@@ -34,27 +34,28 @@
count: "{{ num_infra }}"
- include: tasks/launch_instances.yml
vars:
- instances: "{{ infra_names }}"
+ instances: "{{ node_names }}"
cluster: "{{ cluster_id }}"
type: "{{ k8s_type }}"
g_sub_host_type: "{{ sub_host_type }}"
- - set_fact:
- a_infra: "{{ infra_names[0] }}"
- - add_host: name={{ a_infra }} groups=service_master
+ - add_host:
+ name: "{{ master_names.0 }}"
+ groups: service_master
+ when: master_names is defined and master_names.0 is defined
- include: update.yml
-
-- name: Deploy OpenShift Services
- hosts: service_master
- connection: ssh
- gather_facts: yes
- roles:
- - openshift_registry
- - openshift_router
-
-- include: ../../common/openshift-cluster/create_services.yml
- vars:
- g_svc_master: "{{ service_master }}"
+#
+#- name: Deploy OpenShift Services
+# hosts: service_master
+# connection: ssh
+# gather_facts: yes
+# roles:
+# - openshift_registry
+# - openshift_router
+#
+#- include: ../../common/openshift-cluster/create_services.yml
+# vars:
+# g_svc_master: "{{ service_master }}"
- include: list.yml
diff --git a/playbooks/gce/openshift-cluster/list.yml b/playbooks/gce/openshift-cluster/list.yml
index 5ba0f5a48..53b2b9a5e 100644
--- a/playbooks/gce/openshift-cluster/list.yml
+++ b/playbooks/gce/openshift-cluster/list.yml
@@ -14,11 +14,11 @@
groups: oo_list_hosts
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
- with_items: groups[scratch_group] | default([]) | difference(['localhost']) | difference(groups.status_terminated)
+ with_items: groups[scratch_group] | default([], true) | difference(['localhost']) | difference(groups.status_terminated | default([], true))
- name: List instance(s)
hosts: oo_list_hosts
gather_facts: no
tasks:
- debug:
- msg: "public ip:{{ hostvars[inventory_hostname].gce_public_ip }} private ip:{{ hostvars[inventory_hostname].gce_private_ip }}"
+ msg: "private ip:{{ hostvars[inventory_hostname].gce_private_ip }}"
diff --git a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml
index 6307ecc27..c428cb465 100644
--- a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml
+++ b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml
@@ -10,14 +10,33 @@
service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
project_id: "{{ lookup('env', 'gce_project_id') }}"
+ zone: "{{ lookup('env', 'zone') }}"
+ network: "{{ lookup('env', 'network') }}"
+# unsupported in 1.9.+
+ #service_account_permissions: "datastore,logging-write"
tags:
- created-by-{{ lookup('env', 'LOGNAME') |default(cluster, true) }}
- env-{{ cluster }}
- host-type-{{ type }}
- - sub-host-type-{{ sub_host_type }}
+ - sub-host-type-{{ g_sub_host_type }}
- env-host-type-{{ cluster }}-openshift-{{ type }}
+ when: instances |length > 0
register: gce
+- set_fact:
+ node_label:
+ # There doesn't seem to be a way to get the region directly, so parse it out of the zone.
+ region: "{{ gce.zone | regex_replace('^(.*)-.*$', '\\\\1') }}"
+ type: "{{ g_sub_host_type }}"
+ when: instances |length > 0 and type == "node"
+
+- set_fact:
+ node_label:
+ # There doesn't seem to be a way to get the region directly, so parse it out of the zone.
+ region: "{{ gce.zone | regex_replace('^(.*)-.*$', '\\\\1') }}"
+ type: "{{ type }}"
+ when: instances |length > 0 and type != "node"
+
- name: Add new instances to groups and set variables needed
add_host:
hostname: "{{ item.name }}"
@@ -27,16 +46,17 @@
groups: "{{ item.tags | oo_prepend_strings_in_list('tag_') | join(',') }}"
gce_public_ip: "{{ item.public_ip }}"
gce_private_ip: "{{ item.private_ip }}"
- with_items: gce.instance_data
+ openshift_node_labels: "{{ node_label }}"
+ with_items: gce.instance_data | default([], true)
- name: Wait for ssh
wait_for: port=22 host={{ item.public_ip }}
- with_items: gce.instance_data
+ with_items: gce.instance_data | default([], true)
- name: Wait for user setup
command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null {{ hostvars[item.name].ansible_ssh_user }}@{{ item.public_ip }} echo {{ hostvars[item.name].ansible_ssh_user }} user is setup"
register: result
until: result.rc == 0
- retries: 20
- delay: 10
- with_items: gce.instance_data
+ retries: 30
+ delay: 5
+ with_items: gce.instance_data | default([], true)
diff --git a/playbooks/gce/openshift-cluster/terminate.yml b/playbooks/gce/openshift-cluster/terminate.yml
index 098b0df73..e20e0a8bc 100644
--- a/playbooks/gce/openshift-cluster/terminate.yml
+++ b/playbooks/gce/openshift-cluster/terminate.yml
@@ -1,25 +1,18 @@
---
- name: Terminate instance(s)
hosts: localhost
+ connection: local
gather_facts: no
vars_files:
- vars.yml
tasks:
- - set_fact: scratch_group=tag_env-host-type-{{ cluster_id }}-openshift-node
+ - set_fact: scratch_group=tag_env-{{ cluster_id }}
- add_host:
name: "{{ item }}"
- groups: oo_hosts_to_terminate, oo_nodes_to_terminate
+ groups: oo_hosts_to_terminate
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
- with_items: groups[scratch_group] | default([]) | difference(['localhost']) | difference(groups.status_terminated)
-
- - set_fact: scratch_group=tag_env-host-type-{{ cluster_id }}-openshift-master
- - add_host:
- name: "{{ item }}"
- groups: oo_hosts_to_terminate, oo_masters_to_terminate
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
- ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
- with_items: groups[scratch_group] | default([]) | difference(['localhost']) | difference(groups.status_terminated)
+ with_items: groups[scratch_group] | default([], true) | difference(['localhost']) | difference(groups.status_terminated | default([], true))
- name: Unsubscribe VMs
hosts: oo_hosts_to_terminate
@@ -32,14 +25,34 @@
lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) |
default('no', True) | lower in ['no', 'false']
-- include: ../openshift-node/terminate.yml
- vars:
- gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
- gce_pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
- gce_project_id: "{{ lookup('env', 'gce_project_id') }}"
+- name: Terminate instance(s)
+ hosts: localhost
+ connection: local
+ gather_facts: no
+ vars_files:
+ - vars.yml
+ tasks:
+
+ - name: Terminate instances that were previously launched
+ local_action:
+ module: gce
+ state: 'absent'
+ name: "{{ item }}"
+ service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
+ pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
+ project_id: "{{ lookup('env', 'gce_project_id') }}"
+ zone: "{{ lookup('env', 'zone') }}"
+ with_items: groups['oo_hosts_to_terminate'] | default([], true)
+ when: item is defined
-- include: ../openshift-master/terminate.yml
- vars:
- gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
- gce_pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
- gce_project_id: "{{ lookup('env', 'gce_project_id') }}"
+#- include: ../openshift-node/terminate.yml
+# vars:
+# gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
+# gce_pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
+# gce_project_id: "{{ lookup('env', 'gce_project_id') }}"
+#
+#- include: ../openshift-master/terminate.yml
+# vars:
+# gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
+# gce_pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
+# gce_project_id: "{{ lookup('env', 'gce_project_id') }}"
diff --git a/playbooks/gce/openshift-cluster/vars.yml b/playbooks/gce/openshift-cluster/vars.yml
index ae33083b9..6de007807 100644
--- a/playbooks/gce/openshift-cluster/vars.yml
+++ b/playbooks/gce/openshift-cluster/vars.yml
@@ -1,8 +1,11 @@
---
+do_we_use_openshift_sdn: true
+sdn_network_plugin: redhat/openshift-ovs-subnet
+# os_sdn_network_plugin_name can be ovssubnet or multitenant, see https://docs.openshift.org/latest/architecture/additional_concepts/sdn.html#ovssubnet-plugin-operation
deployment_vars:
origin:
- image: centos-7
- ssh_user:
+ image: preinstalled-slave-50g-v5
+ ssh_user: root
sudo: yes
online:
image: libra-rhel7
@@ -12,4 +15,3 @@ deployment_vars:
image: rhel-7
ssh_user:
sudo: yes
-
diff --git a/playbooks/libvirt/openshift-cluster/config.yml b/playbooks/libvirt/openshift-cluster/config.yml
index 98fe11251..c208eee81 100644
--- a/playbooks/libvirt/openshift-cluster/config.yml
+++ b/playbooks/libvirt/openshift-cluster/config.yml
@@ -20,5 +20,5 @@
g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}"
g_sudo: "{{ hostvars.localhost.g_sudo_tmp }}"
openshift_cluster_id: "{{ cluster_id }}"
- openshift_debug_level: 4
+ openshift_debug_level: 2
openshift_deployment_type: "{{ deployment_type }}"
diff --git a/playbooks/libvirt/openshift-cluster/launch.yml b/playbooks/libvirt/openshift-cluster/launch.yml
index 830f9d216..d3e768de5 100644
--- a/playbooks/libvirt/openshift-cluster/launch.yml
+++ b/playbooks/libvirt/openshift-cluster/launch.yml
@@ -17,6 +17,14 @@
- include: tasks/configure_libvirt.yml
+ - include: ../../common/openshift-cluster/set_etcd_launch_facts_tasks.yml
+ - include: tasks/launch_instances.yml
+ vars:
+ instances: "{{ etcd_names }}"
+ cluster: "{{ cluster_id }}"
+ type: "{{ k8s_type }}"
+ g_sub_host_type: "default"
+
- include: ../../common/openshift-cluster/set_master_launch_facts_tasks.yml
- include: tasks/launch_instances.yml
vars:
diff --git a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
index 4cb494056..4b91c6da8 100644
--- a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
+++ b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
@@ -63,8 +63,9 @@
shell: 'virsh -c {{ libvirt_uri }} net-dhcp-leases openshift-ansible | egrep -c ''{{ instances | join("|") }}'''
register: nb_allocated_ips
until: nb_allocated_ips.stdout == '{{ instances | length }}'
- retries: 30
- delay: 1
+ retries: 60
+ delay: 3
+ when: instances | length != 0
- name: Collect IP addresses of the VMs
shell: 'virsh -c {{ libvirt_uri }} net-dhcp-leases openshift-ansible | awk ''$6 == "{{ item }}" {gsub(/\/.*/, "", $5); print $5}'''
@@ -72,7 +73,7 @@
with_items: instances
- set_fact:
- ips: "{{ scratch_ip.results | oo_collect('stdout') }}"
+ ips: "{{ scratch_ip.results | default([]) | oo_collect('stdout') }}"
- name: Add new instances
add_host:
diff --git a/playbooks/libvirt/openshift-cluster/templates/network.xml b/playbooks/libvirt/openshift-cluster/templates/network.xml
index 86dcd62bb..050bc7ab9 100644
--- a/playbooks/libvirt/openshift-cluster/templates/network.xml
+++ b/playbooks/libvirt/openshift-cluster/templates/network.xml
@@ -8,7 +8,7 @@
<!-- TODO: query for first available virbr interface available -->
<bridge name='virbr3' stp='on' delay='0'/>
<!-- TODO: make overridable -->
- <domain name='example.com'/>
+ <domain name='example.com' localOnly='yes' />
<dns>
<!-- TODO: automatically add host entries -->
</dns>
diff --git a/playbooks/libvirt/openshift-cluster/templates/user-data b/playbooks/libvirt/openshift-cluster/templates/user-data
index 77b788109..eacae7c7e 100644
--- a/playbooks/libvirt/openshift-cluster/templates/user-data
+++ b/playbooks/libvirt/openshift-cluster/templates/user-data
@@ -19,5 +19,5 @@ system_info:
ssh_authorized_keys:
- {{ lookup('file', '~/.ssh/id_rsa.pub') }}
-bootcmd:
+runcmd:
- NETWORK_CONFIG=/etc/sysconfig/network-scripts/ifcfg-eth0; if ! grep DHCP_HOSTNAME ${NETWORK_CONFIG}; then echo 'DHCP_HOSTNAME="{{ item[0] }}.example.com"' >> ${NETWORK_CONFIG}; fi; pkill -9 dhclient; service network restart
diff --git a/playbooks/openstack/openshift-cluster/config.yml b/playbooks/openstack/openshift-cluster/config.yml
index 3c9a231e3..a5ee2d6a5 100644
--- a/playbooks/openstack/openshift-cluster/config.yml
+++ b/playbooks/openstack/openshift-cluster/config.yml
@@ -15,6 +15,6 @@
g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}"
g_sudo: "{{ hostvars.localhost.g_sudo_tmp }}"
openshift_cluster_id: "{{ cluster_id }}"
- openshift_debug_level: 4
+ openshift_debug_level: 2
openshift_deployment_type: "{{ deployment_type }}"
openshift_hostname: "{{ ansible_default_ipv4.address }}"
diff --git a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml b/playbooks/openstack/openshift-cluster/files/heat_stack.yaml
index d53884e0d..40e4ab22c 100644
--- a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml
+++ b/playbooks/openstack/openshift-cluster/files/heat_stack.yaml
@@ -9,21 +9,6 @@ parameters:
label: Cluster ID
description: Identifier of the cluster
- num_masters:
- type: number
- label: Number of masters
- description: Number of masters
-
- num_nodes:
- type: number
- label: Number of compute nodes
- description: Number of compute nodes
-
- num_infra:
- type: number
- label: Number of infrastructure nodes
- description: Number of infrastructure nodes
-
cidr:
type: string
label: CIDR
@@ -40,6 +25,12 @@ parameters:
description: Name of the external network
default: external
+ floating_ip_pool:
+ type: string
+ label: Floating IP pool
+ description: Floating IP pools
+ default: external
+
ssh_public_key:
type: string
label: SSH public key
@@ -52,6 +43,21 @@ parameters:
description: Source of legitimate ssh connections
default: 0.0.0.0/0
+ num_masters:
+ type: number
+ label: Number of masters
+ description: Number of masters
+
+ num_nodes:
+ type: number
+ label: Number of compute nodes
+ description: Number of compute nodes
+
+ num_infra:
+ type: number
+ label: Number of infrastructure nodes
+ description: Number of infrastructure nodes
+
master_image:
type: string
label: Master image
@@ -290,7 +296,7 @@ resources:
subnet: { get_resource: subnet }
secgrp:
- { get_resource: master-secgrp }
- floating_network: { get_param: external_net }
+ floating_network: { get_param: floating_ip_pool }
net_name:
str_replace:
template: openshift-ansible-cluster_id-net
@@ -322,7 +328,7 @@ resources:
subnet: { get_resource: subnet }
secgrp:
- { get_resource: node-secgrp }
- floating_network: { get_param: external_net }
+ floating_network: { get_param: floating_ip_pool }
net_name:
str_replace:
template: openshift-ansible-cluster_id-net
@@ -355,7 +361,7 @@ resources:
secgrp:
- { get_resource: node-secgrp }
- { get_resource: infra-secgrp }
- floating_network: { get_param: external_net }
+ floating_network: { get_param: floating_ip_pool }
net_name:
str_replace:
template: openshift-ansible-cluster_id-net
diff --git a/playbooks/openstack/openshift-cluster/launch.yml b/playbooks/openstack/openshift-cluster/launch.yml
index d36bdbf26..651aef40b 100644
--- a/playbooks/openstack/openshift-cluster/launch.yml
+++ b/playbooks/openstack/openshift-cluster/launch.yml
@@ -19,30 +19,21 @@
changed_when: false
failed_when: stack_show_result.rc != 0 and 'Stack not found' not in stack_show_result.stderr
- - name: Create OpenStack Stack
- command: 'heat stack-create -f {{ openstack_infra_heat_stack }}
- -P cluster_id={{ cluster_id }}
- -P dns_nameservers={{ openstack_network_dns | join(",") }}
- -P cidr={{ openstack_network_cidr }}
- -P ssh_incoming={{ openstack_ssh_access_from }}
- -P num_masters={{ num_masters }}
- -P num_nodes={{ num_nodes }}
- -P num_infra={{ num_infra }}
- -P master_image={{ deployment_vars[deployment_type].image }}
- -P node_image={{ deployment_vars[deployment_type].image }}
- -P infra_image={{ deployment_vars[deployment_type].image }}
- -P master_flavor={{ openstack_flavor["master"] }}
- -P node_flavor={{ openstack_flavor["node"] }}
- -P infra_flavor={{ openstack_flavor["infra"] }}
- -P ssh_public_key="{{ openstack_ssh_public_key }}"
- openshift-ansible-{{ cluster_id }}-stack'
+ - set_fact:
+ heat_stack_action: 'stack-create'
when: stack_show_result.rc == 1
+ - set_fact:
+ heat_stack_action: 'stack-update'
+ when: stack_show_result.rc == 0
- - name: Update OpenStack Stack
- command: 'heat stack-update -f {{ openstack_infra_heat_stack }}
+ - name: Create or Update OpenStack Stack
+ command: 'heat {{ heat_stack_action }} -f {{ openstack_infra_heat_stack }}
-P cluster_id={{ cluster_id }}
- -P dns_nameservers={{ openstack_network_dns | join(",") }}
-P cidr={{ openstack_network_cidr }}
+ -P dns_nameservers={{ openstack_network_dns | join(",") }}
+ -P external_net={{ openstack_network_external_net }}
+ -P floating_ip_pool={{ openstack_floating_ip_pool }}
+ -P ssh_public_key="{{ openstack_ssh_public_key }}"
-P ssh_incoming={{ openstack_ssh_access_from }}
-P num_masters={{ num_masters }}
-P num_nodes={{ num_nodes }}
@@ -53,9 +44,7 @@
-P master_flavor={{ openstack_flavor["master"] }}
-P node_flavor={{ openstack_flavor["node"] }}
-P infra_flavor={{ openstack_flavor["infra"] }}
- -P ssh_public_key="{{ openstack_ssh_public_key }}"
openshift-ansible-{{ cluster_id }}-stack'
- when: stack_show_result.rc == 0
- name: Wait for OpenStack Stack readiness
shell: 'heat stack-show openshift-ansible-{{ cluster_id }}-stack | awk ''$2 == "stack_status" {print $4}'''
diff --git a/playbooks/openstack/openshift-cluster/tasks/configure_openstack.yml b/playbooks/openstack/openshift-cluster/tasks/configure_openstack.yml
deleted file mode 100644
index 2cbdb4805..000000000
--- a/playbooks/openstack/openshift-cluster/tasks/configure_openstack.yml
+++ /dev/null
@@ -1,27 +0,0 @@
----
-- name: Check infra
- command: 'heat stack-show {{ openstack_network_prefix }}-stack'
- register: stack_show_result
- changed_when: false
- failed_when: stack_show_result.rc != 0 and 'Stack not found' not in stack_show_result.stderr
-
-- name: Create infra
- command: 'heat stack-create -f {{ openstack_infra_heat_stack }} -P cluster-id={{ cluster_id }} -P network-prefix={{ openstack_network_prefix }} -P dns-nameservers={{ openstack_network_dns | join(",") }} -P cidr={{ openstack_network_cidr }} -P ssh-incoming={{ openstack_ssh_access_from }} {{ openstack_network_prefix }}-stack'
- when: stack_show_result.rc == 1
-
-- name: Update infra
- command: 'heat stack-update -f {{ openstack_infra_heat_stack }} -P cluster-id={{ cluster_id }} -P network-prefix={{ openstack_network_prefix }} -P dns-nameservers={{ openstack_network_dns | join(",") }} -P cidr={{ openstack_network_cidr }} -P ssh-incoming={{ openstack_ssh_access_from }} {{ openstack_network_prefix }}-stack'
- when: stack_show_result.rc == 0
-
-- name: Wait for infra readiness
- shell: 'heat stack-show {{ openstack_network_prefix }}-stack | awk ''$2 == "stack_status" {print $4}'''
- register: stack_show_status_result
- until: stack_show_status_result.stdout not in ['CREATE_IN_PROGRESS', 'UPDATE_IN_PROGRESS']
- retries: 30
- delay: 1
- failed_when: stack_show_status_result.stdout not in ['CREATE_COMPLETE', 'UPDATE_COMPLETE']
-
-- name: Create ssh keypair
- nova_keypair:
- name: "{{ openstack_ssh_keypair }}"
- public_key: "{{ openstack_ssh_public_key }}"
diff --git a/playbooks/openstack/openshift-cluster/vars.yml b/playbooks/openstack/openshift-cluster/vars.yml
index 43e25f2e6..262d3f4ed 100644
--- a/playbooks/openstack/openshift-cluster/vars.yml
+++ b/playbooks/openstack/openshift-cluster/vars.yml
@@ -1,18 +1,14 @@
---
openstack_infra_heat_stack: "{{ lookup('oo_option', 'infra_heat_stack' ) |
default('files/heat_stack.yaml', True) }}"
-openstack_network_prefix: "{{ lookup('oo_option', 'network_prefix' ) |
- default('openshift-ansible-'+cluster_id, True) }}"
openstack_network_cidr: "{{ lookup('oo_option', 'net_cidr' ) |
default('192.168.' + ( ( 1048576 | random % 256 ) | string() ) + '.0/24', True) }}"
openstack_network_external_net: "{{ lookup('oo_option', 'external_net' ) |
default('external', True) }}"
-openstack_floating_ip_pools: "{{ lookup('oo_option', 'floating_ip_pools') |
- default('external', True) | oo_split() }}"
+openstack_floating_ip_pool: "{{ lookup('oo_option', 'floating_ip_pool' ) |
+ default('external', True) }}"
openstack_network_dns: "{{ lookup('oo_option', 'dns' ) |
default('8.8.8.8,8.8.4.4', True) | oo_split() }}"
-openstack_ssh_keypair: "{{ lookup('oo_option', 'keypair' ) |
- default(lookup('env', 'LOGNAME')+'_key', True) }}"
openstack_ssh_public_key: "{{ lookup('file', lookup('oo_option', 'public_key') |
default('~/.ssh/id_rsa.pub', True)) }}"
openstack_ssh_access_from: "{{ lookup('oo_option', 'ssh_from') |