Diffstat (limited to 'playbooks/adhoc')
-rw-r--r--  playbooks/adhoc/atomic_openshift_tutorial_reset.yml                          77
-rw-r--r--  playbooks/adhoc/bootstrap-fedora.yml                                          4
-rw-r--r--  playbooks/adhoc/create_pv/create_pv.yaml                                      21
-rw-r--r--  playbooks/adhoc/create_pv/pv-template.j2                                      2
-rw-r--r--  playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml     13
-rwxr-xr-x  playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml 115
-rw-r--r--  playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml            4
-rw-r--r--  playbooks/adhoc/grow_docker_vg/filter_plugins/oo_filters.py                  41
-rw-r--r--  playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml                             206
-rw-r--r--  playbooks/adhoc/noc/create_host.yml                                           4
-rw-r--r--  playbooks/adhoc/noc/create_maintenance.yml                                    2
-rw-r--r--  playbooks/adhoc/noc/get_zabbix_problems.yml                                   2
-rw-r--r--  playbooks/adhoc/s3_registry/s3_registry.j2                                    23
-rw-r--r--  playbooks/adhoc/s3_registry/s3_registry.yml                                   78
-rwxr-xr-x  playbooks/adhoc/sdn_restart/oo-sdn-restart.yml                                52
-rw-r--r--  playbooks/adhoc/setupnfs.yml                                                  21
-rw-r--r--  playbooks/adhoc/uninstall.yml                                                 238
-rw-r--r--  playbooks/adhoc/zabbix_setup/clean_zabbix.yml                                 2
-rwxr-xr-x  playbooks/adhoc/zabbix_setup/oo-config-zaio.yml                               6
19 files changed, 813 insertions, 98 deletions
diff --git a/playbooks/adhoc/atomic_openshift_tutorial_reset.yml b/playbooks/adhoc/atomic_openshift_tutorial_reset.yml
index 54d3ea278..c14d08e87 100644
--- a/playbooks/adhoc/atomic_openshift_tutorial_reset.yml
+++ b/playbooks/adhoc/atomic_openshift_tutorial_reset.yml
@@ -1,6 +1,9 @@
# This deletes *ALL* Docker images, and uninstalls OpenShift and
# Atomic Enterprise RPMs. It is primarily intended for use
# with the tutorial as well as for developers to reset state.
+#
+---
+- include: uninstall.yml
- hosts:
- OSEv3:children
@@ -8,59 +11,6 @@
sudo: yes
tasks:
- - service: name={{ item }} state=stopped
- with_items:
- - openvswitch
- - origin-master
- - origin-node
- - atomic-openshift-master
- - atomic-openshift-node
- - openshift-master
- - openshift-node
- - atomic-enterprise-master
- - atomic-enterprise-node
- - etcd
-
- - yum: name={{ item }} state=absent
- with_items:
- - openvswitch
- - etcd
- - origin
- - origin-master
- - origin-node
- - origin-sdn-ovs
- - tuned-profiles-origin-node
- - atomic-openshift
- - atomic-openshift-master
- - atomic-openshift-node
- - atomic-openshift-sdn-ovs
- - tuned-profiles-atomic-openshift-node
- - atomic-enterprise
- - atomic-enterprise-master
- - atomic-enterprise-node
- - atomic-enterprise-sdn-ovs
- - tuned-profiles-atomic-enterprise-node
- - openshift
- - openshift-master
- - openshift-node
- - openshift-sdn-ovs
- - tuned-profiles-openshift-node
-
- - shell: systemctl reset-failed
- changed_when: False
-
- - shell: systemctl daemon-reload
- changed_when: False
-
- - shell: find /var/lib/origin/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true
- changed_when: False
-
- - shell: find /var/lib/atomic-enterprise/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true
- changed_when: False
-
- - shell: find /var/lib/openshift/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true
- changed_when: False
-
- shell: docker ps -a -q | xargs docker stop
changed_when: False
failed_when: False
@@ -73,27 +23,6 @@
changed_when: False
failed_when: False
- - file: path={{ item }} state=absent
- with_items:
- - /etc/openshift-sdn
- - /root/.kube
- - /etc/origin
- - /etc/atomic-enterprise
- - /etc/openshift
- - /var/lib/origin
- - /var/lib/openshift
- - /var/lib/atomic-enterprise
- - /etc/sysconfig/origin-master
- - /etc/sysconfig/origin-node
- - /etc/sysconfig/atomic-openshift-master
- - /etc/sysconfig/atomic-openshift-node
- - /etc/sysconfig/openshift-master
- - /etc/sysconfig/openshift-node
- - /etc/sysconfig/atomic-enterprise-master
- - /etc/sysconfig/atomic-enterprise-node
- - /etc/etcd
- - /var/lib/etcd
-
- user: name={{ item }} state=absent remove=yes
with_items:
- alice
diff --git a/playbooks/adhoc/bootstrap-fedora.yml b/playbooks/adhoc/bootstrap-fedora.yml
new file mode 100644
index 000000000..471c41f16
--- /dev/null
+++ b/playbooks/adhoc/bootstrap-fedora.yml
@@ -0,0 +1,4 @@
+- hosts: OSEv3
+ tasks:
+ - name: install python and deps for ansible modules
+ raw: dnf install -y python2 python2-dnf libselinux-python libsemanage-python
diff --git a/playbooks/adhoc/create_pv/create_pv.yaml b/playbooks/adhoc/create_pv/create_pv.yaml
index 4f0ef7a75..81c1ee653 100644
--- a/playbooks/adhoc/create_pv/create_pv.yaml
+++ b/playbooks/adhoc/create_pv/create_pv.yaml
@@ -1,20 +1,21 @@
---
-#example run:
+#example run:
# ansible-playbook -e "cli_volume_size=1" \
# -e "cli_device_name=/dev/xvdf" \
# -e "cli_hosttype=master" \
-# -e "cli_environment=ops" \
+# -e "cli_clusterid=ops" \
# create_pv.yaml
-# FIXME: we need to change "environment" to "clusterid" as that's what it really is now.
#
- name: Create a volume and attach it to master
hosts: localhost
+ connection: local
+ become: no
gather_facts: no
vars:
cli_volume_type: gp2
cli_volume_iops: ''
oo_name: "{{ groups['tag_host-type_' ~ cli_hosttype] |
- intersect(groups['tag_environment_' ~ cli_environment]) |
+ intersect(groups['oo_clusterid_' ~ cli_clusterid]) |
first }}"
pre_tasks:
- fail:
@@ -24,7 +25,7 @@
- cli_volume_size
- cli_device_name
- cli_hosttype
- - cli_environment
+ - cli_clusterid
- name: set oo_name fact
set_fact:
@@ -55,7 +56,7 @@
args:
tags:
Name: "pv-{{ hostvars[oo_name]['ec2_tag_Name'] }}"
- env: "{{cli_environment}}"
+ clusterid: "{{cli_clusterid}}"
register: voltags
- debug: var=voltags
@@ -103,7 +104,7 @@
filesystem:
dev: "{{ cli_device_name }}"
fstype: ext4
-
+
- name: Mount the dev
mount:
name: "{{ pv_mntdir }}"
@@ -112,7 +113,7 @@
state: mounted
- name: chgrp g+rwXs
- file:
+ file:
path: "{{ pv_mntdir }}"
mode: 'g+rwXs'
recurse: yes
@@ -149,11 +150,11 @@
# We have to use the shell module because we can't set env vars with the command module.
- name: "Place PV into oc"
- shell: "KUBECONFIG=/etc/openshift/master/admin.kubeconfig oc create -f {{ pv_template | quote }}"
+ shell: "KUBECONFIG=/etc/origin/master/admin.kubeconfig oc create -f {{ pv_template | quote }}"
register: oc_output
- debug: var=oc_output
- - fail:
+ - fail:
msg: "Failed to add {{ pv_template }} to master."
when: oc_output.rc != 0
diff --git a/playbooks/adhoc/create_pv/pv-template.j2 b/playbooks/adhoc/create_pv/pv-template.j2
index 5654ef6c4..df082614b 100644
--- a/playbooks/adhoc/create_pv/pv-template.j2
+++ b/playbooks/adhoc/create_pv/pv-template.j2
@@ -10,7 +10,7 @@ spec:
storage: {{ vol_size }}Gi
accessModes:
- ReadWriteOnce
- persistentVolumeReclaimPolicy: Recycle
+ persistentVolumeReclaimPolicy: Retain
awsElasticBlockStore:
volumeID: aws://{{ vol_az }}/{{ vol_id }}
fsType: ext4
diff --git a/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml b/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml
index c9ae923bb..4d32fc40b 100644
--- a/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml
+++ b/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml
@@ -27,9 +27,8 @@
gather_facts: no
vars:
- cli_volume_type: io1
+ cli_volume_type: gp2
cli_volume_size: 30
- cli_volume_iops: "{{ 30 * cli_volume_size }}"
pre_tasks:
- fail:
@@ -104,7 +103,6 @@
volume_size: "{{ cli_volume_size | default(30, True)}}"
volume_type: "{{ cli_volume_type }}"
device_name: /dev/xvdb
- iops: "{{ 30 * cli_volume_size }}"
register: vol
- debug: var=vol
@@ -115,7 +113,7 @@
args:
tags:
Name: "{{ ec2_tag_Name }}"
- env: "{{ ec2_tag_environment }}"
+ clusterid: "{{ ec2_tag_clusterid }}"
register: voltags
- name: Wait for volume to attach
@@ -142,10 +140,3 @@
- debug: var=dockerstart
- - name: Wait for docker to stabilize
- pause:
- seconds: 30
-
- # leaving off the '-t' for docker exec. With it, it doesn't work with ansible and tty support
- - name: update zabbix docker items
- command: docker exec -i oso-rhel7-zagg-client /usr/local/bin/cron-send-docker-metrics.py
diff --git a/playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml b/playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml
new file mode 100755
index 000000000..72fcd77b3
--- /dev/null
+++ b/playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml
@@ -0,0 +1,115 @@
+#!/usr/bin/ansible-playbook
+---
+# This playbook converts docker from loopback to direct-lvm (the Red Hat recommended way to run docker).
+#
+# It requires the block device to be already provisioned and attached to the host. This is a generic playbook,
+# meant to be used for manual conversion. For AWS specific conversions, use the other playbook in this directory.
+#
+# To run:
+# ./ops-docker-loopback-to-direct-lvm.yml -e cli_host=<host to run on> -e cli_docker_device=<path to device>
+#
+# Example:
+# ./ops-docker-loopback-to-direct-lvm.yml -e cli_host=twiesttest-master-fd32 -e cli_docker_device=/dev/sdb
+#
+# Notes:
+# * This will remove /var/lib/docker!
+# * You may need to re-deploy docker images after this is run (like monitoring)
+
+- name: Fix docker to have a provisioned iops drive
+ hosts: "{{ cli_name }}"
+ user: root
+ connection: ssh
+ gather_facts: no
+
+ pre_tasks:
+ - fail:
+ msg: "This playbook requires {{item}} to be set."
+ when: "{{ item }} is not defined or {{ item }} == ''"
+ with_items:
+ - cli_docker_device
+
+ - name: start docker
+ service:
+ name: docker
+ state: started
+
+ - name: Determine if loopback
+ shell: docker info | grep 'Data file:.*loop'
+ register: loop_device_check
+ ignore_errors: yes
+
+ - debug:
+ var: loop_device_check
+
+ - name: fail if we don't detect loopback
+ fail:
+ msg: loopback not detected! Please investigate manually.
+ when: loop_device_check.rc == 1
+
+ - name: stop zagg client monitoring container
+ service:
+ name: oso-rhel7-zagg-client
+ state: stopped
+ ignore_errors: yes
+
+ - name: stop pcp client monitoring container
+ service:
+ name: oso-f22-host-monitoring
+ state: stopped
+ ignore_errors: yes
+
+ - name: "check to see if {{ cli_docker_device }} exists"
+ command: "test -e {{ cli_docker_device }}"
+ register: docker_dev_check
+ ignore_errors: yes
+
+ - debug: var=docker_dev_check
+
+ - name: "fail if {{ cli_docker_device }} doesn't exist"
+ fail:
+ msg: "{{ cli_docker_device }} doesn't exist. Please investigate"
+ when: docker_dev_check.rc != 0
+
+ - name: stop docker
+ service:
+ name: docker
+ state: stopped
+
+ - name: delete /var/lib/docker
+ command: rm -rf /var/lib/docker
+
+ - name: remove /var/lib/docker
+ command: rm -rf /var/lib/docker
+
+ - name: copy the docker-storage-setup config file
+ copy:
+ content: >
+ DEVS={{ cli_docker_device }}
+ VG=docker_vg
+ dest: /etc/sysconfig/docker-storage-setup
+ owner: root
+ group: root
+ mode: 0664
+
+ - name: docker storage setup
+ command: docker-storage-setup
+ register: setup_output
+
+ - debug: var=setup_output
+
+ - name: extend the vg
+ command: lvextend -l 90%VG /dev/docker_vg/docker-pool
+ register: extend_output
+
+ - debug: var=extend_output
+
+ - name: start docker
+ service:
+ name: docker
+ state: restarted
+
+ - name: docker info
+ command: docker info
+ register: dockerinfo
+
+ - debug: var=dockerinfo
diff --git a/playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml b/playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml
index 1946a5f4f..b6dde357e 100644
--- a/playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml
+++ b/playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml
@@ -52,12 +52,12 @@
ignore_errors: yes
- name: Remove non-running docker images
- shell: "docker images -aq | xargs --no-run-if-empty docker rmi 2>/dev/null"
+ shell: "docker images | grep -v -e registry.access.redhat.com -e docker-registry.usersys.redhat.com -e docker-registry.ops.rhcloud.com | awk '{print $3}' | xargs --no-run-if-empty docker rmi 2>/dev/null"
ignore_errors: yes
# leaving off the '-t' for docker exec. With it, it doesn't work with ansible and tty support
- name: update zabbix docker items
- command: docker exec -i oso-rhel7-zagg-client /usr/local/bin/cron-send-docker-metrics.py
+ command: docker exec -i oso-rhel7-host-monitoring /usr/local/bin/cron-send-docker-metrics.py
# Get and show docker info again.
- name: Get docker info
diff --git a/playbooks/adhoc/grow_docker_vg/filter_plugins/oo_filters.py b/playbooks/adhoc/grow_docker_vg/filter_plugins/oo_filters.py
new file mode 100644
index 000000000..d0264cde9
--- /dev/null
+++ b/playbooks/adhoc/grow_docker_vg/filter_plugins/oo_filters.py
@@ -0,0 +1,41 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# vim: expandtab:tabstop=4:shiftwidth=4
+'''
+Custom filters for use in openshift-ansible
+'''
+
+import pdb
+
+
+class FilterModule(object):
+ ''' Custom ansible filters '''
+
+ @staticmethod
+ def oo_pdb(arg):
+ ''' This pops you into a pdb instance where arg is the data passed in
+ from the filter.
+ Ex: "{{ hostvars | oo_pdb }}"
+ '''
+ pdb.set_trace()
+ return arg
+
+ @staticmethod
+ def translate_volume_name(volumes, target_volume):
+ '''
+ This filter matches a device string /dev/sdX to /dev/xvdX
+ It will then return the AWS volume ID
+ '''
+ for vol in volumes:
+ translated_name = vol["attachment_set"]["device"].replace("/dev/sd", "/dev/xvd")
+ if target_volume.startswith(translated_name):
+ return vol["id"]
+
+ return None
+
+
+ def filters(self):
+ ''' returns a mapping of filters to methods '''
+ return {
+ "translate_volume_name": self.translate_volume_name,
+ }
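(For reference: a minimal standalone sketch, outside of Ansible, of how the translate_volume_name filter above is exercised by grow_docker_vg.yml below. The volumes list mimics the shape returned by the ec2_vol module in "state: list" mode; the IDs and device names here are made-up sample values.)

def translate_volume_name(volumes, target_volume):
    # same logic as FilterModule.translate_volume_name above
    for vol in volumes:
        translated_name = vol["attachment_set"]["device"].replace("/dev/sd", "/dev/xvd")
        if target_volume.startswith(translated_name):
            return vol["id"]
    return None

# sample ec2_vol "state: list" output (illustrative values only)
volumes = [
    {"id": "vol-0aaa111", "attachment_set": {"device": "/dev/sda1"}},
    {"id": "vol-0bbb222", "attachment_set": {"device": "/dev/sdb"}},
]

# the docker physical volume as reported by pvs, e.g. /dev/xvdb1
print(translate_volume_name(volumes, "/dev/xvdb1"))  # -> vol-0bbb222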
diff --git a/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml b/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml
new file mode 100644
index 000000000..d24e9cafa
--- /dev/null
+++ b/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml
@@ -0,0 +1,206 @@
+---
+# This playbook grows the docker VG on a node by:
+# * add a new volume
+# * add volume to the existing VG.
+# * pv move to the new volume.
+# * remove old volume
+# * detach volume
+# * mark old volume in AWS with "REMOVE ME" tag
+# * grow docker LVM to 90% of the VG
+#
+# To run:
+# 1. Source your AWS credentials (make sure it's the corresponding AWS account) into your environment
+# export AWS_ACCESS_KEY_ID='XXXXX'
+# export AWS_SECRET_ACCESS_KEY='XXXXXX'
+#
+# 2. run the playbook:
+# ansible-playbook -e 'cli_tag_name=<tag-name>' grow_docker_vg.yml
+#
+# Example:
+# ansible-playbook -e 'cli_tag_name=ops-compute-12345' grow_docker_vg.yml
+#
+# Notes:
+# * By default this will create a 200GB GP2 volume. This can be overridden with the "-e 'cli_volume_size=100'" variable
+# * This does a GP2 by default. Support for Provisioned IOPS has not been added
+# * This will assign the new volume to /dev/xvdc. This is not variablized, yet.
+# * This can be done with NO downtime on the host
+# * This playbook assumes that there is a Logical Volume that is installed and called "docker-pool". This is
+# the LV that gets created via the "docker-storage-setup" command
+#
+
+- name: Grow the docker volume group
+ hosts: "tag_Name_{{ cli_tag_name }}"
+ user: root
+ connection: ssh
+ gather_facts: no
+
+ vars:
+ cli_volume_type: gp2
+ cli_volume_size: 200
+# cli_volume_iops: "{{ 30 * cli_volume_size }}"
+
+ pre_tasks:
+ - fail:
+ msg: "This playbook requires {{item}} to be set."
+ when: "{{ item }} is not defined or {{ item }} == ''"
+ with_items:
+ - cli_tag_name
+ - cli_volume_size
+
+ - debug:
+ var: hosts
+
+ - name: start docker
+ service:
+ name: docker
+ state: started
+
+ - name: Determine if Storage Driver (docker info) is devicemapper
+ shell: docker info | grep 'Storage Driver:.*devicemapper'
+ register: device_mapper_check
+ ignore_errors: yes
+
+ - debug:
+ var: device_mapper_check
+
+ - name: fail if we don't detect devicemapper
+ fail:
+ msg: The "Storage Driver" in "docker info" is not set to "devicemapper"! Please investigate manually.
+ when: device_mapper_check.rc == 1
+
+ # docker-storage-setup creates a docker-pool as the lvm. I am using docker-pool lvm to test
+ # and find the volume group.
+ - name: Attempt to find the Volume Group that docker is using
+ shell: lvs | grep docker-pool | awk '{print $2}'
+ register: docker_vg_name
+ ignore_errors: yes
+
+ - debug:
+ var: docker_vg_name
+
+ - name: fail if we don't find a docker volume group
+ fail:
+ msg: Unable to find docker volume group. Please investigate manually.
+ when: docker_vg_name.stdout_lines|length != 1
+
+ # docker-storage-setup creates a docker-pool as the lvm. I am using docker-pool lvm to test
+ # and find the physical volume.
+  - name: Attempt to find the Physical Volume that docker is using
+ shell: "pvs | grep {{ docker_vg_name.stdout }} | awk '{print $1}'"
+ register: docker_pv_name
+ ignore_errors: yes
+
+ - debug:
+ var: docker_pv_name
+
+ - name: fail if we don't find a docker physical volume
+ fail:
+ msg: Unable to find docker physical volume. Please investigate manually.
+ when: docker_pv_name.stdout_lines|length != 1
+
+
+ - name: get list of volumes from AWS
+ delegate_to: localhost
+ ec2_vol:
+ state: list
+ instance: "{{ ec2_id }}"
+ region: "{{ ec2_region }}"
+ register: attached_volumes
+
+ - debug: var=attached_volumes
+
+ - name: get volume id of current docker volume
+ set_fact:
+ old_docker_volume_id: "{{ attached_volumes.volumes | translate_volume_name(docker_pv_name.stdout) }}"
+
+ - debug: var=old_docker_volume_id
+
+ - name: check to see if /dev/xvdc exists
+ command: test -e /dev/xvdc
+ register: xvdc_check
+ ignore_errors: yes
+
+ - debug: var=xvdc_check
+
+ - name: fail if /dev/xvdc already exists
+ fail:
+ msg: /dev/xvdc already exists. Please investigate
+ when: xvdc_check.rc == 0
+
+ - name: Create a volume and attach it
+ delegate_to: localhost
+ ec2_vol:
+ state: present
+ instance: "{{ ec2_id }}"
+ region: "{{ ec2_region }}"
+ volume_size: "{{ cli_volume_size | default(30, True)}}"
+ volume_type: "{{ cli_volume_type }}"
+ device_name: /dev/xvdc
+ register: create_volume
+
+ - debug: var=create_volume
+
+ - name: Fail when problems creating volumes and attaching
+ fail:
+ msg: "Failed to create or attach volume msg: {{ create_volume.msg }}"
+ when: create_volume.msg is defined
+
+ - name: tag the vol with a name
+ delegate_to: localhost
+ ec2_tag: region={{ ec2_region }} resource={{ create_volume.volume_id }}
+ args:
+ tags:
+ Name: "{{ ec2_tag_Name }}"
+ clusterid: "{{ ec2_tag_clusterid }}"
+ register: voltags
+
+ - name: check for attached drive
+ command: test -b /dev/xvdc
+ register: attachment_check
+ until: attachment_check.rc == 0
+ retries: 30
+ delay: 2
+
+ - name: partition the new drive and make it lvm
+ command: parted /dev/xvdc --script -- mklabel msdos mkpart primary 0% 100% set 1 lvm
+
+ - name: pvcreate /dev/xvdc
+ command: pvcreate /dev/xvdc1
+
+ - name: Extend the docker volume group
+ command: vgextend "{{ docker_vg_name.stdout }}" /dev/xvdc1
+
+ - name: pvmove onto new volume
+ command: "pvmove {{ docker_pv_name.stdout }} /dev/xvdc1"
+ async: 43200
+ poll: 10
+
+ - name: Remove the old docker drive from the volume group
+ command: "vgreduce {{ docker_vg_name.stdout }} {{ docker_pv_name.stdout }}"
+
+ - name: Remove the pv from the old drive
+ command: "pvremove {{ docker_pv_name.stdout }}"
+
+ - name: Extend the docker lvm
+ command: "lvextend -l '90%VG' /dev/{{ docker_vg_name.stdout }}/docker-pool"
+
+ - name: detach old docker volume
+ delegate_to: localhost
+ ec2_vol:
+ region: "{{ ec2_region }}"
+ id: "{{ old_docker_volume_id }}"
+ instance: None
+
+ - name: tag the old vol valid label
+ delegate_to: localhost
+ ec2_tag: region={{ ec2_region }} resource={{old_docker_volume_id}}
+ args:
+ tags:
+ Name: "{{ ec2_tag_Name }} REMOVE ME"
+ register: voltags
+
+ - name: Update the /etc/sysconfig/docker-storage-setup with new device
+ lineinfile:
+ dest: /etc/sysconfig/docker-storage-setup
+ regexp: ^DEVS=
+ line: DEVS=/dev/xvdc
diff --git a/playbooks/adhoc/noc/create_host.yml b/playbooks/adhoc/noc/create_host.yml
index d250e6e69..2d2cae2b5 100644
--- a/playbooks/adhoc/noc/create_host.yml
+++ b/playbooks/adhoc/noc/create_host.yml
@@ -1,6 +1,8 @@
---
- name: 'Create a host object in zabbix'
hosts: localhost
+ connection: local
+ become: no
gather_facts: no
roles:
- os_zabbix
@@ -23,6 +25,8 @@
#ansible-playbook -e 'oo_desc=kwoodson test' -e 'oo_name=kwoodson test name' -e 'oo_start=1435715357' -e 'oo_stop=1435718985' -e 'oo_hostids=11549' create_maintenance.yml
- name: 'Create a host object in zabbix'
hosts: localhost
+ connection: local
+ become: no
gather_facts: no
roles:
- os_zabbix
diff --git a/playbooks/adhoc/noc/create_maintenance.yml b/playbooks/adhoc/noc/create_maintenance.yml
index c0ec57ce1..8ad5fa0e2 100644
--- a/playbooks/adhoc/noc/create_maintenance.yml
+++ b/playbooks/adhoc/noc/create_maintenance.yml
@@ -2,6 +2,8 @@
#ansible-playbook -e 'oo_desc=kwoodson test' -e 'oo_name=kwoodson test name' -e 'oo_start=1435715357' -e 'oo_stop=1435718985' -e 'oo_hostids=11549' create_maintenance.yml
- name: 'Create a maintenace object in zabbix'
hosts: localhost
+ connection: local
+ become: no
gather_facts: no
roles:
- os_zabbix
diff --git a/playbooks/adhoc/noc/get_zabbix_problems.yml b/playbooks/adhoc/noc/get_zabbix_problems.yml
index 4b94fa228..79cae24ab 100644
--- a/playbooks/adhoc/noc/get_zabbix_problems.yml
+++ b/playbooks/adhoc/noc/get_zabbix_problems.yml
@@ -1,6 +1,8 @@
---
- name: 'Get current hosts who have triggers that are alerting by trigger description'
hosts: localhost
+ connection: local
+ become: no
gather_facts: no
roles:
- os_zabbix
diff --git a/playbooks/adhoc/s3_registry/s3_registry.j2 b/playbooks/adhoc/s3_registry/s3_registry.j2
new file mode 100644
index 000000000..10454ad11
--- /dev/null
+++ b/playbooks/adhoc/s3_registry/s3_registry.j2
@@ -0,0 +1,23 @@
+version: 0.1
+log:
+ level: debug
+http:
+ addr: :5000
+storage:
+ cache:
+ layerinfo: inmemory
+ s3:
+ accesskey: {{ aws_access_key }}
+ secretkey: {{ aws_secret_key }}
+ region: {{ aws_bucket_region }}
+ bucket: {{ aws_bucket_name }}
+ encrypt: true
+ secure: true
+ v4auth: true
+ rootdirectory: /registry
+auth:
+ openshift:
+ realm: openshift
+middleware:
+ repository:
+ - name: openshift
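(A quick way to sanity-check this template outside of the playbook is to render it with jinja2 directly; a minimal sketch, assuming jinja2 is installed locally and using made-up sample values for the variables that s3_registry.yml normally supplies.)

from jinja2 import Template

# sample values only; the playbook fills these from env lookups and play vars
sample_vars = {
    "aws_access_key": "AKIAEXAMPLEKEY",
    "aws_secret_key": "exampleSecret",
    "aws_bucket_region": "us-east-1",
    "aws_bucket_name": "mycluster-docker",
}

with open("s3_registry.j2") as f:
    print(Template(f.read()).render(**sample_vars))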
diff --git a/playbooks/adhoc/s3_registry/s3_registry.yml b/playbooks/adhoc/s3_registry/s3_registry.yml
new file mode 100644
index 000000000..daf84e242
--- /dev/null
+++ b/playbooks/adhoc/s3_registry/s3_registry.yml
@@ -0,0 +1,78 @@
+---
+# This playbook creates an S3 bucket named after your cluster and configures the docker-registry service to use the bucket as its backend storage.
+# Usage:
+# ansible-playbook s3_registry.yml -e clusterid="mycluster" -e aws_bucket="clusterid-docker" -e aws_region="us-east-1"
+#
+# The AWS access/secret keys should be the keys of a separate user (not your main user), containing only the necessary S3 access role.
+# The 'clusterid' is the short name of your cluster.
+
+- hosts: tag_clusterid_{{ clusterid }}:&tag_host-type_openshift-master
+ remote_user: root
+ gather_facts: False
+
+ vars:
+ aws_access_key: "{{ lookup('env', 'S3_ACCESS_KEY_ID') }}"
+ aws_secret_key: "{{ lookup('env', 'S3_SECRET_ACCESS_KEY') }}"
+ aws_bucket_name: "{{ aws_bucket | default(clusterid ~ '-docker') }}"
+ aws_bucket_region: "{{ aws_region | default(lookup('env', 'S3_REGION') | default('us-east-1', true)) }}"
+ aws_create_bucket: "{{ aws_create | default(True) }}"
+ aws_tmp_path: "{{ aws_tmp_pathfile | default('/root/config.yml')}}"
+ aws_delete_tmp_file: "{{ aws_delete_tmp | default(True) }}"
+
+ tasks:
+
+ - name: Check for AWS creds
+ fail:
+ msg: "Couldn't find {{ item }} creds in ENV"
+ when: "{{ item }} == ''"
+ with_items:
+ - aws_access_key
+ - aws_secret_key
+
+ - name: Scale down registry
+ command: oc scale --replicas=0 dc/docker-registry
+
+ - name: Create S3 bucket
+ when: aws_create_bucket | bool
+ local_action:
+ module: s3 bucket="{{ aws_bucket_name }}" mode=create
+
+ - name: Set up registry environment variable
+ command: oc env dc/docker-registry REGISTRY_CONFIGURATION_PATH=/etc/registryconfig/config.yml
+
+ - name: Generate docker registry config
+ template: src="s3_registry.j2" dest="/root/config.yml" owner=root mode=0600
+
+ - name: Determine if new secrets are needed
+ command: oc get secrets
+ register: secrets
+
+ - name: Create registry secrets
+ command: oc secrets new dockerregistry /root/config.yml
+ when: "'dockerregistry' not in secrets.stdout"
+
+ - name: Determine if service account contains secrets
+ command: oc describe serviceaccount/registry
+ register: serviceaccount
+
+ - name: Add secrets to registry service account
+ command: oc secrets add serviceaccount/registry secrets/dockerregistry
+ when: "'dockerregistry' not in serviceaccount.stdout"
+
+ - name: Determine if deployment config contains secrets
+ command: oc volume dc/docker-registry --list
+ register: dc
+
+ - name: Add secrets to registry deployment config
+ command: oc volume dc/docker-registry --add --name=dockersecrets -m /etc/registryconfig --type=secret --secret-name=dockerregistry
+ when: "'dockersecrets' not in dc.stdout"
+
+ - name: Wait for deployment config to take effect before scaling up
+ pause: seconds=30
+
+ - name: Scale up registry
+ command: oc scale --replicas=1 dc/docker-registry
+
+ - name: Delete temporary config file
+ file: path={{ aws_tmp_path }} state=absent
+ when: aws_delete_tmp_file | bool
diff --git a/playbooks/adhoc/sdn_restart/oo-sdn-restart.yml b/playbooks/adhoc/sdn_restart/oo-sdn-restart.yml
new file mode 100755
index 000000000..08e8f8968
--- /dev/null
+++ b/playbooks/adhoc/sdn_restart/oo-sdn-restart.yml
@@ -0,0 +1,52 @@
+#!/usr/bin/ansible-playbook
+---
+#example run:
+# ansible-playbook -e "host=ops-node-compute-abcde" oo-sdn-restart.yml
+#
+
+- name: Check vars
+ hosts: localhost
+ gather_facts: false
+
+ pre_tasks:
+ - fail:
+ msg: "Playbook requires host to be set"
+ when: host is not defined or host == ''
+
+- name: Restart openshift/docker (and monitoring containers)
+ hosts: oo_version_3:&oo_name_{{ host }}
+ gather_facts: false
+ user: root
+
+ tasks:
+ - name: stop openshift/docker
+ service:
+ name: "{{ item }}"
+ state: stopped
+ with_items:
+ - atomic-openshift-node
+ - docker
+
+ - name: restart openvswitch
+ service:
+ name: openvswitch
+ state: restarted
+
+ - name: wait 5 sec
+ pause:
+ seconds: 5
+
+ - name: start openshift/docker
+ service:
+ name: "{{ item }}"
+ state: started
+ with_items:
+ - atomic-openshift-node
+ - docker
+
+ - name: start monitoring containers
+ service:
+ name: "{{ item }}"
+ state: restarted
+ with_items:
+ - oso-rhel7-host-monitoring
diff --git a/playbooks/adhoc/setupnfs.yml b/playbooks/adhoc/setupnfs.yml
new file mode 100644
index 000000000..5f3631fcf
--- /dev/null
+++ b/playbooks/adhoc/setupnfs.yml
@@ -0,0 +1,21 @@
+---
+### This playbook is old and we are currently not using NFS.
+- hosts: tag_Name_nfs-v3-stg
+ sudo: no
+ remote_user: root
+ gather_facts: no
+ roles:
+ - role: openshift_storage_nfs_lvm
+ mount_dir: /exports/stg-black
+ volume_prefix: "kwoodsontest"
+ volume_size: 5
+ volume_num_start: 222
+ number_of_volumes: 3
+ tasks:
+ - fetch:
+ dest: json/
+ src: /root/"{{ item }}"
+ with_items:
+ - persistent-volume.kwoodsontest5g0222.json
+ - persistent-volume.kwoodsontest5g0223.json
+ - persistent-volume.kwoodsontest5g0224.json
diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml
new file mode 100644
index 000000000..680964d80
--- /dev/null
+++ b/playbooks/adhoc/uninstall.yml
@@ -0,0 +1,238 @@
+# This deletes *ALL* Origin, Atomic Enterprise Platform and OpenShift
+# Enterprise content installed by ansible. This includes:
+#
+# configuration
+# containers
+# example templates and imagestreams
+# images
+# RPMs
+---
+- hosts:
+ - OSEv3:children
+
+ sudo: yes
+
+ tasks:
+ - name: Detecting Operating System
+ shell: ls /run/ostree-booted
+ ignore_errors: yes
+ failed_when: false
+ register: ostree_output
+
+ # Since we're not calling openshift_facts we'll do this for now
+ - set_fact:
+ is_atomic: "{{ ostree_output.rc == 0 }}"
+ - set_fact:
+ is_containerized: "{{ is_atomic or containerized | default(false) | bool }}"
+
+ - name: Remove br0 interface
+ shell: ovs-vsctl del-br br0
+ changed_when: False
+ failed_when: False
+
+ - name: Stop services
+ service: name={{ item }} state=stopped
+ with_items:
+ - atomic-enterprise-master
+ - atomic-enterprise-node
+ - atomic-openshift-master
+ - atomic-openshift-master-api
+ - atomic-openshift-master-controllers
+ - atomic-openshift-node
+ - etcd
+ - haproxy
+ - openshift-master
+ - openshift-master-api
+ - openshift-master-controllers
+ - openshift-node
+ - openvswitch
+ - origin-master
+ - origin-master-api
+ - origin-master-controllers
+ - origin-node
+ - pcsd
+ failed_when: false
+
+ - name: Stop additional atomic services
+ service: name={{ item }} state=stopped
+ when: is_containerized | bool
+ with_items:
+ - etcd_container
+ failed_when: false
+
+ - name: Remove packages
+ action: "{{ ansible_pkg_mgr }} name={{ item }} state=absent"
+ when: not is_atomic | bool
+ with_items:
+ - atomic-enterprise
+ - atomic-enterprise-master
+ - atomic-enterprise-node
+ - atomic-enterprise-sdn-ovs
+ - atomic-openshift
+ - atomic-openshift-clients
+ - atomic-openshift-master
+ - atomic-openshift-node
+ - atomic-openshift-sdn-ovs
+ - cockpit-bridge
+ - cockpit-docker
+ - cockpit-shell
+ - cockpit-ws
+ - corosync
+ - etcd
+ - haproxy
+ - kubernetes-client
+ - openshift
+ - openshift-master
+ - openshift-node
+ - openshift-sdn
+ - openshift-sdn-ovs
+ - openvswitch
+ - origin
+ - origin-clients
+ - origin-master
+ - origin-node
+ - origin-sdn-ovs
+ - pacemaker
+ - pcs
+ - tuned-profiles-atomic-enterprise-node
+ - tuned-profiles-atomic-openshift-node
+ - tuned-profiles-openshift-node
+ - tuned-profiles-origin-node
+
+ - name: Remove linux interfaces
+ shell: ip link del "{{ item }}"
+ changed_when: False
+ failed_when: False
+ with_items:
+ - lbr0
+ - vlinuxbr
+ - vovsbr
+
+ - shell: systemctl reset-failed
+ changed_when: False
+
+ - shell: systemctl daemon-reload
+ changed_when: False
+
+ - shell: find /var/lib/origin/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true
+ changed_when: False
+
+ - shell: find /var/lib/atomic-enterprise/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true
+ changed_when: False
+
+ - shell: find /var/lib/openshift/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true
+ changed_when: False
+
+ - shell: docker rm -f "{{ item }}"-master "{{ item }}"-node
+ changed_when: False
+ failed_when: False
+ with_items:
+ - openshift-enterprise
+ - atomic-enterprise
+ - origin
+
+ - shell: docker ps -a | grep Exited | egrep "{{ item }}" | awk '{print $1}'
+ changed_when: False
+ failed_when: False
+ register: exited_containers_to_delete
+ with_items:
+ - aep3.*/aep
+ - aep3.*/node
+ - aep3.*/openvswitch
+ - openshift3/ose
+ - openshift3/node
+ - openshift3/openvswitch
+ - openshift/origin
+
+ - shell: "docker rm {{ item.stdout_lines | join(' ') }}"
+ changed_when: False
+ failed_when: False
+ with_items: "{{ exited_containers_to_delete.results }}"
+
+ - shell: docker images | egrep {{ item }} | awk '{ print $3 }'
+ changed_when: False
+ failed_when: False
+ register: images_to_delete
+ with_items:
+ - registry\.access\..*redhat\.com/openshift3
+ - registry\.access\..*redhat\.com/aep3
+ - registry\.qe\.openshift\.com/.*
+ - registry\.access\..*redhat\.com/rhel7/etcd
+ - docker.io/openshift
+
+ - shell: "docker rmi -f {{ item.stdout_lines | join(' ') }}"
+ changed_when: False
+ failed_when: False
+ with_items: "{{ images_to_delete.results }}"
+
+ - name: Remove sdn drop files
+ file:
+ path: /run/openshift-sdn
+ state: absent
+
+ - name: restart docker
+ service:
+ name: docker
+ state: restarted
+
+ - name: Remove remaining files
+ file: path={{ item }} state=absent
+ with_items:
+ - "~{{ ansible_ssh_user }}/.kube"
+ - /etc/ansible/facts.d/openshift.fact
+ - /etc/atomic-enterprise
+ - /etc/corosync
+ - /etc/etcd
+ - /etc/openshift
+ - /etc/openshift-sdn
+ - /etc/origin
+ - /etc/systemd/system/atomic-openshift-master.service
+ - /etc/systemd/system/atomic-openshift-master-api.service
+ - /etc/systemd/system/atomic-openshift-master-controllers.service
+ - /etc/systemd/system/atomic-openshift-node.service
+ - /etc/systemd/system/etcd_container.service
+ - /etc/systemd/system/openvswitch.service
+ - /etc/sysconfig/atomic-enterprise-master
+ - /etc/sysconfig/atomic-enterprise-master-api
+ - /etc/sysconfig/atomic-enterprise-master-controllers
+ - /etc/sysconfig/atomic-enterprise-node
+ - /etc/sysconfig/atomic-openshift-master
+ - /etc/sysconfig/atomic-openshift-master-api
+ - /etc/sysconfig/atomic-openshift-master-controllers
+ - /etc/sysconfig/atomic-openshift-node
+ - /etc/sysconfig/openshift-master
+ - /etc/sysconfig/openshift-node
+ - /etc/sysconfig/openvswitch
+ - /etc/sysconfig/origin-master
+ - /etc/sysconfig/origin-master-api
+ - /etc/sysconfig/origin-master-controllers
+ - /etc/sysconfig/origin-node
+ - /etc/systemd/system/atomic-openshift-node.service.wants
+ - /root/.kube
+ - /run/openshift-sdn
+ - /usr/share/openshift/examples
+ - /var/lib/atomic-enterprise
+ - /var/lib/etcd
+ - /var/lib/openshift
+ - /var/lib/origin
+ - /var/lib/pacemaker
+ - /usr/lib/systemd/system/atomic-openshift-master-api.service
+ - /usr/lib/systemd/system/atomic-openshift-master-controllers.service
+ - /usr/lib/systemd/system/origin-master-api.service
+ - /usr/lib/systemd/system/origin-master-controllers.service
+ - /usr/local/bin/openshift
+ - /usr/local/bin/oadm
+ - /usr/local/bin/oc
+ - /usr/local/bin/kubectl
+
+  # Since we are potentially removing the systemd unit files for the separated
+  # master-api and master-controllers services, we need to reload the systemd
+  # configuration manager
+ - name: Reload systemd manager configuration
+ command: systemctl daemon-reload
+
+- hosts: nodes
+ sudo: yes
+ tasks:
+ - name: restart docker
+ service: name=docker state=restarted
diff --git a/playbooks/adhoc/zabbix_setup/clean_zabbix.yml b/playbooks/adhoc/zabbix_setup/clean_zabbix.yml
index 1e884240a..09f7c76cc 100644
--- a/playbooks/adhoc/zabbix_setup/clean_zabbix.yml
+++ b/playbooks/adhoc/zabbix_setup/clean_zabbix.yml
@@ -1,6 +1,8 @@
---
- hosts: localhost
gather_facts: no
+ connection: local
+ become: no
vars:
g_server: http://localhost:8080/zabbix/api_jsonrpc.php
g_user: ''
diff --git a/playbooks/adhoc/zabbix_setup/oo-config-zaio.yml b/playbooks/adhoc/zabbix_setup/oo-config-zaio.yml
index e2b8150c6..2f1d003ff 100755
--- a/playbooks/adhoc/zabbix_setup/oo-config-zaio.yml
+++ b/playbooks/adhoc/zabbix_setup/oo-config-zaio.yml
@@ -2,12 +2,18 @@
---
- hosts: localhost
gather_facts: no
+ connection: local
+ become: no
vars:
g_server: http://localhost/zabbix/api_jsonrpc.php
g_user: Admin
g_password: zabbix
+ g_zbx_scriptrunner_user: scriptrunner
+ g_zbx_scriptrunner_bastion_host: specialhost.example.com
roles:
- role: os_zabbix
ozb_server: "{{ g_server }}"
ozb_user: "{{ g_user }}"
ozb_password: "{{ g_password }}"
+ ozb_scriptrunner_user: "{{ g_zbx_scriptrunner_user }}"
+ ozb_scriptrunner_bastion_host: "{{ g_zbx_scriptrunner_bastion_host }}"