Diffstat (limited to 'playbooks/adhoc')
-rw-r--r--  playbooks/adhoc/atomic_openshift_tutorial_reset.yml                          |  77
-rw-r--r--  playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml     |  11
-rwxr-xr-x  playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml | 115
-rw-r--r--  playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml                            |   2
-rw-r--r--  playbooks/adhoc/s3_registry/s3_registry.j2                                   |   4
-rw-r--r--  playbooks/adhoc/s3_registry/s3_registry.yml                                  |  27
-rw-r--r--  playbooks/adhoc/uninstall.yml                                                | 159
-rw-r--r--  playbooks/adhoc/upgrades/upgrade.yml                                         |  25
8 files changed, 329 insertions, 91 deletions
diff --git a/playbooks/adhoc/atomic_openshift_tutorial_reset.yml b/playbooks/adhoc/atomic_openshift_tutorial_reset.yml
index 54d3ea278..c14d08e87 100644
--- a/playbooks/adhoc/atomic_openshift_tutorial_reset.yml
+++ b/playbooks/adhoc/atomic_openshift_tutorial_reset.yml
@@ -1,6 +1,9 @@
 # This deletes *ALL* Docker images, and uninstalls OpenShift and
 # Atomic Enterprise RPMs. It is primarily intended for use
 # with the tutorial as well as for developers to reset state.
+#
+---
+- include: uninstall.yml
 
 - hosts:
   - OSEv3:children
@@ -8,59 +11,6 @@
   sudo: yes
 
   tasks:
-  - service: name={{ item }} state=stopped
-    with_items:
-    - openvswitch
-    - origin-master
-    - origin-node
-    - atomic-openshift-master
-    - atomic-openshift-node
-    - openshift-master
-    - openshift-node
-    - atomic-enterprise-master
-    - atomic-enterprise-node
-    - etcd
-
-  - yum: name={{ item }} state=absent
-    with_items:
-    - openvswitch
-    - etcd
-    - origin
-    - origin-master
-    - origin-node
-    - origin-sdn-ovs
-    - tuned-profiles-origin-node
-    - atomic-openshift
-    - atomic-openshift-master
-    - atomic-openshift-node
-    - atomic-openshift-sdn-ovs
-    - tuned-profiles-atomic-openshift-node
-    - atomic-enterprise
-    - atomic-enterprise-master
-    - atomic-enterprise-node
-    - atomic-enterprise-sdn-ovs
-    - tuned-profiles-atomic-enterprise-node
-    - openshift
-    - openshift-master
-    - openshift-node
-    - openshift-sdn-ovs
-    - tuned-profiles-openshift-node
-
-  - shell: systemctl reset-failed
-    changed_when: False
-
-  - shell: systemctl daemon-reload
-    changed_when: False
-
-  - shell: find /var/lib/origin/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true
-    changed_when: False
-
-  - shell: find /var/lib/atomic-enterprise/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true
-    changed_when: False
-
-  - shell: find /var/lib/openshift/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true
-    changed_when: False
-
   - shell: docker ps -a -q | xargs docker stop
     changed_when: False
     failed_when: False
@@ -73,27 +23,6 @@
     changed_when: False
     failed_when: False
 
-  - file: path={{ item }} state=absent
-    with_items:
-    - /etc/openshift-sdn
-    - /root/.kube
-    - /etc/origin
-    - /etc/atomic-enterprise
-    - /etc/openshift
-    - /var/lib/origin
-    - /var/lib/openshift
-    - /var/lib/atomic-enterprise
-    - /etc/sysconfig/origin-master
-    - /etc/sysconfig/origin-node
-    - /etc/sysconfig/atomic-openshift-master
-    - /etc/sysconfig/atomic-openshift-node
-    - /etc/sysconfig/openshift-master
-    - /etc/sysconfig/openshift-node
-    - /etc/sysconfig/atomic-enterprise-master
-    - /etc/sysconfig/atomic-enterprise-node
-    - /etc/etcd
-    - /var/lib/etcd
-
   - user: name={{ item }} state=absent remove=yes
     with_items:
     - alice
diff --git a/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml b/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml
index c9ae923bb..b6a2d2f26 100644
--- a/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml
+++ b/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml
@@ -27,9 +27,8 @@
   gather_facts: no
 
   vars:
-    cli_volume_type: io1
+    cli_volume_type: gp2
     cli_volume_size: 30
-    cli_volume_iops: "{{ 30 * cli_volume_size }}"
 
   pre_tasks:
   - fail:
@@ -104,7 +103,6 @@
       volume_size: "{{ cli_volume_size | default(30, True)}}"
       volume_type: "{{ cli_volume_type }}"
       device_name: /dev/xvdb
-      iops: "{{ 30 * cli_volume_size }}"
     register: vol
 
   - debug: var=vol
@@ -142,10 +140,3 @@
 
   - debug: var=dockerstart
 
-  - name: Wait for docker to stabilize
-    pause:
-      seconds: 30
-
-  # leaving off the '-t' for docker exec. With it, it doesn't work with ansible and tty support
-  - name: update zabbix docker items
-    command: docker exec -i oso-rhel7-zagg-client /usr/local/bin/cron-send-docker-metrics.py
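A note on the gp2 change above: the ec2_vol module only honors an iops setting for provisioned-iops (io1) volumes, which is why the cli_volume_iops variable and the iops argument disappear together. A minimal sketch of the resulting volume call, with a hypothetical instance id:

    - name: attach a gp2 volume for docker storage (sketch, hypothetical instance id)
      ec2_vol:
        instance: i-0123456789abcdef0  # hypothetical
        volume_size: 30
        volume_type: gp2               # gp2 ignores iops; only io1 uses it
        device_name: /dev/xvdb
      register: vol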
diff --git a/playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml b/playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml
new file mode 100755
index 000000000..72fcd77b3
--- /dev/null
+++ b/playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml
@@ -0,0 +1,115 @@
+#!/usr/bin/ansible-playbook
+---
+# This playbook converts docker from loopback to direct-lvm (the Red Hat recommended way to run docker).
+#
+# It requires the block device to be already provisioned and attached to the host. This is a generic playbook,
+# meant to be used for manual conversion. For AWS specific conversions, use the other playbook in this directory.
+#
+# To run:
+#  ./ops-docker-loopback-to-direct-lvm.yml -e cli_host=<host to run on> -e cli_docker_device=<path to device>
+#
+# Example:
+#  ./ops-docker-loopback-to-direct-lvm.yml -e cli_host=twiesttest-master-fd32 -e cli_docker_device=/dev/sdb
+#
+# Notes:
+# * This will remove /var/lib/docker!
+# * You may need to re-deploy docker images after this is run (like monitoring)
+
+- name: Fix docker to have a provisioned iops drive
+  hosts: "{{ cli_name }}"
+  user: root
+  connection: ssh
+  gather_facts: no
+
+  pre_tasks:
+  - fail:
+      msg: "This playbook requires {{item}} to be set."
+    when: "{{ item }} is not defined or {{ item }} == ''"
+    with_items:
+    - cli_docker_device
+
+  - name: start docker
+    service:
+      name: docker
+      state: started
+
+  - name: Determine if loopback
+    shell: docker info | grep 'Data file:.*loop'
+    register: loop_device_check
+    ignore_errors: yes
+
+  - debug:
+      var: loop_device_check
+
+  - name: fail if we don't detect loopback
+    fail:
+      msg: loopback not detected! Please investigate manually.
+    when: loop_device_check.rc == 1
+
+  - name: stop zagg client monitoring container
+    service:
+      name: oso-rhel7-zagg-client
+      state: stopped
+    ignore_errors: yes
+
+  - name: stop pcp client monitoring container
+    service:
+      name: oso-f22-host-monitoring
+      state: stopped
+    ignore_errors: yes
+
+  - name: "check to see if {{ cli_docker_device }} exists"
+    command: "test -e {{ cli_docker_device }}"
+    register: docker_dev_check
+    ignore_errors: yes
+
+  - debug: var=docker_dev_check
+
+  - name: "fail if {{ cli_docker_device }} doesn't exist"
+    fail:
+      msg: "{{ cli_docker_device }} doesn't exist. Please investigate"
+    when: docker_dev_check.rc != 0
+
+  - name: stop docker
+    service:
+      name: docker
+      state: stopped
+
+  - name: delete /var/lib/docker
+    command: rm -rf /var/lib/docker
+
+  - name: remove /var/lib/docker
+    command: rm -rf /var/lib/docker
+
+  - name: copy the docker-storage-setup config file
+    copy:
+      content: >
+        DEVS={{ cli_docker_device }}
+        VG=docker_vg
+      dest: /etc/sysconfig/docker-storage-setup
+      owner: root
+      group: root
+      mode: 0664
+
+  - name: docker storage setup
+    command: docker-storage-setup
+    register: setup_output
+
+  - debug: var=setup_output
+
+  - name: extend the vg
+    command: lvextend -l 90%VG /dev/docker_vg/docker-pool
+    register: extend_output
+
+  - debug: var=extend_output
+
+  - name: start docker
+    service:
+      name: docker
+      state: restarted
+
+  - name: docker info
+    command: docker info
+    register: dockerinfo
+
+  - debug: var=dockerinfo
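A quick post-conversion check, mirroring the playbook's own "Determine if loopback" task but inverted into a failure condition; this is a sketch, not part of the playbook:

    - name: verify docker is no longer on a loopback data file (sketch)
      shell: docker info | grep 'Data file'
      register: data_file_check
      # devicemapper on loopback reports something like 'Data file: /dev/loop0'
      failed_when: "'loop' in data_file_check.stdout"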
diff --git a/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml b/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml
index ef9b45abd..63d473146 100644
--- a/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml
+++ b/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml
@@ -172,7 +172,7 @@
 
   - name: pvmove onto new volume
     command: "pvmove {{ docker_pv_name.stdout }} /dev/xvdc1"
-    async: 3600
+    async: 43200
     poll: 10
 
   - name: Remove the old docker drive from the volume group
diff --git a/playbooks/adhoc/s3_registry/s3_registry.j2 b/playbooks/adhoc/s3_registry/s3_registry.j2
index 026b24456..acfa89515 100644
--- a/playbooks/adhoc/s3_registry/s3_registry.j2
+++ b/playbooks/adhoc/s3_registry/s3_registry.j2
@@ -7,8 +7,8 @@ storage:
   cache:
     layerinfo: inmemory
   s3:
-    accesskey: {{ accesskey }}
-    secretkey: {{ secretkey }}
+    accesskey: {{ aws_access_key }}
+    secretkey: {{ aws_secret_key }}
     region: us-east-1
     bucket: {{ clusterid }}-docker
     encrypt: true
diff --git a/playbooks/adhoc/s3_registry/s3_registry.yml b/playbooks/adhoc/s3_registry/s3_registry.yml
index 30b873db3..4dcef1a42 100644
--- a/playbooks/adhoc/s3_registry/s3_registry.yml
+++ b/playbooks/adhoc/s3_registry/s3_registry.yml
@@ -1,20 +1,38 @@
 ---
 # This playbook creates an S3 bucket named after your cluster and configures the docker-registry service to use the bucket as its backend storage.
 # Usage:
-# ansible-playbook s3_registry.yml -e accesskey="S3 aws access key" -e secretkey="S3 aws secret key" -e clusterid="mycluster"
+# ansible-playbook s3_registry.yml -e clusterid="mycluster"
 #
 # The AWS access/secret keys should be the keys of a separate user (not your main user), containing only the necessary S3 access role.
 # The 'clusterid' is the short name of your cluster.
 
-- hosts: security_group_{{ clusterid }}_master
+- hosts: tag_env-host-type_{{ clusterid }}-openshift-master
   remote_user: root
   gather_facts: False
 
+  vars:
+    aws_access_key: "{{ lookup('env', 'S3_ACCESS_KEY_ID') }}"
+    aws_secret_key: "{{ lookup('env', 'S3_SECRET_ACCESS_KEY') }}"
+
   tasks:
 
+  - name: Check for AWS creds
+    fail:
+      msg: "Couldn't find {{ item }} creds in ENV"
+    when: "{{ item }} == ''"
+    with_items:
+    - aws_access_key
+    - aws_secret_key
+
+  - name: Scale down registry
+    command: oc scale --replicas=0 dc/docker-registry
+
   - name: Create S3 bucket
     local_action:
-      module: s3 bucket="{{ clusterid }}-docker" mode=create aws_access_key={{ accesskey|quote }} aws_secret_key={{ secretkey|quote }}
+      module: s3 bucket="{{ clusterid }}-docker" mode=create
+
+  - name: Set up registry environment variable
+    command: oc env dc/docker-registry REGISTRY_CONFIGURATION_PATH=/etc/registryconfig/config.yml
 
   - name: Generate docker registry config
     template: src="s3_registry.j2" dest="/root/config.yml" owner=root mode=0600
@@ -43,6 +61,9 @@
       command: oc volume dc/docker-registry --add --name=dockersecrets -m /etc/registryconfig --type=secret --secret-name=dockerregistry
     when: "'dockersecrets' not in dc.stdout"
 
+  - name: Wait for deployment config to take effect before scaling up
+    pause: seconds=30
+
   - name: Scale up registry
     command: oc scale --replicas=1 dc/docker-registry
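With the change above, the registry credentials no longer travel as -e extra vars; lookup('env', ...) resolves them on the control host at runtime, so a run would look roughly like this (key values hypothetical):

    # S3_ACCESS_KEY_ID=AKIA... S3_SECRET_ACCESS_KEY=... \
    #   ansible-playbook s3_registry.yml -e clusterid=mycluster
    vars:
      aws_access_key: "{{ lookup('env', 'S3_ACCESS_KEY_ID') }}"
      aws_secret_key: "{{ lookup('env', 'S3_SECRET_ACCESS_KEY') }}"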
diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml
new file mode 100644
index 000000000..0503b7cd4
--- /dev/null
+++ b/playbooks/adhoc/uninstall.yml
@@ -0,0 +1,159 @@
+# This deletes *ALL* Origin, Atomic Enterprise Platform and OpenShift
+# Enterprise content installed by ansible. This includes:
+#
+#    configuration
+#    containers
+#    example templates and imagestreams
+#    images
+#    RPMs
+---
+- hosts:
+  - OSEv3:children
+
+  sudo: yes
+
+  tasks:
+  - name: Detecting Operating System
+    shell: ls /run/ostree-booted
+    ignore_errors: yes
+    failed_when: false
+    register: ostree_output
+
+  - set_fact:
+      is_atomic: "{{ ostree_output.rc == 0 }}"
+
+  - name: Remove br0 interface
+    shell: ovs-vsctl del-br br0
+    changed_when: False
+    failed_when: False
+
+  - service: name={{ item }} state=stopped
+    with_items:
+    - atomic-enterprise-master
+    - atomic-enterprise-node
+    - atomic-openshift-master
+    - atomic-openshift-master-api
+    - atomic-openshift-master-controllers
+    - atomic-openshift-node
+    - etcd
+    - openshift-master
+    - openshift-master-api
+    - openshift-master-controllers
+    - openshift-node
+    - openvswitch
+    - origin-master
+    - origin-master-api
+    - origin-master-controllers
+    - origin-node
+
+  - yum: name={{ item }} state=absent
+    when: not is_atomic | bool
+    with_items:
+    - atomic-enterprise
+    - atomic-enterprise-master
+    - atomic-enterprise-node
+    - atomic-enterprise-sdn-ovs
+    - atomic-openshift
+    - atomic-openshift-clients
+    - atomic-openshift-master
+    - atomic-openshift-node
+    - atomic-openshift-sdn-ovs
+    - etcd
+    - openshift
+    - openshift-master
+    - openshift-node
+    - openshift-sdn
+    - openshift-sdn-ovs
+    - openvswitch
+    - origin
+    - origin-master
+    - origin-node
+    - origin-sdn-ovs
+    - tuned-profiles-atomic-enterprise-node
+    - tuned-profiles-atomic-openshift-node
+    - tuned-profiles-openshift-node
+    - tuned-profiles-origin-node
+
+  - name: Remove linux interfaces
+    shell: ip link del "{{ item }}"
+    changed_when: False
+    failed_when: False
+    with_items:
+    - lbr0
+    - vlinuxbr
+    - vovsbr
+
+  - shell: systemctl reset-failed
+    changed_when: False
+
+  - shell: systemctl daemon-reload
+    changed_when: False
+
+  - shell: find /var/lib/origin/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true
+    changed_when: False
+
+  - shell: find /var/lib/atomic-enterprise/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true
+    changed_when: False
+
+  - shell: find /var/lib/openshift/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true
+    changed_when: False
+
+  - shell: docker rm -f "{{ item }}"-master "{{ item }}"-node
+    changed_when: False
+    failed_when: False
+    with_items:
+    - openshift-enterprise
+    - atomic-enterprise
+    - origin
+
+  - shell: docker ps -a | grep Exited | grep "{{ item }}" | awk '{print $1}'
+    changed_when: False
+    failed_when: False
+    register: exited_containers_to_delete
+    with_items:
+    - aep3/aep
+    - openshift3/ose
+    - openshift/origin
+
+  - shell: "docker rm {{ item.stdout_lines | join(' ') }}"
+    changed_when: False
+    failed_when: False
+    with_items: "{{ exited_containers_to_delete.results }}"
+
+  - shell: docker images | grep {{ item }} | awk '{ print $3 }'
+    changed_when: False
+    failed_when: False
+    register: images_to_delete
+    with_items:
+    - registry.access.redhat.com/openshift3
+    - registry.access.redhat.com/aep3
+    - docker.io/openshift
+
+  - shell: "docker rmi -f {{ item.stdout_lines | join(' ') }}"
+    changed_when: False
+    failed_when: False
+    with_items: "{{ images_to_delete.results }}"
+
+  - file: path={{ item }} state=absent
+    with_items:
+    - /etc/ansible/facts.d/openshift.fact
+    - /etc/atomic-enterprise
+    - /etc/etcd
+    - /etc/openshift
+    - /etc/openshift-sdn
+    - /etc/origin
+    - /etc/sysconfig/atomic-enterprise-master
+    - /etc/sysconfig/atomic-enterprise-node
+    - /etc/sysconfig/atomic-openshift-master
+    - /etc/sysconfig/atomic-openshift-node
+    - /etc/sysconfig/openshift-master
+    - /etc/sysconfig/openshift-node
+    - /etc/sysconfig/origin-master
+    - /etc/sysconfig/origin-node
+    - /root/.kube
+    - "~{{ ansible_ssh_user }}/.kube"
+    - /usr/share/openshift/examples
+    - /var/lib/atomic-enterprise
+    - /var/lib/etcd
+    - /var/lib/openshift
+    - /var/lib/origin
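The Atomic detection above shells out to ls and derives is_atomic from the return code. A stat-based variant of the same check (a sketch, not what the playbook ships) avoids the ignore_errors/failed_when bookkeeping:

    - name: Detect an Atomic (ostree-booted) host
      stat:
        path: /run/ostree-booted
      register: ostree_booted

    - set_fact:
        is_atomic: "{{ ostree_booted.stat.exists }}"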
diff --git a/playbooks/adhoc/upgrades/upgrade.yml b/playbooks/adhoc/upgrades/upgrade.yml
index e666f0472..ae1d0127c 100644
--- a/playbooks/adhoc/upgrades/upgrade.yml
+++ b/playbooks/adhoc/upgrades/upgrade.yml
@@ -1,4 +1,14 @@
 ---
+- name: Upgrade base package on masters
+  hosts: masters
+  roles:
+  - openshift_facts
+  vars:
+    openshift_version: "{{ openshift_pkg_version | default('') }}"
+  tasks:
+    - name: Upgrade base package
+      yum: pkg={{ openshift.common.service_type }}{{ openshift_version }} state=latest
+
 - name: Re-Run cluster configuration to apply latest configuration changes
   include: ../../common/openshift-cluster/config.yml
   vars:
@@ -40,7 +50,7 @@
   hosts: oo_first_master
   tasks:
     fail: This playbook requires Origin 1.0.6 or Atomic OpenShift 3.0.2 or later
-    when: _new_version.stdout < 1.0.6 or (_new_version.stdout >= 3.0 and _new_version.stdout < 3.0.2)
+    when: _new_version.stdout | version_compare('1.0.6','<') or ( _new_version.stdout | version_compare('3.0','>=') and _new_version.stdout | version_compare('3.0.2','<') )
 
 - name: Update cluster policy
   hosts: oo_first_master
@@ -50,6 +60,19 @@
       {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
       policy reconcile-cluster-roles --confirm
 
+- name: Update cluster policy bindings
+  hosts: oo_first_master
+  tasks:
+  - name: oadm policy reconcile-cluster-role-bindings --confirm
+    command: >
+      {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+      policy reconcile-cluster-role-bindings
+      --exclude-groups=system:authenticated
+      --exclude-groups=system:unauthenticated
+      --exclude-users=system:anonymous
+      --additive-only=true --confirm
+    when: ( _new_version.stdout | version_compare('1.0.6', '>') and _new_version.stdout | version_compare('3.0','<') ) or _new_version.stdout | version_compare('3.0.2','>')
+
 - name: Upgrade default router
   hosts: oo_first_master
   vars:
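The version_compare filter used in these guards compares version segments numerically, unlike the raw string comparison it replaces, which breaks as soon as a segment reaches two digits. A minimal illustration with hypothetical values:

    - debug:
        msg: "{{ '3.0.10' | version_compare('3.0.2', '>') }}"  # True; as strings, '3.0.10' < '3.0.2'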