From 69f6fd410500a3dd20a97a9e3dad860761b09ac8 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 2 Jul 2015 11:59:22 -0400 Subject: playbooks/adhoc: Add a tutorial-reset playbook to undo everything This makes it easier to run through the tutorial, as well as reset a VM or baremetal node to a clean slate for developer testing. --- playbooks/adhoc/tutorial-reset.yml | 46 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 46 insertions(+) create mode 100644 playbooks/adhoc/tutorial-reset.yml (limited to 'playbooks/adhoc') diff --git a/playbooks/adhoc/tutorial-reset.yml b/playbooks/adhoc/tutorial-reset.yml new file mode 100644 index 000000000..1ceb72d19 --- /dev/null +++ b/playbooks/adhoc/tutorial-reset.yml @@ -0,0 +1,46 @@ +# This deletes *ALL* Docker images, and uninstalls OpenShift and +# Atomic Enterprise RPMs. It is primarily intended for use +# with the tutorial as well as for developers to reset state. + +- hosts: + - OSEv3:children + + sudo: yes + + tasks: + - service: name={{ item }} state=stopped + with_items: + - docker + - atomic-enterprise-master + - atomic-enterprise-node + + - yum: name={{ item }} state=absent + with_items: + - openvswitch + - atomic-enterprise + - atomic-enterprise-master + - atomic-enterprise-node + - atomic-enterprise-sdn-ovs + - tuned-profiles-atomic-enterprise-node + + - shell: systemctl reset-failed + changed_when: False + + - shell: systemctl daemon-reload + changed_when: False + + - shell: find /var/lib/atomic-enterprise/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true + changed_when: False + + - file: path={{ item }} state=absent + with_items: + - /var/lib/atomic-enterprise + - /etc/sysconfig/atomic-enterprise + - /etc/atomic-enterprise + - /etc/openshift + - /var/lib/docker + + - user: name={{ item }} state=absent remove=yes + with_items: + - alice + - joe -- cgit v1.2.3 From c85b503d6f02514beb9ea73c6a12fe2ef1bfb25a Mon Sep 17 00:00:00 2001 From: Avesh Agarwal Date: Wed, 12 Aug 2015 09:48:44 -0400 Subject: Added /root/.kube to be deleted so that the stuff there does not prevent a new install. --- playbooks/adhoc/tutorial-reset.yml | 1 + 1 file changed, 1 insertion(+) (limited to 'playbooks/adhoc') diff --git a/playbooks/adhoc/tutorial-reset.yml b/playbooks/adhoc/tutorial-reset.yml index 1ceb72d19..77bc13b17 100644 --- a/playbooks/adhoc/tutorial-reset.yml +++ b/playbooks/adhoc/tutorial-reset.yml @@ -39,6 +39,7 @@ - /etc/atomic-enterprise - /etc/openshift - /var/lib/docker + - /root/.kube - user: name={{ item }} state=absent remove=yes with_items: -- cgit v1.2.3 From 472ecf8ac4bd63556b91b70a779e2e738546f77c Mon Sep 17 00:00:00 2001 From: Avesh Agarwal Date: Thu, 13 Aug 2015 18:28:14 -0400 Subject: Renamed the file as it mainly applies to atomic enterprise. --- .../adhoc/atomic_enterprise_tutorial_reset.yml | 47 ++++++++++++++++++++++ playbooks/adhoc/tutorial-reset.yml | 47 ---------------------- 2 files changed, 47 insertions(+), 47 deletions(-) create mode 100644 playbooks/adhoc/atomic_enterprise_tutorial_reset.yml delete mode 100644 playbooks/adhoc/tutorial-reset.yml (limited to 'playbooks/adhoc') diff --git a/playbooks/adhoc/atomic_enterprise_tutorial_reset.yml b/playbooks/adhoc/atomic_enterprise_tutorial_reset.yml new file mode 100644 index 000000000..77bc13b17 --- /dev/null +++ b/playbooks/adhoc/atomic_enterprise_tutorial_reset.yml @@ -0,0 +1,47 @@ +# This deletes *ALL* Docker images, and uninstalls OpenShift and +# Atomic Enterprise RPMs. 
It is primarily intended for use +# with the tutorial as well as for developers to reset state. + +- hosts: + - OSEv3:children + + sudo: yes + + tasks: + - service: name={{ item }} state=stopped + with_items: + - docker + - atomic-enterprise-master + - atomic-enterprise-node + + - yum: name={{ item }} state=absent + with_items: + - openvswitch + - atomic-enterprise + - atomic-enterprise-master + - atomic-enterprise-node + - atomic-enterprise-sdn-ovs + - tuned-profiles-atomic-enterprise-node + + - shell: systemctl reset-failed + changed_when: False + + - shell: systemctl daemon-reload + changed_when: False + + - shell: find /var/lib/atomic-enterprise/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true + changed_when: False + + - file: path={{ item }} state=absent + with_items: + - /var/lib/atomic-enterprise + - /etc/sysconfig/atomic-enterprise + - /etc/atomic-enterprise + - /etc/openshift + - /var/lib/docker + - /root/.kube + + - user: name={{ item }} state=absent remove=yes + with_items: + - alice + - joe diff --git a/playbooks/adhoc/tutorial-reset.yml b/playbooks/adhoc/tutorial-reset.yml deleted file mode 100644 index 77bc13b17..000000000 --- a/playbooks/adhoc/tutorial-reset.yml +++ /dev/null @@ -1,47 +0,0 @@ -# This deletes *ALL* Docker images, and uninstalls OpenShift and -# Atomic Enterprise RPMs. It is primarily intended for use -# with the tutorial as well as for developers to reset state. - -- hosts: - - OSEv3:children - - sudo: yes - - tasks: - - service: name={{ item }} state=stopped - with_items: - - docker - - atomic-enterprise-master - - atomic-enterprise-node - - - yum: name={{ item }} state=absent - with_items: - - openvswitch - - atomic-enterprise - - atomic-enterprise-master - - atomic-enterprise-node - - atomic-enterprise-sdn-ovs - - tuned-profiles-atomic-enterprise-node - - - shell: systemctl reset-failed - changed_when: False - - - shell: systemctl daemon-reload - changed_when: False - - - shell: find /var/lib/atomic-enterprise/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true - changed_when: False - - - file: path={{ item }} state=absent - with_items: - - /var/lib/atomic-enterprise - - /etc/sysconfig/atomic-enterprise - - /etc/atomic-enterprise - - /etc/openshift - - /var/lib/docker - - /root/.kube - - - user: name={{ item }} state=absent remove=yes - with_items: - - alice - - joe -- cgit v1.2.3 From 008aa1b39a8c27cf227c87cdf225182a18a992e6 Mon Sep 17 00:00:00 2001 From: Avesh Agarwal Date: Fri, 14 Aug 2015 17:26:45 -0400 Subject: Updated tutorial reset file and made following chages: 1. Included openshift clean up 2. Renamed file to atomic_openshift_tutorial_reset.yml 3. docker service is not not stopped 4. docker containers and images are removed 5. /etc/openshift-sdn are removed too now --- .../adhoc/atomic_enterprise_tutorial_reset.yml | 47 --------------- .../adhoc/atomic_openshift_tutorial_reset.yml | 68 ++++++++++++++++++++++ 2 files changed, 68 insertions(+), 47 deletions(-) delete mode 100644 playbooks/adhoc/atomic_enterprise_tutorial_reset.yml create mode 100644 playbooks/adhoc/atomic_openshift_tutorial_reset.yml (limited to 'playbooks/adhoc') diff --git a/playbooks/adhoc/atomic_enterprise_tutorial_reset.yml b/playbooks/adhoc/atomic_enterprise_tutorial_reset.yml deleted file mode 100644 index 77bc13b17..000000000 --- a/playbooks/adhoc/atomic_enterprise_tutorial_reset.yml +++ /dev/null @@ -1,47 +0,0 @@ -# This deletes *ALL* Docker images, and uninstalls OpenShift and -# Atomic Enterprise RPMs. 
It is primarily intended for use -# with the tutorial as well as for developers to reset state. - -- hosts: - - OSEv3:children - - sudo: yes - - tasks: - - service: name={{ item }} state=stopped - with_items: - - docker - - atomic-enterprise-master - - atomic-enterprise-node - - - yum: name={{ item }} state=absent - with_items: - - openvswitch - - atomic-enterprise - - atomic-enterprise-master - - atomic-enterprise-node - - atomic-enterprise-sdn-ovs - - tuned-profiles-atomic-enterprise-node - - - shell: systemctl reset-failed - changed_when: False - - - shell: systemctl daemon-reload - changed_when: False - - - shell: find /var/lib/atomic-enterprise/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true - changed_when: False - - - file: path={{ item }} state=absent - with_items: - - /var/lib/atomic-enterprise - - /etc/sysconfig/atomic-enterprise - - /etc/atomic-enterprise - - /etc/openshift - - /var/lib/docker - - /root/.kube - - - user: name={{ item }} state=absent remove=yes - with_items: - - alice - - joe diff --git a/playbooks/adhoc/atomic_openshift_tutorial_reset.yml b/playbooks/adhoc/atomic_openshift_tutorial_reset.yml new file mode 100644 index 000000000..91159ad8e --- /dev/null +++ b/playbooks/adhoc/atomic_openshift_tutorial_reset.yml @@ -0,0 +1,68 @@ +# This deletes *ALL* Docker images, and uninstalls OpenShift and +# Atomic Enterprise RPMs. It is primarily intended for use +# with the tutorial as well as for developers to reset state. + +- hosts: + - OSEv3:children + + sudo: yes + + tasks: + - service: name={{ item }} state=stopped + with_items: + - openshift-master + - openshift-node + - openvswitch + - atomic-enterprise-master + - atomic-enterprise-node + + - yum: name={{ item }} state=absent + with_items: + - openvswitch + - atomic-enterprise + - atomic-enterprise-master + - atomic-enterprise-node + - atomic-enterprise-sdn-ovs + - tuned-profiles-atomic-enterprise-node + - openshift + - openshift-master + - openshift-node + - openshift-sdn-ovs + - tuned-profiles-openshift-node + + - shell: systemctl reset-failed + changed_when: False + + - shell: systemctl daemon-reload + changed_when: False + + - shell: find /var/lib/atomic-enterprise/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true + changed_when: False + + - shell: find /var/lib/openshift/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true + changed_when: False + + - shell: docker ps -a -q | xargs docker stop + changed_when: False + + - shell: docker ps -a -q| xargs docker rm + changed_when: False + + - shell: docker images -q |xargs docker rmi + changed_when: False + + - file: path={{ item }} state=absent + with_items: + - /var/lib/atomic-enterprise + - /etc/sysconfig/atomic-enterprise + - /etc/atomic-enterprise + - /etc/openshift + - /etc/openshift-sdn + - /root/.kube + - /etc/sysconfig/openshift + - /var/lib/openshift + + - user: name={{ item }} state=absent remove=yes + with_items: + - alice + - joe -- cgit v1.2.3 From 0e94fa986dd928888c36d2fbef71359c0b9b05d2 Mon Sep 17 00:00:00 2001 From: Avesh Agarwal Date: Mon, 17 Aug 2015 11:01:41 -0400 Subject: Updated to include origin and atomic-openshift RPMs re-factoring to include all origin, AE and openshift products. For back-word compatibility, older openshift and AE naming is retained too. 
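As a usage sketch (the inventory path and host names below are illustrative, not part of this change): with a BYO-style inventory that defines the OSEv3 group, the reset playbook can be pointed at an existing cluster directly:

    [OSEv3:children]
    masters
    nodes

    [masters]
    master.example.com

    [nodes]
    node1.example.com

    $ ansible-playbook -i ./hosts playbooks/adhoc/atomic_openshift_tutorial_reset.yml

The play draws its targets from the OSEv3 group, so every host in the masters and nodes groups above receives the full cleanup.
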
--- .../adhoc/atomic_openshift_tutorial_reset.yml | 37 ++++++++++++++++++---- 1 file changed, 31 insertions(+), 6 deletions(-) (limited to 'playbooks/adhoc') diff --git a/playbooks/adhoc/atomic_openshift_tutorial_reset.yml b/playbooks/adhoc/atomic_openshift_tutorial_reset.yml index 91159ad8e..1200caa2a 100644 --- a/playbooks/adhoc/atomic_openshift_tutorial_reset.yml +++ b/playbooks/adhoc/atomic_openshift_tutorial_reset.yml @@ -10,15 +10,29 @@ tasks: - service: name={{ item }} state=stopped with_items: + - openvswitch + - origin-master + - origin-node + - atomic-openshift-master + - atomic-openshift-node - openshift-master - openshift-node - - openvswitch - atomic-enterprise-master - atomic-enterprise-node - yum: name={{ item }} state=absent with_items: - openvswitch + - origin + - origin-master + - origin-node + - origin-sdn-ovs + - tuned-profiles-origin-node + - atomic-openshift + - atomic-openshift-master + - atomic-openshift-node + - atomic-openshift-sdn-ovs + - tuned-profiles-atomic-openshift-node - atomic-enterprise - atomic-enterprise-master - atomic-enterprise-node @@ -36,6 +50,9 @@ - shell: systemctl daemon-reload changed_when: False + - shell: find /var/lib/origin/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true + changed_when: False + - shell: find /var/lib/atomic-enterprise/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true changed_when: False @@ -53,14 +70,22 @@ - file: path={{ item }} state=absent with_items: - - /var/lib/atomic-enterprise - - /etc/sysconfig/atomic-enterprise - - /etc/atomic-enterprise - - /etc/openshift - /etc/openshift-sdn - /root/.kube - - /etc/sysconfig/openshift + - /etc/origin + - /etc/atomic-enterprise + - /etc/openshift + - /var/lib/origin - /var/lib/openshift + - /var/lib/atomic-enterprise + - /etc/sysconfig/origin-master + - /etc/sysconfig/origin-node + - /etc/sysconfig/atomic-openshift-master + - /etc/sysconfig/atomic-openshift-node + - /etc/sysconfig/openshift-master + - /etc/sysconfig/openshift-node + - /etc/sysconfig/atomic-enterprise-master + - /etc/sysconfig/atomic-enterprise-node - user: name={{ item }} state=absent remove=yes with_items: -- cgit v1.2.3 From 3c3669ccf9bacd69a222cdb45a0c377da0ce090a Mon Sep 17 00:00:00 2001 From: Kenny Woodson Date: Wed, 19 Aug 2015 13:21:20 -0400 Subject: remove fstab entry after pv creation --- playbooks/adhoc/create_pv/create_pv.yaml | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'playbooks/adhoc') diff --git a/playbooks/adhoc/create_pv/create_pv.yaml b/playbooks/adhoc/create_pv/create_pv.yaml index 684a0ca72..591b1d902 100644 --- a/playbooks/adhoc/create_pv/create_pv.yaml +++ b/playbooks/adhoc/create_pv/create_pv.yaml @@ -118,6 +118,13 @@ state: unmounted fstype: ext4 + - name: remove from fstab + mount: + name: "{{ pv_mntdir }}" + src: "{{ cli_device_name }}" + state: absent + fstype: ext4 + - name: detach drive delegate_to: localhost ec2_vol: -- cgit v1.2.3 From 0dc89f3583a5e88e1ca66780e974bc9520910410 Mon Sep 17 00:00:00 2001 From: Kenny Woodson Date: Wed, 19 Aug 2015 17:20:26 -0400 Subject: Added tagging to the pv volumes --- playbooks/adhoc/create_pv/create_pv.yaml | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'playbooks/adhoc') diff --git a/playbooks/adhoc/create_pv/create_pv.yaml b/playbooks/adhoc/create_pv/create_pv.yaml index 591b1d902..4f0ef7a75 100644 --- a/playbooks/adhoc/create_pv/create_pv.yaml +++ b/playbooks/adhoc/create_pv/create_pv.yaml @@ -50,6 +50,16 @@ - debug: var=vol + - name: tag the vol with a name + ec2_tag: 
region={{ hostvars[oo_name]['ec2_region'] }} resource={{vol.volume_id}}
+    args:
+      tags:
+        Name: "pv-{{ hostvars[oo_name]['ec2_tag_Name'] }}"
+        env: "{{cli_environment}}"
+    register: voltags
+
+  - debug: var=voltags
+
 - name: Configure the drive
   gather_facts: no
   hosts: oo_master
-- 
cgit v1.2.3


From 49923edfba6d396140881d6a920e83f9ecf79f77 Mon Sep 17 00:00:00 2001
From: Kenny Woodson
Date: Thu, 20 Aug 2015 11:44:27 -0400
Subject: fixed zbx_user. Update password playbook added

---
 playbooks/adhoc/zabbix_setup/create_user.yml | 31 ++++++++++++++++++++++
 1 file changed, 31 insertions(+)
 create mode 100644 playbooks/adhoc/zabbix_setup/create_user.yml
(limited to 'playbooks/adhoc')

diff --git a/playbooks/adhoc/zabbix_setup/create_user.yml b/playbooks/adhoc/zabbix_setup/create_user.yml
new file mode 100644
index 000000000..dd74798b7
--- /dev/null
+++ b/playbooks/adhoc/zabbix_setup/create_user.yml
@@ -0,0 +1,31 @@
+---
+# export PYTHONPATH='/usr/lib/python2.7/site-packages/:/home/kwoodson/git/openshift-tools'
+# ansible-playbook -e 'cli_password=zabbix' -e 'cli_new_password=new-zabbix' create_user.yml
+- hosts: localhost
+  gather_facts: no
+  vars_files:
+  - vars/template_heartbeat.yml
+  - vars/template_os_linux.yml
+  vars:
+    g_zserver: http://localhost/zabbix/api_jsonrpc.php
+    g_zuser: admin
+    g_zpassword: "{{ cli_password }}"
+  roles:
+  - ../../../roles/os_zabbix
+  post_tasks:
+  - zbx_user:
+      server: "{{ g_zserver }}"
+      user: "{{ g_zuser }}"
+      password: "{{ g_zpassword }}"
+      state: list
+    register: users
+
+  - debug: var=users
+
+  - name: Update zabbix creds for admin
+    zbx_user:
+      server: "{{ g_zserver }}"
+      user: "{{ g_zuser }}"
+      password: "{{ g_zpassword }}"
+      alias: Admin
+      passwd: "{{ cli_new_password | default(g_zpassword, true) }}"
-- 
cgit v1.2.3


From 693be4802c2b3886b82681c5c1666b9f13d9ca36 Mon Sep 17 00:00:00 2001
From: Kenny Woodson
Date: Fri, 21 Aug 2015 17:44:30 -0400
Subject: Updates for zbx ans module

---
 playbooks/adhoc/zabbix_setup/clean_zabbix.yml      | 57 ++++++++------
 playbooks/adhoc/zabbix_setup/create_template.yml   | 57 --------------
 playbooks/adhoc/zabbix_setup/create_user.yml       | 31 --------
 playbooks/adhoc/zabbix_setup/filter_plugins        |  1 -
 playbooks/adhoc/zabbix_setup/roles                 |  1 -
 playbooks/adhoc/zabbix_setup/setup_zabbix.yml      | 38 ---------
 .../adhoc/zabbix_setup/vars/template_heartbeat.yml | 11 ---
 .../adhoc/zabbix_setup/vars/template_host.yml      | 27 -------
 .../adhoc/zabbix_setup/vars/template_master.yml    | 27 -------
 .../adhoc/zabbix_setup/vars/template_node.yml      | 27 -------
 .../adhoc/zabbix_setup/vars/template_os_linux.yml  | 90 ----------------
 .../adhoc/zabbix_setup/vars/template_router.yml    | 27 -------
 12 files changed, 32 insertions(+), 362 deletions(-)
 delete mode 100644 playbooks/adhoc/zabbix_setup/create_template.yml
 delete mode 100644 playbooks/adhoc/zabbix_setup/create_user.yml
 delete mode 120000 playbooks/adhoc/zabbix_setup/filter_plugins
 delete mode 120000 playbooks/adhoc/zabbix_setup/roles
 delete mode 100644 playbooks/adhoc/zabbix_setup/setup_zabbix.yml
 delete mode 100644 playbooks/adhoc/zabbix_setup/vars/template_heartbeat.yml
 delete mode 100644 playbooks/adhoc/zabbix_setup/vars/template_host.yml
 delete mode 100644 playbooks/adhoc/zabbix_setup/vars/template_master.yml
 delete mode 100644 playbooks/adhoc/zabbix_setup/vars/template_node.yml
 delete mode 100644 playbooks/adhoc/zabbix_setup/vars/template_os_linux.yml
 delete mode 100644 playbooks/adhoc/zabbix_setup/vars/template_router.yml
(limited to 'playbooks/adhoc')

diff --git 
a/playbooks/adhoc/zabbix_setup/clean_zabbix.yml b/playbooks/adhoc/zabbix_setup/clean_zabbix.yml index a31cbef65..1e884240a 100644 --- a/playbooks/adhoc/zabbix_setup/clean_zabbix.yml +++ b/playbooks/adhoc/zabbix_setup/clean_zabbix.yml @@ -2,50 +2,57 @@ - hosts: localhost gather_facts: no vars: - g_zserver: http://localhost/zabbix/api_jsonrpc.php - g_zuser: Admin - g_zpassword: zabbix + g_server: http://localhost:8080/zabbix/api_jsonrpc.php + g_user: '' + g_password: '' + roles: - - ../../../roles/os_zabbix - post_tasks: + - lib_zabbix - - zbx_template: - server: "{{ g_zserver }}" - user: "{{ g_zuser }}" - password: "{{ g_zpassword }}" + post_tasks: + - name: CLEAN List template for heartbeat + zbx_template: + zbx_server: "{{ g_server }}" + zbx_user: "{{ g_user }}" + zbx_password: "{{ g_password }}" state: list name: 'Template Heartbeat' register: templ_heartbeat - - zbx_template: - server: "{{ g_zserver }}" - user: "{{ g_zuser }}" - password: "{{ g_zpassword }}" + - name: CLEAN List template app zabbix server + zbx_template: + zbx_server: "{{ g_server }}" + zbx_user: "{{ g_user }}" + zbx_password: "{{ g_password }}" state: list name: 'Template App Zabbix Server' register: templ_zabbix_server - - zbx_template: - server: "{{ g_zserver }}" - user: "{{ g_zuser }}" - password: "{{ g_zpassword }}" + - name: CLEAN List template app zabbix server + zbx_template: + zbx_server: "{{ g_server }}" + zbx_user: "{{ g_user }}" + zbx_password: "{{ g_password }}" state: list name: 'Template App Zabbix Agent' register: templ_zabbix_agent - - zbx_template: - server: "{{ g_zserver }}" - user: "{{ g_zuser }}" - password: "{{ g_zpassword }}" + - name: CLEAN List all templates + zbx_template: + zbx_server: "{{ g_server }}" + zbx_user: "{{ g_user }}" + zbx_password: "{{ g_password }}" state: list register: templates - debug: var=templ_heartbeat.results - - zbx_template: - server: "{{ g_zserver }}" - user: "{{ g_zuser }}" - password: "{{ g_zpassword }}" + - name: Remove templates if heartbeat template is missing + zbx_template: + zbx_server: "{{ g_server }}" + zbx_user: "{{ g_user }}" + zbx_password: "{{ g_password }}" + name: "{{ item }}" state: absent with_items: "{{ templates.results | difference(templ_zabbix_agent.results) | difference(templ_zabbix_server.results) | oo_collect('host') }}" when: templ_heartbeat.results | length == 0 diff --git a/playbooks/adhoc/zabbix_setup/create_template.yml b/playbooks/adhoc/zabbix_setup/create_template.yml deleted file mode 100644 index 50fff53b2..000000000 --- a/playbooks/adhoc/zabbix_setup/create_template.yml +++ /dev/null @@ -1,57 +0,0 @@ ---- -- debug: var=ctp_template - -- name: Create Template - zbx_template: - server: "{{ ctp_zserver }}" - user: "{{ ctp_zuser }}" - password: "{{ ctp_zpassword }}" - name: "{{ ctp_template.name }}" - register: ctp_created_template - -- debug: var=ctp_created_template - -#- name: Create Application -# zbxapi: -# server: "{{ ctp_zserver }}" -# user: "{{ ctp_zuser }}" -# password: "{{ ctp_zpassword }}" -# zbx_class: Application -# state: present -# params: -# name: "{{ ctp_template.application.name}}" -# hostid: "{{ ctp_created_template.results[0].templateid }}" -# search: -# name: "{{ ctp_template.application.name}}" -# register: ctp_created_application - -#- debug: var=ctp_created_application - -- name: Create Items - zbx_item: - server: "{{ ctp_zserver }}" - user: "{{ ctp_zuser }}" - password: "{{ ctp_zpassword }}" - key: "{{ item.key }}" - name: "{{ item.name | default(item.key, true) }}" - value_type: "{{ item.value_type | 
default('int') }}" - template_name: "{{ ctp_template.name }}" - with_items: ctp_template.zitems - register: ctp_created_items - -#- debug: var=ctp_created_items - -- name: Create Triggers - zbx_trigger: - server: "{{ ctp_zserver }}" - user: "{{ ctp_zuser }}" - password: "{{ ctp_zpassword }}" - description: "{{ item.description }}" - expression: "{{ item.expression }}" - priority: "{{ item.priority }}" - with_items: ctp_template.ztriggers - when: ctp_template.ztriggers is defined - -#- debug: var=ctp_created_triggers - - diff --git a/playbooks/adhoc/zabbix_setup/create_user.yml b/playbooks/adhoc/zabbix_setup/create_user.yml deleted file mode 100644 index dd74798b7..000000000 --- a/playbooks/adhoc/zabbix_setup/create_user.yml +++ /dev/null @@ -1,31 +0,0 @@ ---- -# export PYTHONPATH='/usr/lib/python2.7/site-packages/:/home/kwoodson/git/openshift-tools' -# ansible-playbook -e 'cli_password=zabbix' -e 'cli_new_password=new-zabbix' create_user.yml -- hosts: localhost - gather_facts: no - vars_files: - - vars/template_heartbeat.yml - - vars/template_os_linux.yml - vars: - g_zserver: http://localhost/zabbix/api_jsonrpc.php - g_zuser: admin - g_zpassword: "{{ cli_password }}" - roles: - - ../../../roles/os_zabbix - post_tasks: - - zbx_user: - server: "{{ g_zserver }}" - user: "{{ g_zuser }}" - password: "{{ g_zpassword }}" - state: list - register: users - - - debug: var=users - - - name: Update zabbix creds for admin - zbx_user: - server: "{{ g_zserver }}" - user: "{{ g_zuser }}" - password: "{{ g_zpassword }}" - alias: Admin - passwd: "{{ cli_new_password | default(g_zpassword, true) }}" diff --git a/playbooks/adhoc/zabbix_setup/filter_plugins b/playbooks/adhoc/zabbix_setup/filter_plugins deleted file mode 120000 index 99a95e4ca..000000000 --- a/playbooks/adhoc/zabbix_setup/filter_plugins +++ /dev/null @@ -1 +0,0 @@ -../../../filter_plugins \ No newline at end of file diff --git a/playbooks/adhoc/zabbix_setup/roles b/playbooks/adhoc/zabbix_setup/roles deleted file mode 120000 index e2b799b9d..000000000 --- a/playbooks/adhoc/zabbix_setup/roles +++ /dev/null @@ -1 +0,0 @@ -../../../roles/ \ No newline at end of file diff --git a/playbooks/adhoc/zabbix_setup/setup_zabbix.yml b/playbooks/adhoc/zabbix_setup/setup_zabbix.yml deleted file mode 100644 index 1729194b5..000000000 --- a/playbooks/adhoc/zabbix_setup/setup_zabbix.yml +++ /dev/null @@ -1,38 +0,0 @@ ---- -- hosts: localhost - gather_facts: no - vars_files: - - vars/template_heartbeat.yml - - vars/template_os_linux.yml - vars: - g_zserver: http://localhost/zabbix/api_jsonrpc.php - g_zuser: Admin - g_zpassword: zabbix - roles: - - ../../../roles/os_zabbix - post_tasks: - - zbx_template: - server: "{{ g_zserver }}" - user: "{{ g_zuser }}" - password: "{{ g_zpassword }}" - state: list - register: templates - - - debug: var=templates - - - name: Include Template - include: create_template.yml - vars: - ctp_template: "{{ g_template_heartbeat }}" - ctp_zserver: "{{ g_zserver }}" - ctp_zuser: "{{ g_zuser }}" - ctp_zpassword: "{{ g_zpassword }}" - - - name: Include Template - include: create_template.yml - vars: - ctp_template: "{{ g_template_os_linux }}" - ctp_zserver: "{{ g_zserver }}" - ctp_zuser: "{{ g_zuser }}" - ctp_zpassword: "{{ g_zpassword }}" - diff --git a/playbooks/adhoc/zabbix_setup/vars/template_heartbeat.yml b/playbooks/adhoc/zabbix_setup/vars/template_heartbeat.yml deleted file mode 100644 index 22cc75554..000000000 --- a/playbooks/adhoc/zabbix_setup/vars/template_heartbeat.yml +++ /dev/null @@ -1,11 +0,0 @@ ---- -g_template_heartbeat: - 
name: Template Heartbeat - zitems: - - name: Heartbeat Ping - hostid: - key: heartbeat.ping - ztriggers: - - description: 'Heartbeat.ping has failed on {HOST.NAME}' - expression: '{Template Heartbeat:heartbeat.ping.last()}<>0' - priority: avg diff --git a/playbooks/adhoc/zabbix_setup/vars/template_host.yml b/playbooks/adhoc/zabbix_setup/vars/template_host.yml deleted file mode 100644 index e7cc667cb..000000000 --- a/playbooks/adhoc/zabbix_setup/vars/template_host.yml +++ /dev/null @@ -1,27 +0,0 @@ ---- -g_template_host: - params: - name: Template Host - host: Template Host - groups: - - groupid: 1 # FIXME (not real) - output: extend - search: - name: Template Host - zitems: - - name: Host Ping - hostid: - key_: host.ping - type: 2 - value_type: 0 - output: extend - search: - key_: host.ping - ztriggers: - - description: 'Host ping has failed on {HOST.NAME}' - expression: '{Template Host:host.ping.last()}<>0' - priority: 3 - searchWildcardsEnabled: True - search: - description: 'Host ping has failed on*' - expandExpression: True diff --git a/playbooks/adhoc/zabbix_setup/vars/template_master.yml b/playbooks/adhoc/zabbix_setup/vars/template_master.yml deleted file mode 100644 index 5f9b41a4f..000000000 --- a/playbooks/adhoc/zabbix_setup/vars/template_master.yml +++ /dev/null @@ -1,27 +0,0 @@ ---- -g_template_master: - params: - name: Template Master - host: Template Master - groups: - - groupid: 1 # FIXME (not real) - output: extend - search: - name: Template Master - zitems: - - name: Master Etcd Ping - hostid: - key_: master.etcd.ping - type: 2 - value_type: 0 - output: extend - search: - key_: master.etcd.ping - ztriggers: - - description: 'Master Etcd ping has failed on {HOST.NAME}' - expression: '{Template Master:master.etcd.ping.last()}<>0' - priority: 3 - searchWildcardsEnabled: True - search: - description: 'Master Etcd ping has failed on*' - expandExpression: True diff --git a/playbooks/adhoc/zabbix_setup/vars/template_node.yml b/playbooks/adhoc/zabbix_setup/vars/template_node.yml deleted file mode 100644 index 98c343a24..000000000 --- a/playbooks/adhoc/zabbix_setup/vars/template_node.yml +++ /dev/null @@ -1,27 +0,0 @@ ---- -g_template_node: - params: - name: Template Node - host: Template Node - groups: - - groupid: 1 # FIXME (not real) - output: extend - search: - name: Template Node - zitems: - - name: Kubelet Ping - hostid: - key_: kubelet.ping - type: 2 - value_type: 0 - output: extend - search: - key_: kubelet.ping - ztriggers: - - description: 'Kubelet ping has failed on {HOST.NAME}' - expression: '{Template Node:kubelet.ping.last()}<>0' - priority: 3 - searchWildcardsEnabled: True - search: - description: 'Kubelet ping has failed on*' - expandExpression: True diff --git a/playbooks/adhoc/zabbix_setup/vars/template_os_linux.yml b/playbooks/adhoc/zabbix_setup/vars/template_os_linux.yml deleted file mode 100644 index 9cc038ffa..000000000 --- a/playbooks/adhoc/zabbix_setup/vars/template_os_linux.yml +++ /dev/null @@ -1,90 +0,0 @@ ---- -g_template_os_linux: - name: Template OS Linux - zitems: - - key: kernel.uname.sysname - value_type: string - - - key: kernel.all.cpu.wait.total - value_type: int - - - key: kernel.all.cpu.irq.hard - value_type: int - - - key: kernel.all.cpu.idle - value_type: int - - - key: kernel.uname.distro - value_type: string - - - key: kernel.uname.nodename - value_type: string - - - key: kernel.all.cpu.irq.soft - value_type: int - - - key: kernel.all.load.15_minute - value_type: float - - - key: kernel.all.cpu.sys - value_type: int - - - key: 
kernel.all.load.5_minute - value_type: float - - - key: mem.freemem - value_type: int - - - key: kernel.all.cpu.nice - value_type: int - - - key: mem.util.bufmem - value_type: int - - - key: swap.used - value_type: int - - - key: kernel.all.load.1_minute - value_type: float - - - key: kernel.uname.version - value_type: string - - - key: swap.length - value_type: int - - - key: mem.physmem - value_type: int - - - key: kernel.all.uptime - value_type: int - - - key: swap.free - value_type: int - - - key: mem.util.used - value_type: int - - - key: kernel.all.cpu.user - value_type: int - - - key: kernel.uname.machine - value_type: string - - - key: hinv.ncpu - value_type: int - - - key: mem.util.cached - value_type: int - - - key: kernel.all.cpu.steal - value_type: int - - - key: kernel.all.pswitch - value_type: int - - - key: kernel.uname.release - value_type: string - - - key: proc.nprocs - value_type: int diff --git a/playbooks/adhoc/zabbix_setup/vars/template_router.yml b/playbooks/adhoc/zabbix_setup/vars/template_router.yml deleted file mode 100644 index 4dae7da1e..000000000 --- a/playbooks/adhoc/zabbix_setup/vars/template_router.yml +++ /dev/null @@ -1,27 +0,0 @@ ---- -g_template_router: - params: - name: Template Router - host: Template Router - groups: - - groupid: 1 # FIXME (not real) - output: extend - search: - name: Template Router - zitems: - - name: Router Backends down - hostid: - key_: router.backends.down - type: 2 - value_type: 0 - output: extend - search: - key_: router.backends.down - ztriggers: - - description: 'Number of router backends down on {HOST.NAME}' - expression: '{Template Router:router.backends.down.last()}<>0' - priority: 3 - searchWildcardsEnabled: True - search: - description: 'Number of router backends down on {HOST.NAME}' - expandExpression: True -- cgit v1.2.3 From d565411ae9f2080c7c575744099fe5f79de2bb55 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 27 Aug 2015 21:32:17 -0400 Subject: adhoc/tutorial_reset: Don't error out if there are no Docker images I'd like this playbook to always work. --- playbooks/adhoc/atomic_openshift_tutorial_reset.yml | 3 +++ 1 file changed, 3 insertions(+) (limited to 'playbooks/adhoc') diff --git a/playbooks/adhoc/atomic_openshift_tutorial_reset.yml b/playbooks/adhoc/atomic_openshift_tutorial_reset.yml index 1200caa2a..3e22f8f2d 100644 --- a/playbooks/adhoc/atomic_openshift_tutorial_reset.yml +++ b/playbooks/adhoc/atomic_openshift_tutorial_reset.yml @@ -61,12 +61,15 @@ - shell: docker ps -a -q | xargs docker stop changed_when: False + failed_when: False - shell: docker ps -a -q| xargs docker rm changed_when: False + failed_when: False - shell: docker images -q |xargs docker rmi changed_when: False + failed_when: False - file: path={{ item }} state=absent with_items: -- cgit v1.2.3 From 61ba47474f12fb83e9e40f2a1f0a47fd5d393457 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 27 Aug 2015 22:19:27 -0400 Subject: adhoc/tutorial_reset: Also delete etcd and data I needed this because I forgot to override openshift_hostname, and it found the wrong hostname, which then leaked into etcd certs, which caused the master to fail to start. 
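A sketch of the inventory-side guard against the root cause (host name and values are illustrative): pinning the hostname facts per host keeps detection from picking up the wrong name in the first place, so the etcd and master certificates are generated with the intended hostname:

    [masters]
    master.example.com openshift_hostname=master.example.com openshift_public_hostname=master.example.com

Deleting /etc/etcd and /var/lib/etcd below clears out any certificates and data that already carry a bad name.
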
--- playbooks/adhoc/atomic_openshift_tutorial_reset.yml | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'playbooks/adhoc') diff --git a/playbooks/adhoc/atomic_openshift_tutorial_reset.yml b/playbooks/adhoc/atomic_openshift_tutorial_reset.yml index 3e22f8f2d..54d3ea278 100644 --- a/playbooks/adhoc/atomic_openshift_tutorial_reset.yml +++ b/playbooks/adhoc/atomic_openshift_tutorial_reset.yml @@ -19,10 +19,12 @@ - openshift-node - atomic-enterprise-master - atomic-enterprise-node + - etcd - yum: name={{ item }} state=absent with_items: - openvswitch + - etcd - origin - origin-master - origin-node @@ -89,6 +91,8 @@ - /etc/sysconfig/openshift-node - /etc/sysconfig/atomic-enterprise-master - /etc/sysconfig/atomic-enterprise-node + - /etc/etcd + - /var/lib/etcd - user: name={{ item }} state=absent remove=yes with_items: -- cgit v1.2.3 From 1f52ea8c4e2f8cfce51e98cb3614c61f0d78ec3e Mon Sep 17 00:00:00 2001 From: Thomas Wiest Date: Fri, 28 Aug 2015 18:03:59 -0400 Subject: added docker zabbix template, removed unused / old templates so they don't confuse other people. --- playbooks/adhoc/zabbix_setup/filter_plugins | 1 + playbooks/adhoc/zabbix_setup/oo-clean-zaio.yml | 7 +++++++ playbooks/adhoc/zabbix_setup/oo-config-zaio.yml | 13 +++++++++++++ playbooks/adhoc/zabbix_setup/roles | 1 + 4 files changed, 22 insertions(+) create mode 120000 playbooks/adhoc/zabbix_setup/filter_plugins create mode 100755 playbooks/adhoc/zabbix_setup/oo-clean-zaio.yml create mode 100755 playbooks/adhoc/zabbix_setup/oo-config-zaio.yml create mode 120000 playbooks/adhoc/zabbix_setup/roles (limited to 'playbooks/adhoc') diff --git a/playbooks/adhoc/zabbix_setup/filter_plugins b/playbooks/adhoc/zabbix_setup/filter_plugins new file mode 120000 index 000000000..b0b7a3414 --- /dev/null +++ b/playbooks/adhoc/zabbix_setup/filter_plugins @@ -0,0 +1 @@ +../../../filter_plugins/ \ No newline at end of file diff --git a/playbooks/adhoc/zabbix_setup/oo-clean-zaio.yml b/playbooks/adhoc/zabbix_setup/oo-clean-zaio.yml new file mode 100755 index 000000000..0fe65b338 --- /dev/null +++ b/playbooks/adhoc/zabbix_setup/oo-clean-zaio.yml @@ -0,0 +1,7 @@ +#!/usr/bin/env ansible-playbook +--- +- include: clean_zabbix.yml + vars: + g_server: http://localhost/zabbix/api_jsonrpc.php + g_user: Admin + g_password: zabbix diff --git a/playbooks/adhoc/zabbix_setup/oo-config-zaio.yml b/playbooks/adhoc/zabbix_setup/oo-config-zaio.yml new file mode 100755 index 000000000..e2b8150c6 --- /dev/null +++ b/playbooks/adhoc/zabbix_setup/oo-config-zaio.yml @@ -0,0 +1,13 @@ +#!/usr/bin/ansible-playbook +--- +- hosts: localhost + gather_facts: no + vars: + g_server: http://localhost/zabbix/api_jsonrpc.php + g_user: Admin + g_password: zabbix + roles: + - role: os_zabbix + ozb_server: "{{ g_server }}" + ozb_user: "{{ g_user }}" + ozb_password: "{{ g_password }}" diff --git a/playbooks/adhoc/zabbix_setup/roles b/playbooks/adhoc/zabbix_setup/roles new file mode 120000 index 000000000..20c4c58cf --- /dev/null +++ b/playbooks/adhoc/zabbix_setup/roles @@ -0,0 +1 @@ +../../../roles \ No newline at end of file -- cgit v1.2.3 From c1c8d6045e22a01e81f582bd4b80cc8fadf6e035 Mon Sep 17 00:00:00 2001 From: Matt Woodson Date: Fri, 18 Sep 2015 10:54:39 -0400 Subject: added the docker loopback fixer script --- .../docker_loopback_to_lvm/docker-storage-setup | 2 + .../docker_loopback_to_direct_lvm.yml | 141 +++++++++++++++++++++ 2 files changed, 143 insertions(+) create mode 100644 playbooks/adhoc/docker_loopback_to_lvm/docker-storage-setup create mode 100644 
playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml (limited to 'playbooks/adhoc') diff --git a/playbooks/adhoc/docker_loopback_to_lvm/docker-storage-setup b/playbooks/adhoc/docker_loopback_to_lvm/docker-storage-setup new file mode 100644 index 000000000..059058823 --- /dev/null +++ b/playbooks/adhoc/docker_loopback_to_lvm/docker-storage-setup @@ -0,0 +1,2 @@ +DEVS=/dev/xvdb +VG=docker_vg diff --git a/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml b/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml new file mode 100644 index 000000000..70c6e03dc --- /dev/null +++ b/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml @@ -0,0 +1,141 @@ +--- +# This playbook coverts docker to go from loopback to direct-lvm (the Red Hat recommended way to run docker) +# in AWS. This adds an additional EBS volume and creates the Volume Group on this EBS volume to use. +# +# To run: +# 1. Source your AWS credentials (make sure it's the corresponding AWS account) into your environment +# export AWS_ACCESS_KEY_ID='XXXXX' +# export AWS_SECRET_ACCESS_KEY='XXXXXX' +# +# 2. run the playbook: +# ansible-playbook -e 'cli_environment=' -e "cli_volume_size=30" -e docker_loopback_to_direct_lvm.yml.yml +# +# Notes: +# * By default this will do a 30GB volume. +# * iops are calculated by Disk Size * 30. e.g ( 30GB * 30) = 900 iops +# * This will remove /var/lib/docker! +# * You may need to re-deploy docker images after this is run (like monitoring) +# + +- name: Fix docker to have a provisioned iops drive + hosts: "tag_Name_{{ cli_tag_name }}" + user: root + connection: ssh + gather_facts: no + + vars: + cli_volume_type: io1 + cli_volume_size: 30 + cli_volume_iops: {{ 30 * cli_volume_size}} + + pre_tasks: + - fail: + msg: "This playbook requires {{item}} to be set." + when: "{{ item }} is not defined or {{ item }} == ''" + with_items: + - cli_tag_name + - cli_volume_size + + - debug: + var: hosts + + - name: start docker + service: + name: docker + state: started + + - name: Determine if loopback + shell: docker info | grep 'Data file:.*loop' + register: loop_device_check + ignore_errors: yes + + - debug: + var: loop_device_check + + - name: fail if we don't detect loopback + fail: + msg: loopback not detected! Please investigate manually. + when: loop_device_check.rc == 1 + + - name: stop zagg client monitoring container + service: + name: oso-rhel7-zagg-client + state: stopped + ignore_errors: yes + + - name: stop pcp client monitoring container + service: + name: oso-f22-host-monitoring + state: stopped + ignore_errors: yes + + - name: stop docker + service: + name: docker + state: stopped + + - name: delete /var/lib/docker + command: rm -rf /var/lib/docker + + - name: remove /var/lib/docker + command: rm -rf /var/lib/docker + + - name: check to see if /dev/xvdb exists + command: test -e /dev/xvdb + register: xvdb_check + ignore_errors: yes + + - debug: var=xvdb_check + + - name: fail if /dev/xvdb already exists + fail: + msg: /dev/xvdb already exists. 
Please investigate + when: xvdb_check.rc == 0 + + - name: Create a volume and attach it + delegate_to: localhost + ec2_vol: + state: present + instance: "{{ ec2_id }}" + region: "{{ ec2_region }}" + volume_size: "{{ cli_volume_size | default(30, True)}}" + volume_type: "{{ cli_volume_type }}" + device_name: /dev/xvdb + iops: "{{ 30 * cli_volume_size }}" + register: vol + + - debug: var=vol + + - name: tag the vol with a name + delegate_to: localhost + ec2_tag: region={{ ec2_region }} resource={{ vol.volume_id }} + args: + tags: + Name: "{{ ec2_tag_Name }}" + env: "{{ ec2_tag_environment }}" + register: voltags + + - name: Wait for volume to attach + pause: + seconds: 30 + + - name: copy the docker-storage-setup config file + copy: + src: docker-storage-setup + dest: /etc/sysconfig/docker-storage-setup + owner: root + group: root + mode: 0664 + + - name: docker storage setup + command: docker-storage-setup + register: setup_output + + - debug: var=setup_output + + + - name: start docker + command: systemctl start docker.service + register: dockerstart + + - debug: var=dockerstart -- cgit v1.2.3 From 246fa73c71387f0c44d1689907416ca5da5bba2f Mon Sep 17 00:00:00 2001 From: Matt Woodson Date: Fri, 18 Sep 2015 11:16:52 -0400 Subject: cleaned up some errors in loopback playbook --- .../adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'playbooks/adhoc') diff --git a/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml b/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml index 70c6e03dc..74cc9f628 100644 --- a/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml +++ b/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml @@ -8,7 +8,10 @@ # export AWS_SECRET_ACCESS_KEY='XXXXXX' # # 2. run the playbook: -# ansible-playbook -e 'cli_environment=' -e "cli_volume_size=30" -e docker_loopback_to_direct_lvm.yml.yml +# ansible-playbook -e 'cli_tag_name=' -e "cli_volume_size=30" docker_loopback_to_direct_lvm.yml +# +# Example: +# ansible-playbook -e 'cli_tag_name=ops-master-f58e0' -e "cli_volume_size=30" docker_loopback_to_direct_lvm.yml # # Notes: # * By default this will do a 30GB volume. 
@@ -26,7 +29,7 @@ vars: cli_volume_type: io1 cli_volume_size: 30 - cli_volume_iops: {{ 30 * cli_volume_size}} + cli_volume_iops: "{{ 30 * cli_volume_size }}" pre_tasks: - fail: -- cgit v1.2.3 From 92cc48330ed171171c6a370644a4778727018fad Mon Sep 17 00:00:00 2001 From: Matt Woodson Date: Fri, 18 Sep 2015 12:14:39 -0400 Subject: added playbook for docker storage cleanup --- .../docker_loopback_to_direct_lvm.yml | 11 +++- .../docker_storage_cleanup.yml | 69 ++++++++++++++++++++++ 2 files changed, 78 insertions(+), 2 deletions(-) create mode 100644 playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml (limited to 'playbooks/adhoc') diff --git a/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml b/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml index 74cc9f628..c9ae923bb 100644 --- a/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml +++ b/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml @@ -11,7 +11,7 @@ # ansible-playbook -e 'cli_tag_name=' -e "cli_volume_size=30" docker_loopback_to_direct_lvm.yml # # Example: -# ansible-playbook -e 'cli_tag_name=ops-master-f58e0' -e "cli_volume_size=30" docker_loopback_to_direct_lvm.yml +# ansible-playbook -e 'cli_tag_name=ops-master-12345' -e "cli_volume_size=30" docker_loopback_to_direct_lvm.yml # # Notes: # * By default this will do a 30GB volume. @@ -136,9 +136,16 @@ - debug: var=setup_output - - name: start docker command: systemctl start docker.service register: dockerstart - debug: var=dockerstart + + - name: Wait for docker to stabilize + pause: + seconds: 30 + + # leaving off the '-t' for docker exec. With it, it doesn't work with ansible and tty support + - name: update zabbix docker items + command: docker exec -i oso-rhel7-zagg-client /usr/local/bin/cron-send-docker-metrics.py diff --git a/playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml b/playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml new file mode 100644 index 000000000..1946a5f4f --- /dev/null +++ b/playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml @@ -0,0 +1,69 @@ +--- +# This playbook attempts to cleanup unwanted docker files to help alleviate docker disk space issues. +# +# To run: +# +# 1. run the playbook: +# +# ansible-playbook -e 'cli_tag_name=' docker_storage_cleanup.yml +# +# Example: +# +# ansible-playbook -e 'cli_tag_name=ops-node-compute-12345' docker_storage_cleanup.yml +# +# Notes: +# * This *should* not interfere with running docker images +# + +- name: Clean up Docker Storage + gather_facts: no + hosts: "tag_Name_{{ cli_tag_name }}" + user: root + connection: ssh + + pre_tasks: + + - fail: + msg: "This playbook requires {{item}} to be set." 
+ when: "{{ item }} is not defined or {{ item }} == ''" + with_items: + - cli_tag_name + + - name: Ensure docker is running + service: + name: docker + state: started + enabled: yes + + - name: Get docker info + command: docker info + register: docker_info + + - name: Show docker info + debug: + var: docker_info.stdout_lines + + - name: Remove exited and dead containers + shell: "docker ps -a | awk '/Exited|Dead/ {print $1}' | xargs --no-run-if-empty docker rm" + ignore_errors: yes + + - name: Remove dangling docker images + shell: "docker images -q -f dangling=true | xargs --no-run-if-empty docker rmi" + ignore_errors: yes + + - name: Remove non-running docker images + shell: "docker images -aq | xargs --no-run-if-empty docker rmi 2>/dev/null" + ignore_errors: yes + + # leaving off the '-t' for docker exec. With it, it doesn't work with ansible and tty support + - name: update zabbix docker items + command: docker exec -i oso-rhel7-zagg-client /usr/local/bin/cron-send-docker-metrics.py + + # Get and show docker info again. + - name: Get docker info + command: docker info + register: docker_info + + - name: Show docker info + debug: + var: docker_info.stdout_lines -- cgit v1.2.3 From d02e346a12c356bd87c0e233d22db03791732841 Mon Sep 17 00:00:00 2001 From: Matt Woodson Date: Mon, 21 Sep 2015 10:47:52 -0400 Subject: commented out dangerous playbook option for cleanup --- playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'playbooks/adhoc') diff --git a/playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml b/playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml index 1946a5f4f..53a5c15ef 100644 --- a/playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml +++ b/playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml @@ -51,9 +51,10 @@ shell: "docker images -q -f dangling=true | xargs --no-run-if-empty docker rmi" ignore_errors: yes - - name: Remove non-running docker images - shell: "docker images -aq | xargs --no-run-if-empty docker rmi 2>/dev/null" - ignore_errors: yes +# mwoodson & twiest: this is dangerous, commenting out for now. +# - name: Remove non-running docker images +# shell: "docker images -aq | xargs --no-run-if-empty docker rmi 2>/dev/null" +# ignore_errors: yes # leaving off the '-t' for docker exec. With it, it doesn't work with ansible and tty support - name: update zabbix docker items -- cgit v1.2.3 From 15fef0ed1619709446d7dd0b61d198cc650f53cc Mon Sep 17 00:00:00 2001 From: Matt Woodson Date: Mon, 21 Sep 2015 12:51:56 -0400 Subject: changed the docker cleanup to exclude certain registries --- playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'playbooks/adhoc') diff --git a/playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml b/playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml index 53a5c15ef..a19291a9f 100644 --- a/playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml +++ b/playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml @@ -51,10 +51,9 @@ shell: "docker images -q -f dangling=true | xargs --no-run-if-empty docker rmi" ignore_errors: yes -# mwoodson & twiest: this is dangerous, commenting out for now. 
-# - name: Remove non-running docker images -# shell: "docker images -aq | xargs --no-run-if-empty docker rmi 2>/dev/null" -# ignore_errors: yes + - name: Remove non-running docker images + shell: "docker images | grep -v -e registry.access.redhat.com -e docker-registry.usersys.redhat.com -e docker-registry.ops.rhcloud.com | awk '{print $3}' | xargs --no-run-if-empty docker rmi 2>/dev/null" + ignore_errors: yes # leaving off the '-t' for docker exec. With it, it doesn't work with ansible and tty support - name: update zabbix docker items -- cgit v1.2.3 From 44f2904159c5a3e0045eb413287a9c1778f91adb Mon Sep 17 00:00:00 2001 From: Scott Dodson Date: Thu, 10 Sep 2015 10:27:35 -0400 Subject: Upgrades --- playbooks/adhoc/upgrades/README.md | 21 ++++++ playbooks/adhoc/upgrades/filter_plugins | 1 + playbooks/adhoc/upgrades/lookup_plugins | 1 + playbooks/adhoc/upgrades/roles | 1 + playbooks/adhoc/upgrades/upgrade.yml | 115 ++++++++++++++++++++++++++++++++ 5 files changed, 139 insertions(+) create mode 100644 playbooks/adhoc/upgrades/README.md create mode 120000 playbooks/adhoc/upgrades/filter_plugins create mode 120000 playbooks/adhoc/upgrades/lookup_plugins create mode 120000 playbooks/adhoc/upgrades/roles create mode 100644 playbooks/adhoc/upgrades/upgrade.yml (limited to 'playbooks/adhoc') diff --git a/playbooks/adhoc/upgrades/README.md b/playbooks/adhoc/upgrades/README.md new file mode 100644 index 000000000..6de8a970f --- /dev/null +++ b/playbooks/adhoc/upgrades/README.md @@ -0,0 +1,21 @@ +# [NOTE] +This playbook will re-run installation steps overwriting any local +modifications. You should ensure that your inventory has been updated with any +modifications you've made after your initial installation. If you find any items +that cannot be configured via ansible please open an issue at +https://github.com/openshift/openshift-ansible + +# Overview +This playbook is available as a technical preview. It currently performs the +following steps. 
+ + * Upgrade and restart master services + * Upgrade and restart node services + * Applies latest configuration by re-running the installation playbook + * Applies the latest cluster policies + * Updates the default router if one exists + * Updates the default registry if one exists + * Updates image streams and quickstarts + +# Usage +ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/adhoc/upgrades/upgrade.yml diff --git a/playbooks/adhoc/upgrades/filter_plugins b/playbooks/adhoc/upgrades/filter_plugins new file mode 120000 index 000000000..b0b7a3414 --- /dev/null +++ b/playbooks/adhoc/upgrades/filter_plugins @@ -0,0 +1 @@ +../../../filter_plugins/ \ No newline at end of file diff --git a/playbooks/adhoc/upgrades/lookup_plugins b/playbooks/adhoc/upgrades/lookup_plugins new file mode 120000 index 000000000..73cafffe5 --- /dev/null +++ b/playbooks/adhoc/upgrades/lookup_plugins @@ -0,0 +1 @@ +../../../lookup_plugins/ \ No newline at end of file diff --git a/playbooks/adhoc/upgrades/roles b/playbooks/adhoc/upgrades/roles new file mode 120000 index 000000000..e2b799b9d --- /dev/null +++ b/playbooks/adhoc/upgrades/roles @@ -0,0 +1 @@ +../../../roles/ \ No newline at end of file diff --git a/playbooks/adhoc/upgrades/upgrade.yml b/playbooks/adhoc/upgrades/upgrade.yml new file mode 100644 index 000000000..e666f0472 --- /dev/null +++ b/playbooks/adhoc/upgrades/upgrade.yml @@ -0,0 +1,115 @@ +--- +- name: Re-Run cluster configuration to apply latest configuration changes + include: ../../common/openshift-cluster/config.yml + vars: + g_etcd_group: "{{ 'etcd' }}" + g_masters_group: "{{ 'masters' }}" + g_nodes_group: "{{ 'nodes' }}" + openshift_cluster_id: "{{ cluster_id | default('default') }}" + openshift_deployment_type: "{{ deployment_type }}" + +- name: Upgrade masters + hosts: masters + vars: + openshift_version: "{{ openshift_pkg_version | default('') }}" + tasks: + - name: Upgrade master packages + yum: pkg={{ openshift.common.service_type }}-master{{ openshift_version }} state=latest + - name: Restart master services + service: name="{{ openshift.common.service_type}}-master" state=restarted + +- name: Upgrade nodes + hosts: nodes + vars: + openshift_version: "{{ openshift_pkg_version | default('') }}" + tasks: + - name: Upgrade node packages + yum: pkg={{ openshift.common.service_type }}-node{{ openshift_version }} state=latest + - name: Restart node services + service: name="{{ openshift.common.service_type }}-node" state=restarted + +- name: Determine new master version + hosts: oo_first_master + tasks: + - name: Determine new version + command: > + rpm -q --queryformat '%{version}' {{ openshift.common.service_type }}-master + register: _new_version + +- name: Ensure AOS 3.0.2 or Origin 1.0.6 + hosts: oo_first_master + tasks: + fail: This playbook requires Origin 1.0.6 or Atomic OpenShift 3.0.2 or later + when: _new_version.stdout < 1.0.6 or (_new_version.stdout >= 3.0 and _new_version.stdout < 3.0.2) + +- name: Update cluster policy + hosts: oo_first_master + tasks: + - name: oadm policy reconcile-cluster-roles --confirm + command: > + {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig + policy reconcile-cluster-roles --confirm + +- name: Upgrade default router + hosts: oo_first_master + vars: + - router_image: "{{ openshift.master.registry_url | replace( '${component}', 'haproxy-router' ) | replace ( '${version}', 'v' + _new_version.stdout ) }}" + - oc_cmd: "{{ openshift.common.client_binary }} --config={{ 
openshift.common.config_base }}/master/admin.kubeconfig" + tasks: + - name: Check for default router + command: > + {{ oc_cmd }} get -n default dc/router + register: _default_router + failed_when: false + changed_when: false + - name: Check for allowHostNetwork and allowHostPorts + when: _default_router.rc == 0 + shell: > + {{ oc_cmd }} get -o yaml scc/privileged | /usr/bin/grep -e allowHostPorts -e allowHostNetwork + register: _scc + - name: Grant allowHostNetwork and allowHostPorts + when: + - _default_router.rc == 0 + - "'false' in _scc.stdout" + command: > + {{ oc_cmd }} patch scc/privileged -p '{"allowHostPorts":true,"allowHostNetwork":true}' --loglevel=9 + - name: Update deployment config to 1.0.4/3.0.1 spec + when: _default_router.rc == 0 + command: > + {{ oc_cmd }} patch dc/router -p + '{"spec":{"strategy":{"rollingParams":{"updatePercent":-10},"spec":{"serviceAccount":"router","serviceAccountName":"router"}}}}' + - name: Switch to hostNetwork=true + when: _default_router.rc == 0 + command: > + {{ oc_cmd }} patch dc/router -p '{"spec":{"template":{"spec":{"hostNetwork":true}}}}' + - name: Update router image to current version + when: _default_router.rc == 0 + command: > + {{ oc_cmd }} patch dc/router -p + '{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}"}]}}}}' + +- name: Upgrade default + hosts: oo_first_master + vars: + - registry_image: "{{ openshift.master.registry_url | replace( '${component}', 'docker-registry' ) | replace ( '${version}', 'v' + _new_version.stdout ) }}" + - oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig" + tasks: + - name: Check for default registry + command: > + {{ oc_cmd }} get -n default dc/docker-registry + register: _default_registry + failed_when: false + changed_when: false + - name: Update registry image to current version + when: _default_registry.rc == 0 + command: > + {{ oc_cmd }} patch dc/docker-registry -p + '{"spec":{"template":{"spec":{"containers":[{"name":"registry","image":"{{ registry_image }}"}]}}}}' + +- name: Update image streams and templates + hosts: oo_first_master + vars: + openshift_examples_import_command: "update" + openshift_deployment_type: "{{ deployment_type }}" + roles: + - openshift_examples -- cgit v1.2.3 From 6c6635df9d2fb57b1e70bfc63b7301b7e7c28d72 Mon Sep 17 00:00:00 2001 From: Stefanie Forrester Date: Thu, 17 Sep 2015 15:01:51 -0700 Subject: Added S3 docker-registry config script --- playbooks/adhoc/s3_registry/s3_registry.j2 | 20 +++++++++++ playbooks/adhoc/s3_registry/s3_registry.yml | 55 +++++++++++++++++++++++++++++ 2 files changed, 75 insertions(+) create mode 100644 playbooks/adhoc/s3_registry/s3_registry.j2 create mode 100644 playbooks/adhoc/s3_registry/s3_registry.yml (limited to 'playbooks/adhoc') diff --git a/playbooks/adhoc/s3_registry/s3_registry.j2 b/playbooks/adhoc/s3_registry/s3_registry.j2 new file mode 100644 index 000000000..eb8660f6c --- /dev/null +++ b/playbooks/adhoc/s3_registry/s3_registry.j2 @@ -0,0 +1,20 @@ +version: 0.1 +log: + level: debug +http: + addr: :5000 +storage: + cache: + layerinfo: inmemory + s3: + accesskey: {{ accesskey }} + secretkey: {{ secretkey }} + region: us-east-1 + bucket: {{ bucketname }} + encrypt: true + secure: true + v4auth: true + rootdirectory: /registry +middleware: + repository: + - name: openshift diff --git a/playbooks/adhoc/s3_registry/s3_registry.yml b/playbooks/adhoc/s3_registry/s3_registry.yml new file mode 100644 index 000000000..61280df0b --- 
/dev/null +++ b/playbooks/adhoc/s3_registry/s3_registry.yml @@ -0,0 +1,55 @@ +--- +# This playbook creates an S3 bucket, if it doesn't already exist, and configures the docker registry service to use the bucket as its backend storage. +# Usage: +# ansible-playbook s3_registry.yml -e bucketname="mybucket" -e accesskey="S3 aws access key" -e secretkey="S3 aws secret key" -e master="master fqdn or IP" -i "master," +# +# Example: +# ansible-playbook s3_registry.yml -e accesskey="asdf" -e secretkey="hjkl" -e bucketname="testbucket" -e master="54.173.148.238" -i "54.173.148.238," +# +# The bucket name can be anything, but generally should correspond with your cluster name. +# The AWS access/secret keys should be the keys of a separate user (not your main user), containing only the necessary S3 access role. +# The 'master' param is the fqdn or public IP of your cluster's master. +# The -i param allows this playbook to be run on your master, even if it's not yet in your main inventory file. (The comma is mandatory). + +- hosts: "{{ master }}" + remote_user: root + gather_facts: False + + tasks: + + - name: Create S3 bucket + local_action: + module: s3 bucket={{ bucketname|quote }} mode=create aws_access_key={{ accesskey|quote }} aws_secret_key={{ secretkey|quote }} + + - name: Generate docker registry config + template: src="s3_registry.j2" dest="/root/config.yml" owner=root mode=0600 + + - name: Determine if new secrets are needed + command: oc get secrets + register: secrets + + - name: Create registry secrets + command: oc secrets new dockerregistry /root/config.yml + when: "'dockerregistry' not in secrets.stdout" + + - name: Determine if service account contains secrets + command: oc describe serviceaccount/registry + register: serviceaccount + + - name: Add secrets to registry service account + command: oc secrets add serviceaccount/registry secrets/dockerregistry + when: "'dockerregistry' not in serviceaccount.stdout" + + - name: Determine if deployment config contains secrets + command: oc volume dc/docker-registry --list + register: dc + + - name: Add secrets to registry deployment config + command: oc volume dc/docker-registry --add --name=dockersecrets -m /etc/registryconfig --type=secret --secret-name=dockerregistry + when: "'dockersecrets' not in dc.stdout" + + - name: Scale up registry + command: oc scale --replicas=1 dc/docker-registry + + - name: Delete temporary config file + file: path=/root/config.yml state=absent -- cgit v1.2.3 From 9deff4bd696168111316dc366c1b193e02e08c8b Mon Sep 17 00:00:00 2001 From: Stefanie Forrester Date: Thu, 24 Sep 2015 11:56:30 -0700 Subject: added dynamic inventory support for single-master clusters --- playbooks/adhoc/s3_registry/s3_registry.j2 | 2 +- playbooks/adhoc/s3_registry/s3_registry.yml | 15 +++++---------- 2 files changed, 6 insertions(+), 11 deletions(-) (limited to 'playbooks/adhoc') diff --git a/playbooks/adhoc/s3_registry/s3_registry.j2 b/playbooks/adhoc/s3_registry/s3_registry.j2 index eb8660f6c..026b24456 100644 --- a/playbooks/adhoc/s3_registry/s3_registry.j2 +++ b/playbooks/adhoc/s3_registry/s3_registry.j2 @@ -10,7 +10,7 @@ storage: accesskey: {{ accesskey }} secretkey: {{ secretkey }} region: us-east-1 - bucket: {{ bucketname }} + bucket: {{ clusterid }}-docker encrypt: true secure: true v4auth: true diff --git a/playbooks/adhoc/s3_registry/s3_registry.yml b/playbooks/adhoc/s3_registry/s3_registry.yml index 61280df0b..30b873db3 100644 --- a/playbooks/adhoc/s3_registry/s3_registry.yml +++ b/playbooks/adhoc/s3_registry/s3_registry.yml 
From 9deff4bd696168111316dc366c1b193e02e08c8b Mon Sep 17 00:00:00 2001
From: Stefanie Forrester
Date: Thu, 24 Sep 2015 11:56:30 -0700
Subject: added dynamic inventory support for single-master clusters

---
 playbooks/adhoc/s3_registry/s3_registry.j2  |  2 +-
 playbooks/adhoc/s3_registry/s3_registry.yml | 15 +++++----------
 2 files changed, 6 insertions(+), 11 deletions(-)

(limited to 'playbooks/adhoc')

diff --git a/playbooks/adhoc/s3_registry/s3_registry.j2 b/playbooks/adhoc/s3_registry/s3_registry.j2
index eb8660f6c..026b24456 100644
--- a/playbooks/adhoc/s3_registry/s3_registry.j2
+++ b/playbooks/adhoc/s3_registry/s3_registry.j2
@@ -10,7 +10,7 @@ storage:
     accesskey: {{ accesskey }}
     secretkey: {{ secretkey }}
     region: us-east-1
-    bucket: {{ bucketname }}
+    bucket: {{ clusterid }}-docker
     encrypt: true
     secure: true
     v4auth: true
diff --git a/playbooks/adhoc/s3_registry/s3_registry.yml b/playbooks/adhoc/s3_registry/s3_registry.yml
index 61280df0b..30b873db3 100644
--- a/playbooks/adhoc/s3_registry/s3_registry.yml
+++ b/playbooks/adhoc/s3_registry/s3_registry.yml
@@ -1,17 +1,12 @@
 ---
-# This playbook creates an S3 bucket, if it doesn't already exist, and configures the docker registry service to use the bucket as its backend storage.
+# This playbook creates an S3 bucket named after your cluster and configures the docker-registry service to use the bucket as its backend storage.
 # Usage:
-# ansible-playbook s3_registry.yml -e bucketname="mybucket" -e accesskey="S3 aws access key" -e secretkey="S3 aws secret key" -e master="master fqdn or IP" -i "master,"
+# ansible-playbook s3_registry.yml -e accesskey="S3 aws access key" -e secretkey="S3 aws secret key" -e clusterid="mycluster"
 #
-# Example:
-# ansible-playbook s3_registry.yml -e accesskey="asdf" -e secretkey="hjkl" -e bucketname="testbucket" -e master="54.173.148.238" -i "54.173.148.238,"
-#
-# The bucket name can be anything, but generally should correspond with your cluster name.
 # The AWS access/secret keys should be the keys of a separate user (not your main user), containing only the necessary S3 access role.
-# The 'master' param is the fqdn or public IP of your cluster's master.
-# The -i param allows this playbook to be run on your master, even if it's not yet in your main inventory file. (The comma is mandatory).
+# The 'clusterid' is the short name of your cluster.
 
-- hosts: "{{ master }}"
+- hosts: security_group_{{ clusterid }}_master
   remote_user: root
   gather_facts: False
 
@@ -19,7 +14,7 @@
 
   - name: Create S3 bucket
     local_action:
-      module: s3 bucket={{ bucketname|quote }} mode=create aws_access_key={{ accesskey|quote }} aws_secret_key={{ secretkey|quote }}
+      module: s3 bucket="{{ clusterid }}-docker" mode=create
 
   - name: Generate docker registry config
     template: src="s3_registry.j2" dest="/root/config.yml" owner=root mode=0600
-- cgit v1.2.3
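With this revision a single clusterid variable drives both the host pattern and the bucket name the template renders. A quick pre-flight sketch for checking the derived value without touching AWS; the playbook name and cluster name are hypothetical, and this assumes your dynamic inventory exposes a matching security_group_<clusterid>_master group:

# e.g. ansible-playbook show_bucket.yml -e clusterid=mycluster
- hosts: localhost
  gather_facts: False
  tasks:
  - name: Show the derived registry bucket name (illustrative)
    debug:
      msg: "Cluster {{ clusterid }} will use S3 bucket {{ clusterid }}-docker"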
From 2a4b5b7322c8b0c8e84aae43d1ff411259bf9b61 Mon Sep 17 00:00:00 2001
From: Matt Woodson
Date: Tue, 29 Sep 2015 11:16:25 -0400
Subject: added the grow_docker_vg adhoc playbook

---
 .../grow_docker_vg/filter_plugins/oo_filters.py    |  41 +++++
 playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml  | 204 +++++++++++++++++++++
 2 files changed, 245 insertions(+)
 create mode 100644 playbooks/adhoc/grow_docker_vg/filter_plugins/oo_filters.py
 create mode 100644 playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml

(limited to 'playbooks/adhoc')

diff --git a/playbooks/adhoc/grow_docker_vg/filter_plugins/oo_filters.py b/playbooks/adhoc/grow_docker_vg/filter_plugins/oo_filters.py
new file mode 100644
index 000000000..d0264cde9
--- /dev/null
+++ b/playbooks/adhoc/grow_docker_vg/filter_plugins/oo_filters.py
@@ -0,0 +1,41 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# vim: expandtab:tabstop=4:shiftwidth=4
+'''
+Custom filters for use in openshift-ansible
+'''
+
+import pdb
+
+
+class FilterModule(object):
+    ''' Custom ansible filters '''
+
+    @staticmethod
+    def oo_pdb(arg):
+        ''' This pops you into a pdb instance where arg is the data passed in
+            from the filter.
+            Ex: "{{ hostvars | oo_pdb }}"
+        '''
+        pdb.set_trace()
+        return arg
+
+    @staticmethod
+    def translate_volume_name(volumes, target_volume):
+        '''
+            This filter matches a device string /dev/sdX to /dev/xvdX
+            It will then return the AWS volume ID
+        '''
+        for vol in volumes:
+            translated_name = vol["attachment_set"]["device"].replace("/dev/sd", "/dev/xvd")
+            if target_volume.startswith(translated_name):
+                return vol["id"]
+
+        return None
+
+
+    def filters(self):
+        ''' returns a mapping of filters to methods '''
+        return {
+            "translate_volume_name": self.translate_volume_name,
+        }
diff --git a/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml b/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml
new file mode 100644
index 000000000..a88553ac0
--- /dev/null
+++ b/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml
@@ -0,0 +1,204 @@
+---
+# This playbook grows the docker VG on a node by:
+#  * add a new volume
+#  * add volume to the existing VG.
+#  * pv move to the new volume.
+#  * remove old volume
+#  * detach volume
+#  * mark old volume in AWS with "REMOVE ME" tag
+#  * grow docker LVM to 90% of the VG
+#
+# To run:
+# 1. Source your AWS credentials (make sure it's the corresponding AWS account) into your environment
+#   export AWS_ACCESS_KEY_ID='XXXXX'
+#   export AWS_SECRET_ACCESS_KEY='XXXXXX'
+#
+# 2. run the playbook:
+#   ansible-playbook -e 'cli_tag_name=' grow_docker_vg.yml
+#
+#  Example:
+#   ansible-playbook -e 'cli_tag_name=ops-compute-12345' grow_docker_vg.yml
+#
+#  Notes:
+#  * By default this will do a 55GB GP2 volume.  This can be overridden with the "-e 'cli_volume_size=100'" variable
+#  * This does a GP2 by default.  Support for Provisioned IOPS has not been added
+#  * This will assign the new volume to /dev/xvdc. This is not variablized, yet.
+#  * This can be done with NO downtime on the host
+#
+
+- name: Grow the docker volume group
+  hosts: "tag_Name_{{ cli_tag_name }}"
+  user: root
+  connection: ssh
+  gather_facts: no
+
+  vars:
+    cli_volume_type: gp2
+    cli_volume_size: 55
+#    cli_volume_iops: "{{ 30 * cli_volume_size }}"
+
+  pre_tasks:
+  - fail:
+      msg: "This playbook requires {{item}} to be set."
+    when: "{{ item }} is not defined or {{ item }} == ''"
+    with_items:
+    - cli_tag_name
+    - cli_volume_size
+
+  - debug:
+      var: hosts
+
+  - name: start docker
+    service:
+      name: docker
+      state: started
+
+  - name: Determine if Storage Driver (docker info) is devicemapper
+    shell: docker info | grep 'Storage Driver:.*devicemapper'
+    register: device_mapper_check
+    ignore_errors: yes
+
+  - debug:
+      var: device_mapper_check
+
+  - name: fail if we don't detect devicemapper
+    fail:
+      msg: The "Storage Driver" in "docker info" is not set to "devicemapper"! Please investigate manually.
+    when: device_mapper_check.rc == 1
+
+  # docker-storage-setup creates a docker-pool as the lvm.  I am using docker-pool lvm to test
+  # and find the volume group.
+  - name: Attempt to find the Volume Group that docker is using
+    shell: lvs | grep docker-pool | awk '{print $2}'
+    register: docker_vg_name
+    ignore_errors: yes
+
+  - debug:
+      var: docker_vg_name
+
+  - name: fail if we don't find a docker volume group
+    fail:
+      msg: Unable to find docker volume group. Please investigate manually.
+    when: docker_vg_name.stdout_lines|length != 1
+
+  # docker-storage-setup creates a docker-pool as the lvm.  I am using docker-pool lvm to test
+  # and find the physical volume.
+  - name: Attempt to find the Physical Volume that docker is using
+    shell: "pvs | grep {{ docker_vg_name.stdout }} | awk '{print $1}'"
+    register: docker_pv_name
+    ignore_errors: yes
+
+  - debug:
+      var: docker_pv_name
+
+  - name: fail if we don't find a docker physical volume
+    fail:
+      msg: Unable to find docker physical volume. Please investigate manually.
+    when: docker_pv_name.stdout_lines|length != 1
+
+
+  - name: get list of volumes from AWS
+    delegate_to: localhost
+    ec2_vol:
+      state: list
+      instance: "{{ ec2_id }}"
+      region: "{{ ec2_region }}"
+    register: attached_volumes
+
+  - debug: var=attached_volumes
+
+  - name: get volume id of current docker volume
+    set_fact:
+      old_docker_volume_id: "{{ attached_volumes.volumes | translate_volume_name(docker_pv_name.stdout) }}"
+
+  - debug: var=old_docker_volume_id
+
+  - name: check to see if /dev/xvdc exists
+    command: test -e /dev/xvdc
+    register: xvdc_check
+    ignore_errors: yes
+
+  - debug: var=xvdc_check
+
+  - name: fail if /dev/xvdc already exists
+    fail:
+      msg: /dev/xvdc already exists. Please investigate
+    when: xvdc_check.rc == 0
+
+  - name: Create a volume and attach it
+    delegate_to: localhost
+    ec2_vol:
+      state: present
+      instance: "{{ ec2_id }}"
+      region: "{{ ec2_region }}"
+      volume_size: "{{ cli_volume_size | default(30, True)}}"
+      volume_type: "{{ cli_volume_type }}"
+      device_name: /dev/xvdc
+    register: create_volume
+
+  - debug: var=create_volume
+
+  - name: Fail when problems creating volumes and attaching
+    fail:
+      msg: "Failed to create or attach volume msg: {{ create_volume.msg }}"
+    when: create_volume.msg is defined
+
+  - name: tag the vol with a name
+    delegate_to: localhost
+    ec2_tag: region={{ ec2_region }} resource={{ create_volume.volume_id }}
+    args:
+      tags:
+        Name: "{{ ec2_tag_Name }}"
+        env: "{{ ec2_tag_environment }}"
+    register: voltags
+
+  - name: check for attached drive
+    command: test -b /dev/xvdc
+    register: attachment_check
+    until: attachment_check.rc == 0
+    retries: 30
+    delay: 2
+
+  - name: partition the new drive and make it lvm
+    command: parted /dev/xvdc --script -- mklabel msdos mkpart primary 0% 100% set 1 lvm
+
+  - name: pvcreate /dev/xvdc
+    command: pvcreate /dev/xvdc1
+
+  - name: Extend the docker volume group
+    command: vgextend "{{ docker_vg_name.stdout }}" /dev/xvdc1
+
+  - name: pvmove onto new volume
+    command: "pvmove {{ docker_pv_name.stdout }} /dev/xvdc1"
+    async: 3600
+    poll: 10
+
+  - name: Remove the old docker drive from the volume group
+    command: "vgreduce {{ docker_vg_name.stdout }} {{ docker_pv_name.stdout }}"
+
+  - name: Remove the pv from the old drive
+    command: "pvremove {{ docker_pv_name.stdout }}"
+
+  - name: Extend the docker lvm
+    command: "lvextend -l '90%VG' /dev/{{ docker_vg_name.stdout }}/docker-pool"
+
+  - name: detach old docker volume
+    delegate_to: localhost
+    ec2_vol:
+      region: "{{ ec2_region }}"
+      id: "{{ old_docker_volume_id }}"
+      instance: None
+
+  - name: tag the old vol valid label
+    delegate_to: localhost
+    ec2_tag: region={{ ec2_region }} resource={{old_docker_volume_id}}
+    args:
+      tags:
+        Name: "{{ ec2_tag_Name }} REMOVE ME"
+    register: voltags
+
+  - name: Update the /etc/sysconfig/docker-storage-setup with new device
+    lineinfile:
+      dest: /etc/sysconfig/docker-storage-setup
+      regexp: ^DEVS=
+      line: DEVS=/dev/xvdc
-- cgit v1.2.3
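The translate_volume_name filter is the hinge of the volume lookup above: AWS reports attachments as /dev/sdX while the instance sees /dev/xvdX. A self-contained way to exercise it with inline sample data; the volume id and device names are made up, and this assumes the filter_plugins directory sits beside the playbook as in this commit:

- name: Illustrate the sd -> xvd translation on sample data
  debug:
    msg: "{{ [{'attachment_set': {'device': '/dev/sdb'}, 'id': 'vol-0abc123'}] | translate_volume_name('/dev/xvdb1') }}"

This should print vol-0abc123, since /dev/xvdb1 starts with the translated attachment device /dev/xvdb.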
From 11f0f570243d39bb2e96bfd64ef9d180163c5c38 Mon Sep 17 00:00:00 2001
From: Matt Woodson
Date: Tue, 29 Sep 2015 11:40:42 -0400
Subject: added comment to the grow_docker_vg playbook

---
 playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'playbooks/adhoc')

diff --git a/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml b/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml
index a88553ac0..ef9b45abd 100644
--- a/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml
+++ b/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml
@@ -24,6 +24,8 @@
 #  * This does a GP2 by default.  Support for Provisioned IOPS has not been added
 #  * This will assign the new volume to /dev/xvdc. This is not variablized, yet.
 #  * This can be done with NO downtime on the host
+#  * This playbook assumes that there is a Logical Volume that is installed and called "docker-pool".  This is
+#    the LV that gets created via the "docker-storage-setup" command
 #
 
 - name: Grow the docker volume group
-- cgit v1.2.3
From f1ee60e1781735486c57a15c83104c7228a158cc Mon Sep 17 00:00:00 2001
From: Kenny Woodson
Date: Wed, 7 Oct 2015 14:25:02 -0400
Subject: Removed io1 type for gp2

---
 .../adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

(limited to 'playbooks/adhoc')

diff --git a/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml b/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml
index c9ae923bb..82870664c 100644
--- a/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml
+++ b/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml
@@ -27,9 +27,8 @@
   gather_facts: no
 
   vars:
-    cli_volume_type: io1
+    cli_volume_type: gp2
     cli_volume_size: 30
-    cli_volume_iops: "{{ 30 * cli_volume_size }}"
 
   pre_tasks:
   - fail:
@@ -104,7 +103,6 @@
       volume_size: "{{ cli_volume_size | default(30, True)}}"
       volume_type: "{{ cli_volume_type }}"
       device_name: /dev/xvdb
-      iops: "{{ 30 * cli_volume_size }}"
     register: vol
 
   - debug: var=vol
-- cgit v1.2.3
From b6fe5ba80bb131543bd09374df88821c8754da64 Mon Sep 17 00:00:00 2001
From: Kenny Woodson
Date: Wed, 7 Oct 2015 14:55:43 -0400
Subject: Removing the last step as it will fail.

---
 .../adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml | 7 -------
 1 file changed, 7 deletions(-)

(limited to 'playbooks/adhoc')

diff --git a/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml b/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml
index 82870664c..b6a2d2f26 100644
--- a/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml
+++ b/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml
@@ -140,10 +140,3 @@
 
   - debug: var=dockerstart
 
-  - name: Wait for docker to stabilize
-    pause:
-      seconds: 30
-
-  # leaving off the '-t' for docker exec.  With it, it doesn't work with ansible and tty support
-  - name: update zabbix docker items
-    command: docker exec -i oso-rhel7-zagg-client /usr/local/bin/cron-send-docker-metrics.py
-- cgit v1.2.3
From dc9e087205b7ce4b843a40f5d0046b5ad6634a70 Mon Sep 17 00:00:00 2001
From: Andrew Butcher
Date: Wed, 7 Oct 2015 10:52:15 -0400
Subject: Add `oadm reconcile-cluster-role-bindings` to upgrade playbook.

Switch to version_compare filter for conditionals.
---
 playbooks/adhoc/upgrades/upgrade.yml | 15 ++++++++++++++-
 1 file changed, 14 insertions(+), 1 deletion(-)

(limited to 'playbooks/adhoc')

diff --git a/playbooks/adhoc/upgrades/upgrade.yml b/playbooks/adhoc/upgrades/upgrade.yml
index e666f0472..b43ab7607 100644
--- a/playbooks/adhoc/upgrades/upgrade.yml
+++ b/playbooks/adhoc/upgrades/upgrade.yml
@@ -40,7 +40,7 @@
   hosts: oo_first_master
   tasks:
     fail: This playbook requires Origin 1.0.6 or Atomic OpenShift 3.0.2 or later
-    when: _new_version.stdout < 1.0.6 or (_new_version.stdout >= 3.0 and _new_version.stdout < 3.0.2)
+    when: _new_version.stdout | version_compare('1.0.6','<') or ( _new_version.stdout | version_compare('3.0','>=') and _new_version.stdout | version_compare('3.0.2','<') )
 
 - name: Update cluster policy
   hosts: oo_first_master
@@ -50,6 +50,19 @@
       {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
       policy reconcile-cluster-roles --confirm
 
+- name: Update cluster policy bindings
+  hosts: oo_first_master
+  tasks:
+  - name: oadm policy reconcile-cluster-role-bindings --confirm
+    command: >
+      {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+      policy reconcile-cluster-role-bindings
+      --exclude-groups=system:authenticated
+      --exclude-groups=system:unauthenticated
+      --exclude-users=system:anonymous
+      --additive-only=true --confirm
+      when: ( _new_version.stdout | version_compare('1.0.6', '>') and _new_version.stdout | version_compare('3.0','<') ) or _new_version.stdout | version_compare('3.0.2','>')
+
 - name: Upgrade default router
   hosts: oo_first_master
   vars:
-- cgit v1.2.3
From 17d55a94ed60e7e89fc704a80e61783d74c6af2f Mon Sep 17 00:00:00 2001
From: Matt Woodson
Date: Wed, 14 Oct 2015 09:52:31 -0400
Subject: moved the timeout to 12 hours in the docker vg move

---
 playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'playbooks/adhoc')

diff --git a/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml b/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml
index ef9b45abd..63d473146 100644
--- a/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml
+++ b/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml
@@ -172,7 +172,7 @@
   - name: pvmove onto new volume
     command: "pvmove {{ docker_pv_name.stdout }} /dev/xvdc1"
-    async: 3600
+    async: 43200
     poll: 10
 
   - name: Remove the old docker drive from the volume group
-- cgit v1.2.3
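The pair of version_compare gates added above encode "Origin newer than 1.0.6 but pre-3.0, or OSE newer than 3.0.2". The same test can be checked in isolation; the version string below is a hypothetical stand-in for _new_version.stdout:

- hosts: localhost
  gather_facts: False
  vars:
    example_version: "3.0.2.900"
  tasks:
  - name: Illustrate the reconcile gate (hypothetical version)
    debug:
      msg: "role bindings would be reconciled"
    when: ( example_version | version_compare('1.0.6', '>') and example_version | version_compare('3.0', '<') ) or example_version | version_compare('3.0.2', '>')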
From ef1fef97dee3ae291344478d987108836e9a664d Mon Sep 17 00:00:00 2001
From: Joel Diaz
Date: Thu, 15 Oct 2015 14:16:38 -0400
Subject: Removed AWS keys from command line, and substituted with environment
 variable lookup.

---
 playbooks/adhoc/s3_registry/s3_registry.j2  |  4 ++--
 playbooks/adhoc/s3_registry/s3_registry.yml | 13 ++++++++++++-
 2 files changed, 14 insertions(+), 3 deletions(-)

(limited to 'playbooks/adhoc')

diff --git a/playbooks/adhoc/s3_registry/s3_registry.j2 b/playbooks/adhoc/s3_registry/s3_registry.j2
index 026b24456..acfa89515 100644
--- a/playbooks/adhoc/s3_registry/s3_registry.j2
+++ b/playbooks/adhoc/s3_registry/s3_registry.j2
@@ -7,8 +7,8 @@ storage:
   cache:
     layerinfo: inmemory
   s3:
-    accesskey: {{ accesskey }}
-    secretkey: {{ secretkey }}
+    accesskey: {{ aws_access_key }}
+    secretkey: {{ aws_secret_key }}
     region: us-east-1
     bucket: {{ clusterid }}-docker
     encrypt: true
diff --git a/playbooks/adhoc/s3_registry/s3_registry.yml b/playbooks/adhoc/s3_registry/s3_registry.yml
index 30b873db3..92be64e17 100644
--- a/playbooks/adhoc/s3_registry/s3_registry.yml
+++ b/playbooks/adhoc/s3_registry/s3_registry.yml
@@ -10,11 +10,22 @@
   remote_user: root
   gather_facts: False
 
+  vars:
+    aws_access_key: "{{ lookup('env', 'AWS_SECRET_ACCESS_KEY') }}"
+    aws_secret_key: "{{ lookup('env', 'AWS_ACCESS_KEY_ID') }}"
   tasks:
 
+  - name: Check for AWS creds
+    fail:
+      msg: "Couldn't find {{ item }} creds in ENV"
+    when: "{{ item }} == ''"
+    with_items:
+    - aws_access_key
+    - aws_secret_key
+
   - name: Create S3 bucket
     local_action:
-      module: s3 bucket="{{ clusterid }}-docker" mode=create
+      module: s3 bucket="{{ clusterid }}-docker" mode=create
-- cgit v1.2.3
From 14ae81a5c18a6cdf5bf00ada9eeec21a82cd982e Mon Sep 17 00:00:00 2001
From: Joel Diaz
Date: Thu, 15 Oct 2015 14:33:58 -0400
Subject: Update example to remove passing in aws creds on command line.

---
 playbooks/adhoc/s3_registry/s3_registry.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'playbooks/adhoc')

diff --git a/playbooks/adhoc/s3_registry/s3_registry.yml b/playbooks/adhoc/s3_registry/s3_registry.yml
index 92be64e17..d1546b6fa 100644
--- a/playbooks/adhoc/s3_registry/s3_registry.yml
+++ b/playbooks/adhoc/s3_registry/s3_registry.yml
@@ -1,7 +1,7 @@
 ---
 # This playbook creates an S3 bucket named after your cluster and configures the docker-registry service to use the bucket as its backend storage.
 # Usage:
-# ansible-playbook s3_registry.yml -e accesskey="S3 aws access key" -e secretkey="S3 aws secret key" -e clusterid="mycluster"
+# ansible-playbook s3_registry.yml -e clusterid="mycluster"
 #
 # The AWS access/secret keys should be the keys of a separate user (not your main user), containing only the necessary S3 access role.
 # The 'clusterid' is the short name of your cluster.
-- cgit v1.2.3
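Reading credentials with lookup('env', ...) keeps them out of shell history and ps output, at the cost of a silent empty string when the variable is unset, hence the fail-fast guard. The same pattern, sketched with a hypothetical variable name:

- hosts: localhost
  gather_facts: False
  vars:
    example_token: "{{ lookup('env', 'EXAMPLE_API_TOKEN') }}"
  tasks:
  - name: Fail early when EXAMPLE_API_TOKEN is unset (illustrative)
    fail:
      msg: "Couldn't find EXAMPLE_API_TOKEN in the environment"
    when: example_token == ''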
From ba7bf4f22ac6a7756a6a8ce6c28276667a968742 Mon Sep 17 00:00:00 2001
From: Thomas Wiest
Date: Mon, 19 Oct 2015 16:10:00 -0400
Subject: added a generic playbook (ops-docker-loopback-to-direct-lvm.yml) to
 convert a host from loopback to direct-lvm docker storage.

---
 .../ops-docker-loopback-to-direct-lvm.yml | 104 +++++++++++++++++++++
 1 file changed, 104 insertions(+)
 create mode 100755 playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml

(limited to 'playbooks/adhoc')

diff --git a/playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml b/playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml
new file mode 100755
index 000000000..614b2537a
--- /dev/null
+++ b/playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml
@@ -0,0 +1,104 @@
+#!/usr/bin/ansible-playbook
+---
+# This playbook converts docker to go from loopback to direct-lvm (the Red Hat recommended way to run docker).
+#
+# It requires the block device to be already provisioned and attached to the host. This is a generic playbook,
+# meant to be used for manual conversion. For AWS specific conversions, use the other playbook in this directory.
+#
+# To run:
+#  ./ops-docker-loopback-to-direct-lvm.yml -e cli_host= -e cli_docker_device=
+#
+# Example:
+#  ./ops-docker-loopback-to-direct-lvm.yml -e cli_host=twiesttest-master-fd32 -e cli_docker_device=/dev/sdb
+#
+# Notes:
+#  * This will remove /var/lib/docker!
+#  * You may need to re-deploy docker images after this is run (like monitoring)
+
+- name: Fix docker to have a provisioned iops drive
+  hosts: "{{ cli_host }}"
+  user: root
+  connection: ssh
+  gather_facts: no
+
+  pre_tasks:
+  - fail:
+      msg: "This playbook requires {{item}} to be set."
+    when: "{{ item }} is not defined or {{ item }} == ''"
+    with_items:
+    - cli_docker_device
+
+  - name: start docker
+    service:
+      name: docker
+      state: started
+
+  - name: Determine if loopback
+    shell: docker info | grep 'Data file:.*loop'
+    register: loop_device_check
+    ignore_errors: yes
+
+  - debug:
+      var: loop_device_check
+
+  - name: fail if we don't detect loopback
+    fail:
+      msg: loopback not detected! Please investigate manually.
+    when: loop_device_check.rc == 1
+
+  - name: stop zagg client monitoring container
+    service:
+      name: oso-rhel7-zagg-client
+      state: stopped
+    ignore_errors: yes
+
+  - name: stop pcp client monitoring container
+    service:
+      name: oso-f22-host-monitoring
+      state: stopped
+    ignore_errors: yes
+
+  - name: "check to see if {{ cli_docker_device }} exists"
+    command: "test -e {{ cli_docker_device }}"
+    register: docker_dev_check
+    ignore_errors: yes
+
+  - debug: var=docker_dev_check
+
+  - name: "fail if {{ cli_docker_device }} doesn't exist"
+    fail:
+      msg: "{{ cli_docker_device }} doesn't exist. Please investigate"
+    when: docker_dev_check.rc != 0
+
+  - name: stop docker
+    service:
+      name: docker
+      state: stopped
+
+  - name: delete /var/lib/docker
+    command: rm -rf /var/lib/docker
+
+  - name: remove /var/lib/docker
+    command: rm -rf /var/lib/docker
+
+  - name: copy the docker-storage-setup config file
+    copy:
+      content: >
+        DEVS={{ cli_docker_device }}
+        VG=docker_vg
+      dest: /etc/sysconfig/docker-storage-setup
+      owner: root
+      group: root
+      mode: 0664
+
+  - name: docker storage setup
+    command: docker-storage-setup
+    register: setup_output
+
+  - debug: var=setup_output
+
+  - name: start docker
+    command: systemctl start docker.service
+    register: dockerstart
+
+  - debug: var=dockerstart
-- cgit v1.2.3
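After docker-storage-setup has rebuilt storage, the conversion can be verified before redeploying workloads. A hedged post-check sketch that mirrors the detection logic the playbook itself uses:

- name: Confirm docker reports the devicemapper driver
  shell: docker info | grep 'Storage Driver:.*devicemapper'
  changed_when: False

- name: Confirm the data file is no longer a loopback device
  shell: docker info | grep 'Data file:.*loop'
  register: loop_check
  changed_when: False
  failed_when: loop_check.rc == 0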
From 538950fd7650ad09523553eff634b4d5a672edec Mon Sep 17 00:00:00 2001
From: Joel Diaz
Date: Mon, 19 Oct 2015 17:36:58 -0400
Subject: Fix typos on env vars.

---
 playbooks/adhoc/s3_registry/s3_registry.yml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'playbooks/adhoc')

diff --git a/playbooks/adhoc/s3_registry/s3_registry.yml b/playbooks/adhoc/s3_registry/s3_registry.yml
index d1546b6fa..5dc1abf17 100644
--- a/playbooks/adhoc/s3_registry/s3_registry.yml
+++ b/playbooks/adhoc/s3_registry/s3_registry.yml
@@ -11,8 +11,8 @@
   gather_facts: False
 
   vars:
-    aws_access_key: "{{ lookup('env', 'AWS_SECRET_ACCESS_KEY') }}"
-    aws_secret_key: "{{ lookup('env', 'AWS_ACCESS_KEY_ID') }}"
+    aws_access_key: "{{ lookup('env', 'AWS_ACCESS_KEY_ID') }}"
+    aws_secret_key: "{{ lookup('env', 'AWS_SECRET_ACCESS_KEY') }}"
   tasks:
 
   - name: Check for AWS creds
-- cgit v1.2.3
From 8691cd2947146a24237fadc443eb02acf805a606 Mon Sep 17 00:00:00 2001
From: Stefanie Forrester
Date: Fri, 11 Sep 2015 13:13:17 -0700
Subject: Support HA or single router, and start work on registry

---
 playbooks/adhoc/s3_registry/s3_registry.yml | 16 +++++++++++++---
 1 file changed, 13 insertions(+), 3 deletions(-)

(limited to 'playbooks/adhoc')

diff --git a/playbooks/adhoc/s3_registry/s3_registry.yml b/playbooks/adhoc/s3_registry/s3_registry.yml
index 5dc1abf17..4dcef1a42 100644
--- a/playbooks/adhoc/s3_registry/s3_registry.yml
+++ b/playbooks/adhoc/s3_registry/s3_registry.yml
@@ -6,13 +6,14 @@
 # The AWS access/secret keys should be the keys of a separate user (not your main user), containing only the necessary S3 access role.
 # The 'clusterid' is the short name of your cluster.
 
-- hosts: security_group_{{ clusterid }}_master
+- hosts: tag_env-host-type_{{ clusterid }}-openshift-master
   remote_user: root
   gather_facts: False
 
   vars:
-    aws_access_key: "{{ lookup('env', 'AWS_ACCESS_KEY_ID') }}"
-    aws_secret_key: "{{ lookup('env', 'AWS_SECRET_ACCESS_KEY') }}"
+    aws_access_key: "{{ lookup('env', 'S3_ACCESS_KEY_ID') }}"
+    aws_secret_key: "{{ lookup('env', 'S3_SECRET_ACCESS_KEY') }}"
+
   tasks:
 
   - name: Check for AWS creds
@@ -23,10 +24,16 @@
     - aws_access_key
     - aws_secret_key
 
+  - name: Scale down registry
+    command: oc scale --replicas=0 dc/docker-registry
+
   - name: Create S3 bucket
     local_action:
       module: s3 bucket="{{ clusterid }}-docker" mode=create
 
+  - name: Set up registry environment variable
+    command: oc env dc/docker-registry REGISTRY_CONFIGURATION_PATH=/etc/registryconfig/config.yml
+
   - name: Generate docker registry config
     template: src="s3_registry.j2" dest="/root/config.yml" owner=root mode=0600
 
@@ -54,6 +61,9 @@
     command: oc volume dc/docker-registry --add --name=dockersecrets -m /etc/registryconfig --type=secret --secret-name=dockerregistry
     when: "'dockersecrets' not in dc.stdout"
 
+  - name: Wait for deployment config to take effect before scaling up
+    pause: seconds=30
+
   - name: Scale up registry
     command: oc scale --replicas=1 dc/docker-registry
 
-- cgit v1.2.3
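Bracketing the reconfiguration between scale --replicas=0 and --replicas=1 keeps a half-configured registry pod from ever serving traffic. The same bracket, reduced to its skeleton for a hypothetical deployment config:

- name: Quiesce the deployment before changing its configuration (dc name is illustrative)
  command: oc scale --replicas=0 dc/example-app

# ... configuration changes go here ...

- name: Resume the deployment once the new configuration is in place
  command: oc scale --replicas=1 dc/example-app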
From a921f8c296467cf72b0d273d8891dcd2f2570bea Mon Sep 17 00:00:00 2001
From: Scott Dodson
Date: Wed, 21 Oct 2015 17:25:51 -0400
Subject: Fix yaml tabbing

---
 playbooks/adhoc/upgrades/upgrade.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'playbooks/adhoc')

diff --git a/playbooks/adhoc/upgrades/upgrade.yml b/playbooks/adhoc/upgrades/upgrade.yml
index b43ab7607..56a1df860 100644
--- a/playbooks/adhoc/upgrades/upgrade.yml
+++ b/playbooks/adhoc/upgrades/upgrade.yml
@@ -61,7 +61,7 @@
       --exclude-groups=system:unauthenticated
       --exclude-users=system:anonymous
       --additive-only=true --confirm
-      when: ( _new_version.stdout | version_compare('1.0.6', '>') and _new_version.stdout | version_compare('3.0','<') ) or _new_version.stdout | version_compare('3.0.2','>')
+    when: ( _new_version.stdout | version_compare('1.0.6', '>') and _new_version.stdout | version_compare('3.0','<') ) or _new_version.stdout | version_compare('3.0.2','>')
 
 - name: Upgrade default router
   hosts: oo_first_master
   vars:
-- cgit v1.2.3
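Indentation slips like the one fixed above are easy to make on long one-line conditionals. One defensive alternative, sketched with a folded scalar so the expression can wrap without fighting YAML:

- name: Example of folding a long conditional (illustrative task)
  command: echo would-reconcile
  when: >
    ( _new_version.stdout | version_compare('1.0.6', '>') and
      _new_version.stdout | version_compare('3.0', '<') ) or
    _new_version.stdout | version_compare('3.0.2', '>')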
From 0c5e2522e44aee9309336049633eb82531f997b6 Mon Sep 17 00:00:00 2001
From: Brenton Leanhardt
Date: Tue, 20 Oct 2015 14:38:22 -0400
Subject: Improvements to uninstallation playbook

This is related to https://trello.com/c/314nwSvt/58-3-uninstall-playbook

The original atomic_openshift_tutorial_reset.yml now calls the uninstall
playbook for most parts.  All the original functionality is still intact.

The main difference between the two playbooks is that the uninstall
playbook is careful to delete only content that ansible originally
installed.

---
 .../adhoc/atomic_openshift_tutorial_reset.yml |  77 +------------
 playbooks/adhoc/uninstall.yml                 | 111 +++++++++++++++++++++
 2 files changed, 114 insertions(+), 74 deletions(-)
 create mode 100644 playbooks/adhoc/uninstall.yml

(limited to 'playbooks/adhoc')

diff --git a/playbooks/adhoc/atomic_openshift_tutorial_reset.yml b/playbooks/adhoc/atomic_openshift_tutorial_reset.yml
index 54d3ea278..c14d08e87 100644
--- a/playbooks/adhoc/atomic_openshift_tutorial_reset.yml
+++ b/playbooks/adhoc/atomic_openshift_tutorial_reset.yml
@@ -1,6 +1,9 @@
 # This deletes *ALL* Docker images, and uninstalls OpenShift and
 # Atomic Enterprise RPMs.  It is primarily intended for use
 # with the tutorial as well as for developers to reset state.
+#
+---
+- include: uninstall.yml
 
 - hosts:
   - OSEv3:children
@@ -8,59 +11,6 @@
 
   sudo: yes
 
   tasks:
-  - service: name={{ item }} state=stopped
-    with_items:
-    - openvswitch
-    - origin-master
-    - origin-node
-    - atomic-openshift-master
-    - atomic-openshift-node
-    - openshift-master
-    - openshift-node
-    - atomic-enterprise-master
-    - atomic-enterprise-node
-    - etcd
-
-  - yum: name={{ item }} state=absent
-    with_items:
-    - openvswitch
-    - etcd
-    - origin
-    - origin-master
-    - origin-node
-    - origin-sdn-ovs
-    - tuned-profiles-origin-node
-    - atomic-openshift
-    - atomic-openshift-master
-    - atomic-openshift-node
-    - atomic-openshift-sdn-ovs
-    - tuned-profiles-atomic-openshift-node
-    - atomic-enterprise
-    - atomic-enterprise-master
-    - atomic-enterprise-node
-    - atomic-enterprise-sdn-ovs
-    - tuned-profiles-atomic-enterprise-node
-    - openshift
-    - openshift-master
-    - openshift-node
-    - openshift-sdn-ovs
-    - tuned-profiles-openshift-node
-
-  - shell: systemctl reset-failed
-    changed_when: False
-
-  - shell: systemctl daemon-reload
-    changed_when: False
-
-  - shell: find /var/lib/origin/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true
-    changed_when: False
-
-  - shell: find /var/lib/atomic-enterprise/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true
-    changed_when: False
-
-  - shell: find /var/lib/openshift/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true
-    changed_when: False
-
   - shell: docker ps -a -q | xargs docker stop
     changed_when: False
     failed_when: False
@@ -73,27 +23,6 @@
     changed_when: False
     failed_when: False
 
-  - file: path={{ item }} state=absent
-    with_items:
-    - /etc/openshift-sdn
-    - /root/.kube
-    - /etc/origin
-    - /etc/atomic-enterprise
-    - /etc/openshift
-    - /var/lib/origin
-    - /var/lib/openshift
-    - /var/lib/atomic-enterprise
-    - /etc/sysconfig/origin-master
-    - /etc/sysconfig/origin-node
-    - /etc/sysconfig/atomic-openshift-master
-    - /etc/sysconfig/atomic-openshift-node
-    - /etc/sysconfig/openshift-master
-    - /etc/sysconfig/openshift-node
-    - /etc/sysconfig/atomic-enterprise-master
-    - /etc/sysconfig/atomic-enterprise-node
-    - /etc/etcd
-    - /var/lib/etcd
-
   - user: name={{ item }} state=absent remove=yes
     with_items:
     - alice
     - joe
diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml
new file mode 100644
index 000000000..1a3c56d95
--- /dev/null
+++ b/playbooks/adhoc/uninstall.yml
@@ -0,0 +1,111 @@
+# This deletes *ALL* Origin, Atomic Enterprise Platform and OpenShift
+# Enterprise content installed by ansible.  This includes:
+#
+#    configuration
+#    containers
+#    example templates and imagestreams
+#    images
+#    RPMs
+---
+- hosts:
+  - OSEv3:children
+
+  sudo: yes
+
+  tasks:
+  - service: name={{ item }} state=stopped
+    with_items:
+    - openvswitch
+    - origin-master
+    - origin-node
+    - atomic-openshift-master
+    - atomic-openshift-node
+    - openshift-master
+    - openshift-node
+    - atomic-enterprise-master
+    - atomic-enterprise-node
+    - etcd
+
+  - yum: name={{ item }} state=absent
+    with_items:
+    - openvswitch
+    - etcd
+    - origin
+    - origin-master
+    - origin-node
+    - origin-sdn-ovs
+    - tuned-profiles-origin-node
+    - atomic-openshift
+    - atomic-openshift-master
+    - atomic-openshift-node
+    - atomic-openshift-sdn-ovs
+    - tuned-profiles-atomic-openshift-node
+    - atomic-enterprise
+    - atomic-enterprise-master
+    - atomic-enterprise-node
+    - atomic-enterprise-sdn-ovs
+    - tuned-profiles-atomic-enterprise-node
+    - openshift
+    - openshift-master
+    - openshift-node
+    - openshift-sdn-ovs
+    - tuned-profiles-openshift-node
+
+  - shell: systemctl reset-failed
+    changed_when: False
+
+  - shell: systemctl daemon-reload
+    changed_when: False
+
+  - shell: find /var/lib/origin/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true
+    changed_when: False
+
+  - shell: find /var/lib/atomic-enterprise/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true
+    changed_when: False
+
+  - shell: find /var/lib/openshift/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true
+    changed_when: False
+
+  - shell: docker rm -f "{{ item }}"-master "{{ item }}"-node
+    changed_when: False
+    failed_when: False
+    with_items:
+    - openshift-enterprise
+    - atomic-enterprise
+    - origin
+
+  - shell: docker images | grep {{ item }} | awk '{ print $3 }'
+    changed_when: False
+    failed_when: False
+    register: images_to_delete
+    with_items:
+    - registry.access.redhat.com/openshift3
+    - registry.access.redhat.com/aep3
+    - docker.io/openshift
+
+  - shell: "docker rmi {{ item.stdout_lines | join(' ') }}"
+    changed_when: False
+    failed_when: False
+    with_items: "{{ images_to_delete.results }}"
+
+  - file: path={{ item }} state=absent
+    with_items:
+    - /etc/atomic-enterprise
+    - /etc/etcd
+    - /etc/openshift
+    - /etc/openshift-sdn
+    - /etc/origin
+    - /etc/sysconfig/atomic-enterprise-master
+    - /etc/sysconfig/atomic-enterprise-node
+    - /etc/sysconfig/atomic-openshift-master
+    - /etc/sysconfig/atomic-openshift-node
+    - /etc/sysconfig/openshift-master
+    - /etc/sysconfig/openshift-node
+    - /etc/sysconfig/origin-master
+    - /etc/sysconfig/origin-node
+    - /root/.kube
+    - /usr/share/openshift/examples
+    - /var/lib/atomic-enterprise
+    - /var/lib/etcd
+    - /var/lib/openshift
+    - /var/lib/origin
-- cgit v1.2.3
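One sharp edge in the new uninstall.yml: the service task can error on any host where a given unit was never installed, since state=stopped on a missing unit is treated as a failure by the service module. A hedged hardening sketch, same task shape but tolerating absent units:

- service: name={{ item }} state=stopped
  failed_when: False  # a unit missing from this host should not abort the uninstall
  with_items:
  - atomic-openshift-master
  - atomic-openshift-node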
From 1bf7844f61785e717f8563d03994841d0a71ac28 Mon Sep 17 00:00:00 2001
From: Brenton Leanhardt
Date: Wed, 21 Oct 2015 10:26:45 -0400
Subject: Adding *master-api and *master-controllers to the list of units to
 stop (also sorted the various lists alphabetically)

---
 playbooks/adhoc/uninstall.yml | 44 ++++++++++++++++++++++++------------------
 1 file changed, 25 insertions(+), 19 deletions(-)

(limited to 'playbooks/adhoc')

diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml
index 1a3c56d95..ecd858e68 100644
--- a/playbooks/adhoc/uninstall.yml
+++ b/playbooks/adhoc/uninstall.yml
@@ -15,41 +15,47 @@
   tasks:
   - service: name={{ item }} state=stopped
     with_items:
-    - openvswitch
-    - origin-master
-    - origin-node
+    - atomic-enterprise-master
+    - atomic-enterprise-node
     - atomic-openshift-master
+    - atomic-openshift-master-api
+    - atomic-openshift-master-controllers
     - atomic-openshift-node
+    - etcd
     - openshift-master
+    - openshift-master-api
+    - openshift-master-controllers
     - openshift-node
-    - atomic-enterprise-master
-    - atomic-enterprise-node
-    - etcd
-
-  - yum: name={{ item }} state=absent
-    with_items:
     - openvswitch
-    - etcd
-    - origin
     - origin-master
+    - origin-master-api
+    - origin-master-controllers
     - origin-node
-    - origin-sdn-ovs
-    - tuned-profiles-origin-node
-    - atomic-openshift
-    - atomic-openshift-master
-    - atomic-openshift-node
-    - atomic-openshift-sdn-ovs
-    - tuned-profiles-atomic-openshift-node
+
+  - yum: name={{ item }} state=absent
+    with_items:
     - atomic-enterprise
     - atomic-enterprise-master
     - atomic-enterprise-node
     - atomic-enterprise-sdn-ovs
-    - tuned-profiles-atomic-enterprise-node
+    - atomic-openshift
+    - atomic-openshift-master
+    - atomic-openshift-node
+    - atomic-openshift-sdn-ovs
+    - etcd
     - openshift
     - openshift-master
     - openshift-node
     - openshift-sdn-ovs
+    - openvswitch
+    - origin
+    - origin-master
+    - origin-node
+    - origin-sdn-ovs
+    - tuned-profiles-atomic-enterprise-node
+    - tuned-profiles-atomic-openshift-node
     - tuned-profiles-openshift-node
+    - tuned-profiles-origin-node
 
   - shell: systemctl reset-failed
     changed_when: False
-- cgit v1.2.3
From 1b0c615d3c1c7dfd6484ba399763282586475599 Mon Sep 17 00:00:00 2001
From: Brenton Leanhardt
Date: Wed, 21 Oct 2015 11:31:18 -0400
Subject: Removing the openshift facts

---
 playbooks/adhoc/uninstall.yml | 1 +
 1 file changed, 1 insertion(+)

(limited to 'playbooks/adhoc')

diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml
index ecd858e68..3e865706d 100644
--- a/playbooks/adhoc/uninstall.yml
+++ b/playbooks/adhoc/uninstall.yml
@@ -96,6 +96,7 @@
 
   - file: path={{ item }} state=absent
     with_items:
+    - /etc/ansible/facts.d/openshift.fact
    - /etc/atomic-enterprise
    - /etc/etcd
-- cgit v1.2.3
From 6ada8b8eb4ebe60ba18226caa5b4812b26161379 Mon Sep 17 00:00:00 2001
From: Brenton Leanhardt
Date: Wed, 21 Oct 2015 15:56:24 -0400
Subject: Deleting exited openshift containers and some other minor touch ups

---
 playbooks/adhoc/uninstall.yml | 18 +++++++++++++++++-
 1 file changed, 17 insertions(+), 1 deletion(-)

(limited to 'playbooks/adhoc')

diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml
index 3e865706d..40db668da 100644
--- a/playbooks/adhoc/uninstall.yml
+++ b/playbooks/adhoc/uninstall.yml
@@ -39,6 +39,7 @@
     - atomic-enterprise-node
     - atomic-enterprise-sdn-ovs
     - atomic-openshift
+    - atomic-openshift-clients
     - atomic-openshift-master
     - atomic-openshift-node
     - atomic-openshift-sdn-ovs
@@ -46,6 +47,7 @@
     - openshift
     - openshift-master
     - openshift-node
+    - openshift-sdn
     - openshift-sdn-ovs
     - openvswitch
     - origin
@@ -80,6 +82,20 @@
     - atomic-enterprise
     - origin
 
+  - shell: docker ps -a | grep Exited | grep "{{ item }}" | awk '{print $1}'
+    changed_when: False
+    failed_when: False
+    register: exited_containers_to_delete
+    with_items:
+    - aep3/aep
+    - openshift3/ose
+    - openshift/origin
+
+  - shell: "docker rm {{ item.stdout_lines | join(' ') }}"
+    changed_when: False
+    failed_when: False
+    with_items: "{{ exited_containers_to_delete.results }}"
+
   - shell: docker images | grep {{ item }} | awk '{ print $3 }'
     changed_when: False
     failed_when: False
     register: images_to_delete
     with_items:
     - registry.access.redhat.com/openshift3
     - registry.access.redhat.com/aep3
     - docker.io/openshift
 
-  - shell: "docker rmi {{ item.stdout_lines | join(' ') }}"
+  - shell: "docker rmi -f {{ item.stdout_lines | join(' ') }}"
     changed_when: False
     failed_when: False
     with_items: "{{ images_to_delete.results }}"
-- cgit v1.2.3
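Taken together, the series leaves uninstall.yml as a reusable cleanup entry point that the tutorial reset now builds on. A final verification sketch for after a run; the image patterns mirror the ones the playbook greps for, and the check itself is illustrative:

- hosts:
  - OSEv3:children
  sudo: yes
  tasks:
  - name: Count leftover OpenShift-related images (illustrative post-uninstall check)
    shell: docker images | grep -E 'openshift3|aep3|docker.io/openshift' | wc -l
    register: leftover_images
    changed_when: False
    failed_when: leftover_images.stdout|int > 0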