From 0c983829f2c43a9e7078a5395f768ba965685268 Mon Sep 17 00:00:00 2001 From: Devan Goodwin Date: Fri, 10 Jun 2016 15:23:28 -0300 Subject: Fix docker 1.10 upgrade on embedded etcd masters. The tasks were attempting to stop/start etcd, which would be fine on the stop but on start could actually kick the non-containerized etcd service, which happens to be laid down even though it's unused. When the service was requested to start again it would claim the port embedded etcd needs and the master would then fail to come up. Instead use the correct etcd_container service. --- playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml index 6c12e8245..8b1b2fb1b 100644 --- a/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml +++ b/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml @@ -59,7 +59,7 @@ - "{{ openshift.common.service_type }}-master-api" - "{{ openshift.common.service_type }}-master-controllers" - "{{ openshift.common.service_type }}-node" - - etcd + - etcd_container - openvswitch failed_when: false when: docker_upgrade is defined and docker_upgrade | bool and openshift.common.is_containerized | bool @@ -77,7 +77,7 @@ - name: Restart containerized services service: name={{ item }} state=started with_items: - - etcd + - etcd_container - openvswitch - "{{ openshift.common.service_type }}-master" - "{{ openshift.common.service_type }}-master-api" -- cgit v1.2.3 From e63d773e91e2d0d4e1f22be2de101bedb61a777d Mon Sep 17 00:00:00 2001 From: Andrew Butcher Date: Mon, 13 Jun 2016 13:48:57 -0400 Subject: Separate uninstall plays by group. 
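This splits the uninstall playbook into separate plays for the nodes, masters, etcd and lb host groups, so each group of hosts only runs the cleanup steps that apply to it. As an illustrative aside (the inventory file name below is an assumption, not part of this change), the split also makes it easy to scope a run to one group with Ansible's standard limit flag:

    # Hypothetical invocation: clean up only the node hosts from the inventory
    ansible-playbook -i hosts playbooks/adhoc/uninstall.yml --limit nodes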
--- playbooks/adhoc/uninstall.yml | 588 +++++++++++++++++++++++++----------------- 1 file changed, 351 insertions(+), 237 deletions(-) diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml index dbf924683..a141b3303 100644 --- a/playbooks/adhoc/uninstall.yml +++ b/playbooks/adhoc/uninstall.yml @@ -7,173 +7,286 @@ # images # RPMs --- -- hosts: - - OSEv3:children +- hosts: OSEv3:children + become: yes + tasks: + - name: Detecting Operating System + shell: ls /run/ostree-booted + ignore_errors: yes + failed_when: false + register: ostree_output + + # Since we're not calling openshift_facts we'll do this for now + - set_fact: + is_atomic: "{{ ostree_output.rc == 0 }}" + - set_fact: + is_containerized: "{{ is_atomic or containerized | default(false) | bool }}" +- hosts: nodes become: yes + tasks: + - name: Stop services + service: name={{ item }} state=stopped + with_items: + - atomic-enterprise-node + - atomic-openshift-node + - openshift-node + - openvswitch + - origin-node + failed_when: false + + - name: unmask services + command: systemctl unmask "{{ item }}" + changed_when: False + failed_when: False + with_items: + - firewalld + + - name: Remove packages + action: "{{ ansible_pkg_mgr }} name={{ item }} state=absent" + when: not is_atomic | bool + with_items: + - atomic-enterprise + - atomic-enterprise-node + - atomic-enterprise-sdn-ovs + - atomic-openshift + - atomic-openshift-clients + - atomic-openshift-node + - atomic-openshift-sdn-ovs + - cockpit-bridge + - cockpit-docker + - cockpit-shell + - cockpit-ws + - kubernetes-client + - openshift + - openshift-node + - openshift-sdn + - openshift-sdn-ovs + - openvswitch + - origin + - origin-clients + - origin-node + - origin-sdn-ovs + - tuned-profiles-atomic-enterprise-node + - tuned-profiles-atomic-openshift-node + - tuned-profiles-openshift-node + - tuned-profiles-origin-node + + - shell: systemctl reset-failed + changed_when: False + + - shell: systemctl daemon-reload + changed_when: False + + - name: Remove br0 interface + shell: ovs-vsctl del-br br0 + changed_when: False + failed_when: False + + - name: Remove linux interfaces + shell: ip link del "{{ item }}" + changed_when: False + failed_when: False + with_items: + - lbr0 + - vlinuxbr + - vovsbr + + - name: restart docker + service: name=docker state=restarted + + - name: restart NetworkManager + service: name=NetworkManager state=restarted + + - shell: find /var/lib/origin/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true + changed_when: False + + - shell: find /var/lib/atomic-enterprise/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true + changed_when: False + + - shell: find /var/lib/openshift/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true + changed_when: False + + - shell: docker rm -f "{{ item }}"-master "{{ item }}"-node + changed_when: False + failed_when: False + with_items: + - openshift-enterprise + - atomic-enterprise + - origin + + - shell: docker ps -a | grep Exited | egrep "{{ item }}" | awk '{print $1}' + changed_when: False + failed_when: False + register: exited_containers_to_delete + with_items: + - aep3.*/aep + - aep3.*/node + - aep3.*/openvswitch + - openshift3/ose + - openshift3/node + - openshift3/openvswitch + - openshift/origin + + - shell: "docker rm {{ item.stdout_lines | join(' ') }}" + changed_when: False + failed_when: False + with_items: "{{ exited_containers_to_delete.results }}" + + - shell: docker images | egrep {{ item }} | awk '{ print $3 }' + changed_when: False 
+ failed_when: False + register: images_to_delete + with_items: + - registry\.access\..*redhat\.com/openshift3 + - registry\.access\..*redhat\.com/aep3 + - registry\.qe\.openshift\.com/.* + - registry\.access\..*redhat\.com/rhel7/etcd + - docker.io/openshift + + - shell: "docker rmi -f {{ item.stdout_lines | join(' ') }}" + changed_when: False + failed_when: False + with_items: "{{ images_to_delete.results }}" + + - name: Remove sdn drop files + file: + path: /run/openshift-sdn + state: absent + - name: Remove remaining files + file: path={{ item }} state=absent + with_items: + - /etc/ansible/facts.d/openshift.fact + - /etc/atomic-enterprise + - /etc/openshift + - /etc/openshift-sdn + - /etc/origin + - /etc/systemd/system/atomic-openshift-node.service + - /etc/systemd/system/atomic-openshift-node-dep.service + - /etc/systemd/system/origin-node.service + - /etc/systemd/system/origin-node-dep.service + - /etc/systemd/system/openvswitch.service + - /etc/sysconfig/atomic-enterprise-node + - /etc/sysconfig/atomic-openshift-node + - /etc/sysconfig/atomic-openshift-node-dep + - /etc/sysconfig/origin-node + - /etc/sysconfig/origin-node-dep + - /etc/sysconfig/openshift-node + - /etc/sysconfig/openshift-node-dep + - /etc/sysconfig/openvswitch + - /etc/sysconfig/origin-node + - /etc/systemd/system/atomic-openshift-node.service.wants + - /run/openshift-sdn + - /var/lib/atomic-enterprise + - /var/lib/openshift + - /var/lib/origin + - /etc/NetworkManager/dispatcher.d/99-origin-dns.sh + - /etc/dnsmasq.d/origin-dns.conf + - /etc/dnsmasq.d/origin-upstream-dns.conf + +- hosts: masters + become: yes tasks: - - name: Detecting Operating System - shell: ls /run/ostree-booted - ignore_errors: yes - failed_when: false - register: ostree_output - - # Since we're not calling openshift_facts we'll do this for now - - set_fact: - is_atomic: "{{ ostree_output.rc == 0 }}" - - set_fact: - is_containerized: "{{ is_atomic or containerized | default(false) | bool }}" - - - name: Stop services - service: name={{ item }} state=stopped - with_items: - - atomic-enterprise-master - - atomic-enterprise-node - - atomic-openshift-master - - atomic-openshift-master-api - - atomic-openshift-master-controllers - - atomic-openshift-node - - etcd - - haproxy - - openshift-master - - openshift-master-api - - openshift-master-controllers - - openshift-node - - openvswitch - - origin-master - - origin-master-api - - origin-master-controllers - - origin-node - - pcsd - failed_when: false - - - name: unmask services - command: systemctl unmask "{{ item }}" - changed_when: False - failed_when: False - with_items: - - etcd - - firewalld - - atomic-openshift-master - - - name: Stop additional atomic services - service: name={{ item }} state=stopped - when: is_containerized | bool - with_items: - - etcd_container - failed_when: false - - - name: Remove packages - action: "{{ ansible_pkg_mgr }} name={{ item }} state=absent" - when: not is_atomic | bool - with_items: - - atomic-enterprise - - atomic-enterprise-master - - atomic-enterprise-node - - atomic-enterprise-sdn-ovs - - atomic-openshift - - atomic-openshift-clients - - atomic-openshift-master - - atomic-openshift-node - - atomic-openshift-sdn-ovs - - cockpit-bridge - - cockpit-docker - - cockpit-shell - - cockpit-ws - - corosync - - etcd - - haproxy - - kubernetes-client - - openshift - - openshift-master - - openshift-node - - openshift-sdn - - openshift-sdn-ovs - - openvswitch - - origin - - origin-clients - - origin-master - - origin-node - - origin-sdn-ovs - - pacemaker - - pcs - - 
tuned-profiles-atomic-enterprise-node - - tuned-profiles-atomic-openshift-node - - tuned-profiles-openshift-node - - tuned-profiles-origin-node - - - shell: systemctl reset-failed - changed_when: False - - - shell: systemctl daemon-reload - changed_when: False - - - name: Remove remaining files - file: path={{ item }} state=absent - with_items: - - "~{{ ansible_ssh_user }}/.kube" - - /etc/ansible/facts.d/openshift.fact - - /etc/atomic-enterprise - - /etc/corosync - - /etc/etcd - - /etc/openshift - - /etc/openshift-sdn - - /etc/origin - - /etc/systemd/system/atomic-openshift-master.service - - /etc/systemd/system/atomic-openshift-master-api.service - - /etc/systemd/system/atomic-openshift-master-controllers.service - - /etc/systemd/system/atomic-openshift-node.service - - /etc/systemd/system/atomic-openshift-node-dep.service - - /etc/systemd/system/origin-master.service - - /etc/systemd/system/origin-master-api.service - - /etc/systemd/system/origin-master-controllers.service - - /etc/systemd/system/origin-node.service - - /etc/systemd/system/origin-node-dep.service - - /etc/systemd/system/etcd_container.service - - /etc/systemd/system/openvswitch.service - - /etc/sysconfig/atomic-enterprise-master - - /etc/sysconfig/atomic-enterprise-master-api - - /etc/sysconfig/atomic-enterprise-master-controllers - - /etc/sysconfig/atomic-enterprise-node - - /etc/sysconfig/atomic-openshift-master - - /etc/sysconfig/atomic-openshift-master-api - - /etc/sysconfig/atomic-openshift-master-controllers - - /etc/sysconfig/atomic-openshift-node - - /etc/sysconfig/atomic-openshift-node-dep - - /etc/sysconfig/origin-master - - /etc/sysconfig/origin-master-api - - /etc/sysconfig/origin-master-controllers - - /etc/sysconfig/origin-node - - /etc/sysconfig/origin-node-dep - - /etc/sysconfig/openshift-master - - /etc/sysconfig/openshift-node - - /etc/sysconfig/openshift-node-dep - - /etc/sysconfig/openvswitch - - /etc/sysconfig/origin-master - - /etc/sysconfig/origin-master-api - - /etc/sysconfig/origin-master-controllers - - /etc/sysconfig/origin-node - - /etc/systemd/system/atomic-openshift-node.service.wants - - /root/.kube - - /run/openshift-sdn - - /usr/share/openshift/examples - - /var/lib/atomic-enterprise - - /var/lib/etcd - - /var/lib/openshift - - /var/lib/origin - - /var/lib/pacemaker - - /usr/lib/systemd/system/atomic-openshift-master-api.service - - /usr/lib/systemd/system/atomic-openshift-master-controllers.service - - /usr/lib/systemd/system/origin-master-api.service - - /usr/lib/systemd/system/origin-master-controllers.service - - /usr/local/bin/openshift - - /usr/local/bin/oadm - - /usr/local/bin/oc - - /usr/local/bin/kubectl - - /etc/NetworkManager/dispatcher.d/99-origin-dns.sh - - /etc/dnsmasq.d/origin-dns.conf - - /etc/dnsmasq.d/origin-upstream-dns.conf + - name: Stop services + service: name={{ item }} state=stopped + with_items: + - atomic-enterprise-master + - atomic-openshift-master + - atomic-openshift-master-api + - atomic-openshift-master-controllers + - openshift-master + - openshift-master-api + - openshift-master-controllers + - origin-master + - origin-master-api + - origin-master-controllers + - pcsd + failed_when: false + + - name: unmask services + command: systemctl unmask "{{ item }}" + changed_when: False + failed_when: False + with_items: + - firewalld + - atomic-openshift-master + + - name: Remove packages + action: "{{ ansible_pkg_mgr }} name={{ item }} state=absent" + when: not is_atomic | bool + with_items: + - atomic-enterprise + - atomic-enterprise-master + - 
atomic-openshift + - atomic-openshift-clients + - atomic-openshift-master + - cockpit-bridge + - cockpit-docker + - cockpit-shell + - cockpit-ws + - corosync + - kubernetes-client + - openshift + - openshift-master + - origin + - origin-clients + - origin-master + - pacemaker + - pcs + + - shell: systemctl reset-failed + changed_when: False + + - shell: systemctl daemon-reload + changed_when: False + + - name: Remove remaining files + file: path={{ item }} state=absent + with_items: + - "~{{ ansible_ssh_user }}/.kube" + - /etc/ansible/facts.d/openshift.fact + - /etc/atomic-enterprise + - /etc/corosync + - /etc/openshift + - /etc/openshift-sdn + - /etc/origin + - /etc/systemd/system/atomic-openshift-master.service + - /etc/systemd/system/atomic-openshift-master-api.service + - /etc/systemd/system/atomic-openshift-master-controllers.service + - /etc/systemd/system/origin-master.service + - /etc/systemd/system/origin-master-api.service + - /etc/systemd/system/origin-master-controllers.service + - /etc/systemd/system/openvswitch.service + - /etc/sysconfig/atomic-enterprise-master + - /etc/sysconfig/atomic-enterprise-master-api + - /etc/sysconfig/atomic-enterprise-master-controllers + - /etc/sysconfig/atomic-openshift-master + - /etc/sysconfig/atomic-openshift-master-api + - /etc/sysconfig/atomic-openshift-master-controllers + - /etc/sysconfig/origin-master + - /etc/sysconfig/origin-master-api + - /etc/sysconfig/origin-master-controllers + - /etc/sysconfig/openshift-master + - /etc/sysconfig/openvswitch + - /etc/sysconfig/origin-master + - /etc/sysconfig/origin-master-api + - /etc/sysconfig/origin-master-controllers + - /root/.kube + - /usr/share/openshift/examples + - /var/lib/atomic-enterprise + - /var/lib/openshift + - /var/lib/origin + - /var/lib/pacemaker + - /var/lib/pcsd + - /usr/lib/systemd/system/atomic-openshift-master-api.service + - /usr/lib/systemd/system/atomic-openshift-master-controllers.service + - /usr/lib/systemd/system/origin-master-api.service + - /usr/lib/systemd/system/origin-master-controllers.service + - /usr/local/bin/openshift + - /usr/local/bin/oadm + - /usr/local/bin/oc + - /usr/local/bin/kubectl # Since we are potentially removing the systemd unit files for separated # master-api and master-controllers services, so we need to reload the @@ -181,79 +294,80 @@ - name: Reload systemd manager configuration command: systemctl daemon-reload -- hosts: nodes +- hosts: etcd + become: yes + tasks: + - name: Stop services + service: name={{ item }} state=stopped + with_items: + - etcd + failed_when: false + + - name: unmask services + command: systemctl unmask "{{ item }}" + changed_when: False + failed_when: False + with_items: + - etcd + - firewalld + + - name: Stop additional atomic services + service: name={{ item }} state=stopped + when: is_containerized | bool + with_items: + - etcd_container + failed_when: false + + - name: Remove packages + action: "{{ ansible_pkg_mgr }} name={{ item }} state=absent" + when: not is_atomic | bool + with_items: + - etcd + + - shell: systemctl reset-failed + changed_when: False + + - shell: systemctl daemon-reload + changed_when: False + + - name: Remove remaining files + file: path={{ item }} state=absent + with_items: + - /etc/ansible/facts.d/openshift.fact + - /etc/etcd + - /etc/systemd/system/etcd_container.service + - /var/lib/etcd + +- hosts: lb become: yes tasks: - - name: Remove br0 interface - shell: ovs-vsctl del-br br0 - changed_when: False - failed_when: False - - name: Remove linux interfaces - shell: ip link del "{{ item 
}}" - changed_when: False - failed_when: False - with_items: - - lbr0 - - vlinuxbr - - vovsbr - - name: restart docker - service: name=docker state=restarted - - - name: restart NetworkManager - service: name=NetworkManager state=restarted - - - shell: find /var/lib/origin/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true - changed_when: False - - - shell: find /var/lib/atomic-enterprise/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true - changed_when: False - - - shell: find /var/lib/openshift/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true - changed_when: False - - - shell: docker rm -f "{{ item }}"-master "{{ item }}"-node - changed_when: False - failed_when: False - with_items: - - openshift-enterprise - - atomic-enterprise - - origin - - - shell: docker ps -a | grep Exited | egrep "{{ item }}" | awk '{print $1}' - changed_when: False - failed_when: False - register: exited_containers_to_delete - with_items: - - aep3.*/aep - - aep3.*/node - - aep3.*/openvswitch - - openshift3/ose - - openshift3/node - - openshift3/openvswitch - - openshift/origin - - - shell: "docker rm {{ item.stdout_lines | join(' ') }}" - changed_when: False - failed_when: False - with_items: "{{ exited_containers_to_delete.results }}" - - - shell: docker images | egrep {{ item }} | awk '{ print $3 }' - changed_when: False - failed_when: False - register: images_to_delete - with_items: - - registry\.access\..*redhat\.com/openshift3 - - registry\.access\..*redhat\.com/aep3 - - registry\.qe\.openshift\.com/.* - - registry\.access\..*redhat\.com/rhel7/etcd - - docker.io/openshift - - - shell: "docker rmi -f {{ item.stdout_lines | join(' ') }}" - changed_when: False - failed_when: False - with_items: "{{ images_to_delete.results }}" - - - name: Remove sdn drop files - file: - path: /run/openshift-sdn - state: absent + - name: Stop services + service: name={{ item }} state=stopped + with_items: + - haproxy + failed_when: false + + - name: unmask services + command: systemctl unmask "{{ item }}" + changed_when: False + failed_when: False + with_items: + - firewalld + + - name: Remove packages + action: "{{ ansible_pkg_mgr }} name={{ item }} state=absent" + when: not is_atomic | bool + with_items: + - haproxy + + - shell: systemctl reset-failed + changed_when: False + + - shell: systemctl daemon-reload + changed_when: False + + - name: Remove remaining files + file: path={{ item }} state=absent + with_items: + - /etc/ansible/facts.d/openshift.fact + - /var/lib/haproxy -- cgit v1.2.3 From 1960ee8c4db904e7c2d4a9a76d12edf7183894e2 Mon Sep 17 00:00:00 2001 From: Tobias Florek Date: Tue, 14 Jun 2016 15:07:14 +0200 Subject: also volume-mount /etc/sysconfig/docker --- roles/openshift_node/templates/openshift.docker.node.dep.service | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/openshift_node/templates/openshift.docker.node.dep.service b/roles/openshift_node/templates/openshift.docker.node.dep.service index f66a78479..0fb34cffd 100644 --- a/roles/openshift_node/templates/openshift.docker.node.dep.service +++ b/roles/openshift_node/templates/openshift.docker.node.dep.service @@ -6,6 +6,6 @@ Before={{ openshift.common.service_type }}-node.service [Service] -ExecStart=/bin/bash -c "if [[ -f /usr/bin/docker-current ]]; then echo \"DOCKER_ADDTL_BIND_MOUNTS=--volume=/usr/bin/docker-current:/usr/bin/docker-current:ro\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; else echo \"#DOCKER_ADDTL_BIND_MOUNTS=\" > /etc/sysconfig/{{ 
openshift.common.service_type }}-node-dep; fi" +ExecStart=/bin/bash -c "if [[ -f /usr/bin/docker-current ]]; then echo \"DOCKER_ADDTL_BIND_MOUNTS=--volume=/usr/bin/docker-current:/usr/bin/docker-current:ro --volume=/etc/sysconfig/docker:/etc/sysconfig/docker:ro\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; else echo \"#DOCKER_ADDTL_BIND_MOUNTS=\" > /etc/sysconfig/{{ openshift.common.service_type }}-node-dep; fi" ExecStop= SyslogIdentifier={{ openshift.common.service_type }}-node-dep -- cgit v1.2.3 From a7b4676ee4a5388efc8b801a79c62e5e7627c467 Mon Sep 17 00:00:00 2001 From: Devan Goodwin Date: Tue, 14 Jun 2016 10:24:59 -0300 Subject: Attempt to fix containerized node start failure with Docker 1.10. It appears that in some situations (can't reliably reproduce yet), the node will fail to start. This appears to be related to the node-dep service and possibly its environment file. This file is also an EnvironmentFile for the node service, but it's only created by the node-dep service, and it looks like it may try to read its environment before the node-dep service has fully started and created the file. Work around this with an explicit service start. --- roles/openshift_node/tasks/main.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml index 657e99e87..242437a85 100644 --- a/roles/openshift_node/tasks/main.yml +++ b/roles/openshift_node/tasks/main.yml @@ -110,6 +110,10 @@ changed_when: false when: openshift.common.is_containerized | bool +- name: Start and enable node dep + service: name={{ openshift.common.service_type }}-node-dep enabled=yes state=started + when: openshift.common.is_containerized | bool + - name: Start and enable node service: name={{ openshift.common.service_type }}-node enabled=yes state=started register: node_start_result -- cgit v1.2.3 From 61bb9c087b69521c6b93f93913b052893ca61d75 Mon Sep 17 00:00:00 2001 From: Devan Goodwin Date: Tue, 14 Jun 2016 16:47:55 -0300 Subject: Fix no proxy hostnames during upgrade. This value not being set was causing missing hostnames in the sysconfig files with NO_PROXY. This is not the same way we set it during config playbooks; they use vars definitions, but that is too difficult during upgrade as there are too many roles that might need it set. 
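As an illustration only (the file path and hostnames below are hypothetical, not taken from this change), the intended end state is that the generated sysconfig files list the cluster's internal hostnames in NO_PROXY, roughly like:

    # e.g. a fragment of /etc/sysconfig/origin-master on a proxied cluster (values are examples)
    HTTP_PROXY=http://proxy.example.com:3128
    HTTPS_PROXY=http://proxy.example.com:3128
    NO_PROXY=master1.example.com,node1.example.com,node2.example.com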
--- .../common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml index 6bff16674..27b3ece96 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml @@ -8,6 +8,18 @@ - openshift_facts - openshift_repos +- name: Set openshift_no_proxy_internal_hostnames + hosts: oo_masters_to_config:oo_nodes_to_config + tasks: + - set_fact: + openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and + openshift_generate_no_proxy_hosts | default(True) | bool }}" + - name: Evaluate additional groups for upgrade hosts: localhost connection: local -- cgit v1.2.3 From f9ecec42f51f7409322d73995584db1755fbe999 Mon Sep 17 00:00:00 2001 From: talset Date: Wed, 15 Jun 2016 13:55:30 +0200 Subject: Fix uninstall.yml indentation for daemon-reload * command: systemctl daemon-reload needs to be at the task level, not in the with_items --- playbooks/adhoc/uninstall.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml index a141b3303..3d6de2d17 100644 --- a/playbooks/adhoc/uninstall.yml +++ b/playbooks/adhoc/uninstall.yml @@ -288,11 +288,11 @@ - /usr/local/bin/oc - /usr/local/bin/kubectl - # Since we are potentially removing the systemd unit files for separated - # master-api and master-controllers services, so we need to reload the - # systemd configuration manager - - name: Reload systemd manager configuration - command: systemctl daemon-reload + # Since we are potentially removing the systemd unit files for separated + # master-api and master-controllers services, so we need to reload the + # systemd configuration manager + - name: Reload systemd manager configuration + command: systemctl daemon-reload - hosts: etcd become: yes -- cgit v1.2.3 From 02d4d3ebea97df7fb8ae33233ffb0c5fef3c5bda Mon Sep 17 00:00:00 2001 From: Scott Dodson Date: Thu, 16 Jun 2016 17:25:02 -0400 Subject: Stop dumping debug output, re-try starting the node once --- roles/openshift_node/tasks/main.yml | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml index 242437a85..b5393e3cf 100644 --- a/roles/openshift_node/tasks/main.yml +++ b/roles/openshift_node/tasks/main.yml @@ -118,14 +118,10 @@ service: name={{ openshift.common.service_type }}-node enabled=yes state=started register: node_start_result ignore_errors: yes - -- name: Check logs on failure - command: journalctl -xe - register: node_failure - when: node_start_result | failed - -- name: Dump failure information - debug: var=node_failure + +- name: Start and enable node again + service: name={{ openshift.common.service_type }}-node enabled=yes state=started + register: node_start_result when: node_start_result | failed - set_fact: -- cgit v1.2.3 From 555ab6db54423fde7440e8ca4178d44a442f32f2 Mon Sep 17 00:00:00 2001 From: Scott Dodson Date: Fri, 17 Jun 2016 11:59:54 -0400 Subject: Add 30 second pause before retrying to start the node --- 
roles/openshift_node/tasks/main.yml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml index b5393e3cf..6aac0dc21 100644 --- a/roles/openshift_node/tasks/main.yml +++ b/roles/openshift_node/tasks/main.yml @@ -119,6 +119,11 @@ register: node_start_result ignore_errors: yes +- name: Wait 30 seconds for docker initialization whenever node has failed + pause: + seconds: 30 + when: node_start_result | failed + - name: Start and enable node again service: name={{ openshift.common.service_type }}-node enabled=yes state=started register: node_start_result -- cgit v1.2.3 From ed28e9a5929aaa305d5d9db38da96887184b3338 Mon Sep 17 00:00:00 2001 From: Scott Dodson Date: Thu, 19 May 2016 11:17:32 -0400 Subject: If registry_url != registry.access.redhat.com then modify image streams --- playbooks/common/openshift-cluster/additional_config.yml | 1 + playbooks/common/openshift-cluster/upgrades/v3_0_minor/upgrade.yml | 1 + playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml | 1 + playbooks/common/openshift-cluster/upgrades/v3_1_minor/post.yml | 1 + playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/post.yml | 1 + roles/openshift_examples/defaults/main.yml | 2 ++ roles/openshift_examples/tasks/main.yml | 5 +++++ 7 files changed, 12 insertions(+) diff --git a/playbooks/common/openshift-cluster/additional_config.yml b/playbooks/common/openshift-cluster/additional_config.yml index ebddc7841..a34322754 100644 --- a/playbooks/common/openshift-cluster/additional_config.yml +++ b/playbooks/common/openshift-cluster/additional_config.yml @@ -17,6 +17,7 @@ - role: openshift_master_cluster when: openshift_master_ha | bool and openshift.master.cluster_method == "pacemaker" - role: openshift_examples + registry_url: "{{ openshift.master.registry_url }}" when: openshift.common.install_examples | bool - role: openshift_cluster_metrics when: openshift.common.use_cluster_metrics | bool diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_minor/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_0_minor/upgrade.yml index 5b2bf9f93..e31e7f8a3 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_0_minor/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_0_minor/upgrade.yml @@ -109,5 +109,6 @@ vars: openshift_examples_import_command: "update" openshift_deployment_type: "{{ deployment_type }}" + registry_url: "{{ openshift.master.registry_url }}" roles: - openshift_examples diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml index 3a4c58e43..c3c1240d8 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml @@ -569,6 +569,7 @@ # Update the existing templates - role: openshift_examples openshift_examples_import_command: replace + registry_url: "{{ openshift.master.registry_url }}" pre_tasks: - name: Collect all routers command: > diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/post.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/post.yml index 196393b2a..f030eed18 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_1_minor/post.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_1_minor/post.yml @@ -19,6 +19,7 @@ # Update the existing templates - role: openshift_examples openshift_examples_import_command: replace + registry_url: "{{ 
openshift.master.registry_url }}" pre_tasks: - name: Collect all routers command: > diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/post.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/post.yml index 31e76805c..c16965a35 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/post.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/post.yml @@ -19,6 +19,7 @@ - openshift_examples # Update the existing templates - role: openshift_examples + registry_url: "{{ openshift.master.registry_url }}" openshift_examples_import_command: replace pre_tasks: - name: Collect all routers diff --git a/roles/openshift_examples/defaults/main.yml b/roles/openshift_examples/defaults/main.yml index 976ff7702..d88014bea 100644 --- a/roles/openshift_examples/defaults/main.yml +++ b/roles/openshift_examples/defaults/main.yml @@ -20,3 +20,5 @@ infrastructure_origin_base: "{{ examples_base }}/infrastructure-templates/origin infrastructure_enterprise_base: "{{ examples_base }}/infrastructure-templates/enterprise" openshift_examples_import_command: "create" +registry_url: "" +registry_host: "{{ registry_url.split('/')[0] if '.' in registry_url.split('/')[0] else '' }}" \ No newline at end of file diff --git a/roles/openshift_examples/tasks/main.yml b/roles/openshift_examples/tasks/main.yml index a5731be09..fb10188f2 100644 --- a/roles/openshift_examples/tasks/main.yml +++ b/roles/openshift_examples/tasks/main.yml @@ -4,6 +4,11 @@ src: "examples/{{ content_version }}/" dest: "{{ examples_base }}/" +- name: Modify registry paths if registry_url is not registry.access.redhat.com + shell: > + find {{ examples_base }} -type f | xargs -n 1 sed -i 's|registry.access.redhat.com|{{ registry_host | quote }}|g' + when: registry_host != '' + # RHEL and Centos image streams are mutually exclusive - name: Import RHEL streams command: > -- cgit v1.2.3 From 8d3e9489aca1088687583dec74c365cd8412fcec Mon Sep 17 00:00:00 2001 From: Scott Dodson Date: Wed, 22 Jun 2016 10:21:15 -0400 Subject: Update logging and metrics templates --- roles/openshift_examples/examples-sync.sh | 2 +- .../enterprise/metrics-deployer.yaml | 10 +- .../origin/logging-deployer.yaml | 146 ++++++++++++--------- .../origin/metrics-deployer.yaml | 6 + 4 files changed, 101 insertions(+), 63 deletions(-) diff --git a/roles/openshift_examples/examples-sync.sh b/roles/openshift_examples/examples-sync.sh index f9d194909..24c18f2b8 100755 --- a/roles/openshift_examples/examples-sync.sh +++ b/roles/openshift_examples/examples-sync.sh @@ -41,7 +41,7 @@ wget https://raw.githubusercontent.com/jboss-fuse/application-templates/master/f wget https://raw.githubusercontent.com/openshift/origin-metrics/master/metrics.yaml -O ${EXAMPLES_BASE}/infrastructure-templates/origin/metrics-deployer.yaml wget https://raw.githubusercontent.com/openshift/origin-metrics/enterprise/metrics.yaml -O ${EXAMPLES_BASE}/infrastructure-templates/enterprise/metrics-deployer.yaml -wget https://raw.githubusercontent.com/openshift/origin-aggregated-logging/master/deployment/deployer.yaml -O ${EXAMPLES_BASE}/infrastructure-templates/origin/logging-deployer.yaml +wget https://raw.githubusercontent.com/openshift/origin-aggregated-logging/master/deployer/deployer.yaml -O ${EXAMPLES_BASE}/infrastructure-templates/origin/logging-deployer.yaml wget https://raw.githubusercontent.com/openshift/origin-aggregated-logging/enterprise/deployment/deployer.yaml -O ${EXAMPLES_BASE}/infrastructure-templates/enterprise/logging-deployer.yaml popd diff --git 
a/roles/openshift_examples/files/examples/v1.2/infrastructure-templates/enterprise/metrics-deployer.yaml b/roles/openshift_examples/files/examples/v1.2/infrastructure-templates/enterprise/metrics-deployer.yaml index 67e49f327..c4bf37b63 100644 --- a/roles/openshift_examples/files/examples/v1.2/infrastructure-templates/enterprise/metrics-deployer.yaml +++ b/roles/openshift_examples/files/examples/v1.2/infrastructure-templates/enterprise/metrics-deployer.yaml @@ -54,6 +54,8 @@ objects: value: ${IMAGE_VERSION} - name: MASTER_URL value: ${MASTER_URL} + - name: MODE + value: ${MODE} - name: REDEPLOY value: ${REDEPLOY} - name: USE_PERSISTENT_STORAGE @@ -66,6 +68,8 @@ objects: value: ${CASSANDRA_PV_SIZE} - name: METRIC_DURATION value: ${METRIC_DURATION} + - name: METRIC_RESOLUTION + value: ${METRIC_RESOLUTION} dnsPolicy: ClusterFirst restartPolicy: Never serviceAccount: metrics-deployer @@ -83,7 +87,7 @@ parameters: - description: 'Specify version for metrics components; e.g. for "openshift/origin-metrics-deployer:latest", set version "latest"' name: IMAGE_VERSION - value: "3.2.0" + value: "3.2.1" - description: "Internal URL for the master, for authentication retrieval" name: MASTER_URL @@ -112,3 +116,7 @@ parameters: description: "How many days metrics should be stored for." name: METRIC_DURATION value: "7" +- + description: "How often metrics should be gathered. Defaults value of '10s' for 10 seconds" + name: METRIC_RESOLUTION + value: "10s" diff --git a/roles/openshift_examples/files/examples/v1.2/infrastructure-templates/origin/logging-deployer.yaml b/roles/openshift_examples/files/examples/v1.2/infrastructure-templates/origin/logging-deployer.yaml index fd5841db7..77ffee7f9 100644 --- a/roles/openshift_examples/files/examples/v1.2/infrastructure-templates/origin/logging-deployer.yaml +++ b/roles/openshift_examples/files/examples/v1.2/infrastructure-templates/origin/logging-deployer.yaml @@ -10,7 +10,8 @@ items: description: "Template for creating the deployer account and roles needed for the aggregated logging deployer. Create as cluster-admin." tags: "infrastructure" objects: - - apiVersion: v1 + - + apiVersion: v1 kind: ServiceAccount name: logging-deployer metadata: @@ -19,8 +20,6 @@ items: logging-infra: deployer provider: openshift component: deployer - secrets: - - name: logging-deployer - apiVersion: v1 kind: ServiceAccount @@ -67,18 +66,39 @@ items: - watch - delete - update + - + apiVersion: v1 + kind: RoleBinding + metadata: + name: logging-deployer-edit-role + roleRef: + kind: ClusterRole + name: edit + subjects: + - kind: ServiceAccount + name: logging-deployer + - + apiVersion: v1 + kind: RoleBinding + metadata: + name: logging-deployer-dsadmin-role + roleRef: + kind: ClusterRole + name: daemonset-admin + subjects: + - kind: ServiceAccount + name: logging-deployer - apiVersion: "v1" kind: "Template" metadata: name: logging-deployer-template annotations: - description: "Template for running the aggregated logging deployer in a pod. Requires empowered 'logging-deployer' service account and 'logging-deployer' secret." + description: "Template for running the aggregated logging deployer in a pod. Requires empowered 'logging-deployer' service account." 
tags: "infrastructure" labels: logging-infra: deployer provider: openshift - component: deployer objects: - apiVersion: v1 @@ -91,9 +111,6 @@ items: imagePullPolicy: Always name: deployer volumeMounts: - - name: secret - mountPath: /secret - readOnly: true - name: empty mountPath: /etc/deploy env: @@ -125,6 +142,8 @@ items: value: ${ES_PVC_SIZE} - name: ES_PVC_PREFIX value: ${ES_PVC_PREFIX} + - name: ES_PVC_DYNAMIC + value: ${ES_PVC_DYNAMIC} - name: ES_CLUSTER_SIZE value: ${ES_CLUSTER_SIZE} - name: ES_NODE_QUORUM @@ -141,6 +160,8 @@ items: value: ${ES_OPS_PVC_SIZE} - name: ES_OPS_PVC_PREFIX value: ${ES_OPS_PVC_PREFIX} + - name: ES_OPS_PVC_DYNAMIC + value: ${ES_OPS_PVC_DYNAMIC} - name: ES_OPS_CLUSTER_SIZE value: ${ES_OPS_CLUSTER_SIZE} - name: ES_OPS_NODE_QUORUM @@ -173,130 +194,133 @@ items: volumes: - name: empty emptyDir: {} - - name: secret - secret: - secretName: logging-deployer parameters: - - description: "If true, set up to use a second ES cluster for ops logs." + description: "The mode that the deployer runs in." + name: MODE + value: "install" + - + description: 'Specify prefix for logging components; e.g. for "openshift/origin-logging-deployer:v1.1", set prefix "openshift/origin-"' + name: IMAGE_PREFIX + value: "docker.io/openshift/origin-" + - + description: 'Specify version for logging components; e.g. for "openshift/origin-logging-deployer:v1.1", set version "v1.1"' + name: IMAGE_VERSION + value: "latest" + - + description: "(Deprecated) Specify the name of an existing pull secret to be used for pulling component images from an authenticated registry." + name: IMAGE_PULL_SECRET + - + description: "(Deprecated) Allow the registry for logging component images to be non-secure (not secured with a certificate signed by a known CA)" + name: INSECURE_REGISTRY + value: "false" + - + description: "(Deprecated) If true, set up to use a second ES cluster for ops logs." name: ENABLE_OPS_CLUSTER value: "false" - - description: "External hostname where clients will reach kibana" + description: "(Deprecated) External hostname where clients will reach kibana" name: KIBANA_HOSTNAME - required: true + value: "kibana.example.com" - - description: "External hostname at which admins will visit the ops Kibana." + description: "(Deprecated) External hostname at which admins will visit the ops Kibana." name: KIBANA_OPS_HOSTNAME value: kibana-ops.example.com - - description: "External URL for the master, for OAuth purposes" + description: "(Deprecated) External URL for the master, for OAuth purposes" name: PUBLIC_MASTER_URL - required: true + value: "https://localhost:8443" - - description: "Internal URL for the master, for authentication retrieval" + description: "(Deprecated) Internal URL for the master, for authentication retrieval" name: MASTER_URL value: "https://kubernetes.default.svc.cluster.local" - - description: "How many instances of ElasticSearch to deploy." + description: "(Deprecated) How many instances of ElasticSearch to deploy." name: ES_CLUSTER_SIZE - required: true + value: "1" - - description: "Amount of RAM to reserve per ElasticSearch instance." + description: "(Deprecated) Amount of RAM to reserve per ElasticSearch instance." name: ES_INSTANCE_RAM value: "8G" - - description: "Size of the PersistentVolumeClaim to create per ElasticSearch instance, e.g. 100G. If empty, no PVCs will be created and emptyDir volumes are used instead." + description: "(Deprecated) Size of the PersistentVolumeClaim to create per ElasticSearch instance, e.g. 100G. 
If empty, no PVCs will be created and emptyDir volumes are used instead." name: ES_PVC_SIZE - - description: "Prefix for the names of PersistentVolumeClaims to be created; a number will be appended per instance. If they don't already exist, they will be created with size ES_PVC_SIZE." + description: "(Deprecated) Prefix for the names of PersistentVolumeClaims to be created; a number will be appended per instance. If they don't already exist, they will be created with size ES_PVC_SIZE." name: ES_PVC_PREFIX value: "logging-es-" - - description: "Number of nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1." + description: '(Deprecated) Set to "true" to request dynamic provisioning (if enabled for your cluster) of a PersistentVolume for the ES PVC. ' + name: ES_PVC_DYNAMIC + - + description: "(Deprecated) Number of nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1." name: ES_NODE_QUORUM - - description: "Number of nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_CLUSTER_SIZE." + description: "(Deprecated) Number of nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_CLUSTER_SIZE." name: ES_RECOVER_AFTER_NODES - - description: "Number of nodes desired to be present before the cluster will recover from a full restart. By default, ES_CLUSTER_SIZE." + description: "(Deprecated) Number of nodes desired to be present before the cluster will recover from a full restart. By default, ES_CLUSTER_SIZE." name: ES_RECOVER_EXPECTED_NODES - - description: "Timeout for *expected* nodes to be present when cluster is recovering from a full restart." + description: "(Deprecated) Timeout for *expected* nodes to be present when cluster is recovering from a full restart." name: ES_RECOVER_AFTER_TIME value: "5m" - - description: "How many ops instances of ElasticSearch to deploy. By default, ES_CLUSTER_SIZE." + description: "(Deprecated) How many ops instances of ElasticSearch to deploy. By default, ES_CLUSTER_SIZE." name: ES_OPS_CLUSTER_SIZE - - description: "Amount of RAM to reserve per ops ElasticSearch instance." + description: "(Deprecated) Amount of RAM to reserve per ops ElasticSearch instance." name: ES_OPS_INSTANCE_RAM value: "8G" - - description: "Size of the PersistentVolumeClaim to create per ElasticSearch ops instance, e.g. 100G. If empty, no PVCs will be created and emptyDir volumes are used instead." + description: "(Deprecated) Size of the PersistentVolumeClaim to create per ElasticSearch ops instance, e.g. 100G. If empty, no PVCs will be created and emptyDir volumes are used instead." name: ES_OPS_PVC_SIZE - - description: "Prefix for the names of PersistentVolumeClaims to be created; a number will be appended per instance. If they don't already exist, they will be created with size ES_OPS_PVC_SIZE." + description: "(Deprecated) Prefix for the names of PersistentVolumeClaims to be created; a number will be appended per instance. If they don't already exist, they will be created with size ES_OPS_PVC_SIZE." name: ES_OPS_PVC_PREFIX value: "logging-es-ops-" - - description: "Number of ops nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1." + description: '(Deprecated) Set to "true" to request dynamic provisioning (if enabled for your cluster) of a PersistentVolume for the ES ops PVC. 
' + name: ES_OPS_PVC_DYNAMIC + - + description: "(Deprecated) Number of ops nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1." name: ES_OPS_NODE_QUORUM - - description: "Number of ops nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_OPS_CLUSTER_SIZE." + description: "(Deprecated) Number of ops nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_OPS_CLUSTER_SIZE." name: ES_OPS_RECOVER_AFTER_NODES - - description: "Number of ops nodes desired to be present before the cluster will recover from a full restart. By default, ES_OPS_CLUSTER_SIZE." + description: "(Deprecated) Number of ops nodes desired to be present before the cluster will recover from a full restart. By default, ES_OPS_CLUSTER_SIZE." name: ES_OPS_RECOVER_EXPECTED_NODES - - description: "Timeout for *expected* ops nodes to be present when cluster is recovering from a full restart." + description: "(Deprecated) Timeout for *expected* ops nodes to be present when cluster is recovering from a full restart." name: ES_OPS_RECOVER_AFTER_TIME value: "5m" - - description: "The nodeSelector used for the Fluentd DaemonSet." + description: "(Deprecated) The nodeSelector used for the Fluentd DaemonSet." name: FLUENTD_NODESELECTOR value: "logging-infra-fluentd=true" - - description: "Node selector Elasticsearch cluster (label=value)." + description: "(Deprecated) Node selector Elasticsearch cluster (label=value)." name: ES_NODESELECTOR value: "" - - description: "Node selector Elasticsearch operations cluster (label=value)." + description: "(Deprecated) Node selector Elasticsearch operations cluster (label=value)." name: ES_OPS_NODESELECTOR value: "" - - description: "Node selector Kibana cluster (label=value)." + description: "(Deprecated) Node selector Kibana cluster (label=value)." name: KIBANA_NODESELECTOR value: "" - - description: "Node selector Kibana operations cluster (label=value)." + description: "(Deprecated) Node selector Kibana operations cluster (label=value)." name: KIBANA_OPS_NODESELECTOR value: "" - - description: "Node selector Curator (label=value)." + description: "(Deprecated) Node selector Curator (label=value)." name: CURATOR_NODESELECTOR value: "" - - description: "Node selector operations Curator (label=value)." + description: "(Deprecated) Node selector operations Curator (label=value)." name: CURATOR_OPS_NODESELECTOR value: "" - - - description: "The mode that the deployer runs in." - name: MODE - value: "install" - - - description: 'Specify prefix for logging components; e.g. for "openshift/origin-logging-deployer:v1.1", set prefix "openshift/origin-"' - name: IMAGE_PREFIX - value: "docker.io/openshift/origin-" - - - description: 'Specify version for logging components; e.g. for "openshift/origin-logging-deployer:v1.1", set version "v1.1"' - name: IMAGE_VERSION - value: "latest" - - - description: 'Specify the name of an existing pull secret to be used for pulling component images from an authenticated registry.' 
- name: IMAGE_PULL_SECRET - - - description: 'Allow the registry for logging component images to be non-secure (not secured with a certificate signed by a known CA)' - name: INSECURE_REGISTRY - value: "false" diff --git a/roles/openshift_examples/files/examples/v1.2/infrastructure-templates/origin/metrics-deployer.yaml b/roles/openshift_examples/files/examples/v1.2/infrastructure-templates/origin/metrics-deployer.yaml index 8fb594ce8..89639fd67 100644 --- a/roles/openshift_examples/files/examples/v1.2/infrastructure-templates/origin/metrics-deployer.yaml +++ b/roles/openshift_examples/files/examples/v1.2/infrastructure-templates/origin/metrics-deployer.yaml @@ -34,9 +34,11 @@ objects: metadata: generateName: metrics-deployer- spec: + securityContext: {} containers: - image: ${IMAGE_PREFIX}metrics-deployer:${IMAGE_VERSION} name: deployer + securityContext: {} volumeMounts: - name: secret mountPath: /secret @@ -48,6 +50,10 @@ objects: valueFrom: fieldRef: fieldPath: metadata.namespace + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name - name: IMAGE_PREFIX value: ${IMAGE_PREFIX} - name: IMAGE_VERSION -- cgit v1.2.3 From c0b25fcfcb3e0e0454c0e7ee6116725821fbdfcb Mon Sep 17 00:00:00 2001 From: Scott Dodson Date: Wed, 22 Jun 2016 10:21:57 -0400 Subject: Update the rest of the templates --- .../db-templates/mongodb-ephemeral-template.json | 2 +- .../db-templates/mongodb-persistent-template.json | 2 +- .../db-templates/mysql-ephemeral-template.json | 2 +- .../db-templates/mysql-persistent-template.json | 47 +++-------- .../postgresql-ephemeral-template.json | 2 +- .../postgresql-persistent-template.json | 2 +- .../quickstart-templates/django-postgresql.json | 2 +- .../jenkins-ephemeral-template.json | 94 +++++++++++---------- .../jenkins-persistent-template.json | 96 ++++++++++++---------- 9 files changed, 119 insertions(+), 130 deletions(-) diff --git a/roles/openshift_examples/files/examples/v1.2/db-templates/mongodb-ephemeral-template.json b/roles/openshift_examples/files/examples/v1.2/db-templates/mongodb-ephemeral-template.json index 0e618624b..9a935be5e 100644 --- a/roles/openshift_examples/files/examples/v1.2/db-templates/mongodb-ephemeral-template.json +++ b/roles/openshift_examples/files/examples/v1.2/db-templates/mongodb-ephemeral-template.json @@ -85,7 +85,7 @@ "containers": [ { "name": "mongodb", - "image": "mongodb", + "image": " ", "ports": [ { "containerPort": 27017, diff --git a/roles/openshift_examples/files/examples/v1.2/db-templates/mongodb-persistent-template.json b/roles/openshift_examples/files/examples/v1.2/db-templates/mongodb-persistent-template.json index 07290b1ea..4f73d00cc 100644 --- a/roles/openshift_examples/files/examples/v1.2/db-templates/mongodb-persistent-template.json +++ b/roles/openshift_examples/files/examples/v1.2/db-templates/mongodb-persistent-template.json @@ -102,7 +102,7 @@ "containers": [ { "name": "mongodb", - "image": "mongodb", + "image": " ", "ports": [ { "containerPort": 27017, diff --git a/roles/openshift_examples/files/examples/v1.2/db-templates/mysql-ephemeral-template.json b/roles/openshift_examples/files/examples/v1.2/db-templates/mysql-ephemeral-template.json index 1457d288c..5f133b946 100644 --- a/roles/openshift_examples/files/examples/v1.2/db-templates/mysql-ephemeral-template.json +++ b/roles/openshift_examples/files/examples/v1.2/db-templates/mysql-ephemeral-template.json @@ -85,7 +85,7 @@ "containers": [ { "name": "mysql", - "image": "mysql", + "image": " ", "ports": [ { "containerPort": 3306, diff --git 
a/roles/openshift_examples/files/examples/v1.2/db-templates/mysql-persistent-template.json b/roles/openshift_examples/files/examples/v1.2/db-templates/mysql-persistent-template.json index e39ee57c8..88d8c3940 100644 --- a/roles/openshift_examples/files/examples/v1.2/db-templates/mysql-persistent-template.json +++ b/roles/openshift_examples/files/examples/v1.2/db-templates/mysql-persistent-template.json @@ -3,7 +3,6 @@ "apiVersion": "v1", "metadata": { "name": "mysql-persistent", - "creationTimestamp": null, "annotations": { "description": "MySQL database service, with persistent storage. Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.", "iconClass": "icon-mysql-database", @@ -15,28 +14,18 @@ "kind": "Service", "apiVersion": "v1", "metadata": { - "name": "${DATABASE_SERVICE_NAME}", - "creationTimestamp": null + "name": "${DATABASE_SERVICE_NAME}" }, "spec": { "ports": [ { "name": "mysql", - "protocol": "TCP", - "port": 3306, - "targetPort": 3306, - "nodePort": 0 + "port": 3306 } ], "selector": { "name": "${DATABASE_SERVICE_NAME}" - }, - "portalIP": "", - "type": "ClusterIP", - "sessionAffinity": "None" - }, - "status": { - "loadBalancer": {} + } } }, { @@ -60,8 +49,7 @@ "kind": "DeploymentConfig", "apiVersion": "v1", "metadata": { - "name": "${DATABASE_SERVICE_NAME}", - "creationTimestamp": null + "name": "${DATABASE_SERVICE_NAME}" }, "spec": { "strategy": { @@ -79,8 +67,7 @@ "kind": "ImageStreamTag", "name": "mysql:latest", "namespace": "${NAMESPACE}" - }, - "lastTriggeredImage": "" + } } }, { @@ -93,7 +80,6 @@ }, "template": { "metadata": { - "creationTimestamp": null, "labels": { "name": "${DATABASE_SERVICE_NAME}" } @@ -102,11 +88,10 @@ "containers": [ { "name": "mysql", - "image": "mysql", + "image": " ", "ports": [ { - "containerPort": 3306, - "protocol": "TCP" + "containerPort": 3306 } ], "readinessProbe": { @@ -149,13 +134,7 @@ "mountPath": "/var/lib/mysql/data" } ], - "terminationMessagePath": "/dev/termination-log", - "imagePullPolicy": "IfNotPresent", - "capabilities": {}, - "securityContext": { - "capabilities": {}, - "privileged": false - } + "imagePullPolicy": "IfNotPresent" } ], "volumes": [ @@ -165,13 +144,10 @@ "claimName": "${DATABASE_SERVICE_NAME}" } } - ], - "restartPolicy": "Always", - "dnsPolicy": "ClusterFirst" + ] } } - }, - "status": {} + } } ], "parameters": [ @@ -179,7 +155,8 @@ "name": "MEMORY_LIMIT", "displayName": "Memory Limit", "description": "Maximum amount of memory the container can use.", - "value": "512Mi" + "value": "512Mi", + "required": true }, { "name": "NAMESPACE", diff --git a/roles/openshift_examples/files/examples/v1.2/db-templates/postgresql-ephemeral-template.json b/roles/openshift_examples/files/examples/v1.2/db-templates/postgresql-ephemeral-template.json index 39a71f25c..e90244a6b 100644 --- a/roles/openshift_examples/files/examples/v1.2/db-templates/postgresql-ephemeral-template.json +++ b/roles/openshift_examples/files/examples/v1.2/db-templates/postgresql-ephemeral-template.json @@ -85,7 +85,7 @@ "containers": [ { "name": "postgresql", - "image": "postgresql", + "image": " ", "ports": [ { "containerPort": 5432, diff --git a/roles/openshift_examples/files/examples/v1.2/db-templates/postgresql-persistent-template.json b/roles/openshift_examples/files/examples/v1.2/db-templates/postgresql-persistent-template.json index 347e01de3..7b05076a5 100644 --- a/roles/openshift_examples/files/examples/v1.2/db-templates/postgresql-persistent-template.json +++ 
b/roles/openshift_examples/files/examples/v1.2/db-templates/postgresql-persistent-template.json @@ -102,7 +102,7 @@ "containers": [ { "name": "postgresql", - "image": "postgresql", + "image": " ", "ports": [ { "containerPort": 5432, diff --git a/roles/openshift_examples/files/examples/v1.2/quickstart-templates/django-postgresql.json b/roles/openshift_examples/files/examples/v1.2/quickstart-templates/django-postgresql.json index f044152b3..dda16ecfa 100644 --- a/roles/openshift_examples/files/examples/v1.2/quickstart-templates/django-postgresql.json +++ b/roles/openshift_examples/files/examples/v1.2/quickstart-templates/django-postgresql.json @@ -437,7 +437,7 @@ }, { "name": "DJANGO_SECRET_KEY", - "displayName": "Djange Secret Key", + "displayName": "Django Secret Key", "description": "Set this to a long random string.", "generate": "expression", "from": "[\\w]{50}" diff --git a/roles/openshift_examples/files/examples/v1.2/quickstart-templates/jenkins-ephemeral-template.json b/roles/openshift_examples/files/examples/v1.2/quickstart-templates/jenkins-ephemeral-template.json index 67fce4a46..d1ae6de90 100644 --- a/roles/openshift_examples/files/examples/v1.2/quickstart-templates/jenkins-ephemeral-template.json +++ b/roles/openshift_examples/files/examples/v1.2/quickstart-templates/jenkins-ephemeral-template.json @@ -11,36 +11,11 @@ } }, "objects": [ - { - "kind": "Service", - "apiVersion": "v1", - "metadata": { - "name": "${JENKINS_SERVICE_NAME}", - "creationTimestamp": null - }, - "spec": { - "ports": [ - { - "name": "web", - "protocol": "TCP", - "port": 8080, - "targetPort": 8080, - "nodePort": 0 - } - ], - "selector": { - "name": "${JENKINS_SERVICE_NAME}" - }, - "portalIP": "", - "type": "ClusterIP", - "sessionAffinity": "None" - } - }, { "kind": "Route", "apiVersion": "v1", "metadata": { - "name": "jenkins", + "name": "${JENKINS_SERVICE_NAME}", "creationTimestamp": null }, "spec": { @@ -77,7 +52,7 @@ ], "from": { "kind": "ImageStreamTag", - "name": "jenkins:latest", + "name": "${JENKINS_IMAGE_STREAM_TAG}", "namespace": "${NAMESPACE}" }, "lastTriggeredImage": "" @@ -102,7 +77,7 @@ "containers": [ { "name": "jenkins", - "image": "JENKINS_IMAGE", + "image": " ", "readinessProbe": { "timeoutSeconds": 3, "initialDelaySeconds": 3, @@ -126,10 +101,10 @@ } ], "resources": { - "limits": { - "memory": "${MEMORY_LIMIT}" - } - }, + "limits": { + "memory": "${MEMORY_LIMIT}" + } + }, "volumeMounts": [ { "name": "${JENKINS_SERVICE_NAME}-data", @@ -158,21 +133,34 @@ } } } + }, + { + "kind": "Service", + "apiVersion": "v1", + "metadata": { + "name": "${JENKINS_SERVICE_NAME}", + "creationTimestamp": null + }, + "spec": { + "ports": [ + { + "name": "web", + "protocol": "TCP", + "port": 8080, + "targetPort": 8080, + "nodePort": 0 + } + ], + "selector": { + "name": "${JENKINS_SERVICE_NAME}" + }, + "portalIP": "", + "type": "ClusterIP", + "sessionAffinity": "None" + } } ], "parameters": [ - { - "name": "MEMORY_LIMIT", - "displayName": "Memory Limit", - "description": "Maximum amount of memory the container can use.", - "value": "512Mi" - }, - { - "name": "NAMESPACE", - "displayName": "Namespace", - "description": "The OpenShift Namespace where the ImageStream resides.", - "value": "openshift" - }, { "name": "JENKINS_SERVICE_NAME", "displayName": "Jenkins Service Name", @@ -185,6 +173,24 @@ "description": "Password for the Jenkins 'admin' user.", "generate": "expression", "value": "password" + }, + { + "name": "MEMORY_LIMIT", + "displayName": "Memory Limit", + "description": "Maximum amount of memory the 
container can use.", + "value": "512Mi" + }, + { + "name": "NAMESPACE", + "displayName": "Jenkins ImageStream Namespace", + "description": "The OpenShift Namespace where the Jenkins ImageStream resides.", + "value": "openshift" + }, + { + "name": "JENKINS_IMAGE_STREAM_TAG", + "displayName": "Jenkins ImageStreamTag", + "description": "Name of the ImageStreamTag to be used for the Jenkins image.", + "value": "jenkins:latest" } ], "labels": { diff --git a/roles/openshift_examples/files/examples/v1.2/quickstart-templates/jenkins-persistent-template.json b/roles/openshift_examples/files/examples/v1.2/quickstart-templates/jenkins-persistent-template.json index ef04b4482..c7bc3f2fa 100644 --- a/roles/openshift_examples/files/examples/v1.2/quickstart-templates/jenkins-persistent-template.json +++ b/roles/openshift_examples/files/examples/v1.2/quickstart-templates/jenkins-persistent-template.json @@ -11,36 +11,11 @@ } }, "objects": [ - { - "kind": "Service", - "apiVersion": "v1", - "metadata": { - "name": "${JENKINS_SERVICE_NAME}", - "creationTimestamp": null - }, - "spec": { - "ports": [ - { - "name": "web", - "protocol": "TCP", - "port": 8080, - "targetPort": 8080, - "nodePort": 0 - } - ], - "selector": { - "name": "${JENKINS_SERVICE_NAME}" - }, - "portalIP": "", - "type": "ClusterIP", - "sessionAffinity": "None" - } - }, { "kind": "Route", "apiVersion": "v1", "metadata": { - "name": "jenkins", + "name": "${JENKINS_SERVICE_NAME}", "creationTimestamp": null }, "spec": { @@ -82,7 +57,7 @@ }, "spec": { "strategy": { - "type": "Recreate" + "type": "Recreate" }, "triggers": [ { @@ -94,7 +69,7 @@ ], "from": { "kind": "ImageStreamTag", - "name": "jenkins:latest", + "name": "${JENKINS_IMAGE_STREAM_TAG}", "namespace": "${NAMESPACE}" }, "lastTriggeredImage": "" @@ -119,7 +94,7 @@ "containers": [ { "name": "jenkins", - "image": "JENKINS_IMAGE", + "image": " ", "readinessProbe": { "timeoutSeconds": 3, "initialDelaySeconds": 3, @@ -143,10 +118,10 @@ } ], "resources": { - "limits": { - "memory": "${MEMORY_LIMIT}" - } - }, + "limits": { + "memory": "${MEMORY_LIMIT}" + } + }, "volumeMounts": [ { "name": "${JENKINS_SERVICE_NAME}-data", @@ -175,21 +150,34 @@ } } } + }, + { + "kind": "Service", + "apiVersion": "v1", + "metadata": { + "name": "${JENKINS_SERVICE_NAME}", + "creationTimestamp": null + }, + "spec": { + "ports": [ + { + "name": "web", + "protocol": "TCP", + "port": 8080, + "targetPort": 8080, + "nodePort": 0 + } + ], + "selector": { + "name": "${JENKINS_SERVICE_NAME}" + }, + "portalIP": "", + "type": "ClusterIP", + "sessionAffinity": "None" + } } ], "parameters": [ - { - "name": "MEMORY_LIMIT", - "displayName": "Memory Limit", - "description": "Maximum amount of memory the container can use.", - "value": "512Mi" - }, - { - "name": "NAMESPACE", - "displayName": "Namespace", - "description": "The OpenShift Namespace where the ImageStream resides.", - "value": "openshift" - }, { "name": "JENKINS_SERVICE_NAME", "displayName": "Jenkins Service Name", @@ -203,12 +191,30 @@ "generate": "expression", "value": "password" }, + { + "name": "MEMORY_LIMIT", + "displayName": "Memory Limit", + "description": "Maximum amount of memory the container can use.", + "value": "512Mi" + }, { "name": "VOLUME_CAPACITY", "displayName": "Volume Capacity", "description": "Volume space available for data, e.g. 
512Mi, 2Gi.", "value": "1Gi", "required": true + }, + { + "name": "NAMESPACE", + "displayName": "Jenkins ImageStream Namespace", + "description": "The OpenShift Namespace where the Jenkins ImageStream resides.", + "value": "openshift" + }, + { + "name": "JENKINS_IMAGE_STREAM_TAG", + "displayName": "Jenkins ImageStreamTag", + "description": "Name of the ImageStreamTag to be used for the Jenkins image.", + "value": "jenkins:latest" } ], "labels": { -- cgit v1.2.3
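As a usage sketch only (the parameter values are assumptions, not part of the change), the JENKINS_IMAGE_STREAM_TAG parameter added above, together with the existing NAMESPACE parameter, can be overridden when instantiating the updated template:

    # Hypothetical example: render the ephemeral Jenkins template and create its objects
    oc process -f jenkins-ephemeral-template.json \
        -v NAMESPACE=openshift,JENKINS_IMAGE_STREAM_TAG=jenkins:latest \
        | oc create -f -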