author     Tomas Sedovic <tomas@sedovic.cz>    2017-10-27 17:59:44 +0200
committer  Tomas Sedovic <tomas@sedovic.cz>    2017-11-07 14:35:46 +1100
commit     94413931c26e47fd9acd3c0d20bbcfd1704755d1 (patch)
tree       f34cf477a608dadaf544a25b808597085fc9c53e
parent     b1e4629ae3e86c59503ac29a781a62a8e75c14f2 (diff)
Remove the post-install and scale-up playbooks
They're not necessary for the initial PR, so let's add them properly later.
-rw-r--r--  playbooks/openstack/openshift-cluster/post-install.yml   57
-rw-r--r--  playbooks/openstack/openshift-cluster/scale-up.yaml      70
2 files changed, 0 insertions, 127 deletions
diff --git a/playbooks/openstack/openshift-cluster/post-install.yml b/playbooks/openstack/openshift-cluster/post-install.yml
deleted file mode 100644
index 7b1744a18..000000000
--- a/playbooks/openstack/openshift-cluster/post-install.yml
+++ /dev/null
@@ -1,57 +0,0 @@
----
-- hosts: OSEv3
- gather_facts: False
- become: True
- tasks:
- - name: Save iptables rules to a backup file
- when: openshift_use_flannel|default(False)|bool
- shell: iptables-save > /etc/sysconfig/iptables.orig-$(date +%Y%m%d%H%M%S)
-
-# Enable iptables service on app nodes to persist custom rules (flannel SDN)
-# FIXME(bogdando) w/a https://bugzilla.redhat.com/show_bug.cgi?id=1490820
-- hosts: app
- gather_facts: False
- become: True
- vars:
- os_firewall_allow:
- - service: dnsmasq tcp
- port: 53/tcp
- - service: dnsmasq udp
- port: 53/udp
- tasks:
- - when: openshift_use_flannel|default(False)|bool
- block:
- - include_role:
- name: os_firewall
- - include_role:
- name: lib_os_firewall
- - name: set allow rules for dnsmasq
- os_firewall_manage_iptables:
- name: "{{ item.service }}"
- action: add
- protocol: "{{ item.port.split('/')[1] }}"
- port: "{{ item.port.split('/')[0] }}"
- with_items: "{{ os_firewall_allow }}"
-
-- hosts: OSEv3
- gather_facts: False
- become: True
- tasks:
- - name: Apply post-install iptables hacks for Flannel SDN (best effort)
- when: openshift_use_flannel|default(False)|bool
- block:
- - name: set allow/masquerade rules for flannel/docker
- shell: >-
- (iptables-save | grep -q custom-flannel-docker-1) ||
- iptables -A DOCKER -w
- -p all -j ACCEPT
- -m comment --comment "custom-flannel-docker-1";
- (iptables-save | grep -q custom-flannel-docker-2) ||
- iptables -t nat -A POSTROUTING -w
- -o {{flannel_interface|default('eth1')}}
- -m comment --comment "custom-flannel-docker-2"
- -j MASQUERADE
-
- # NOTE(bogdando) the rules will not be restored when the iptables service unit is disabled & masked
- - name: Persist in-memory iptables rules (w/o dynamic KUBE rules)
- shell: iptables-save | grep -v KUBE > /etc/sysconfig/iptables
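
For context, the removed post-install flow boils down to backing up iptables, inserting two idempotent flannel/docker rules, and persisting everything except the dynamic KUBE chains. Below is a minimal standalone sketch of that flow, assuming the OSEv3 group and the default eth1 flannel interface shown above; it is an illustration only, not part of this repository:

---
# Hypothetical condensed form of the removed post-install tasks (illustration only).
- hosts: OSEv3
  gather_facts: false
  become: true
  vars:
    flannel_interface: eth1   # assumed default, matching the removed playbook
  tasks:
    - name: Back up the current iptables rules
      shell: iptables-save > /etc/sysconfig/iptables.orig-$(date +%Y%m%d%H%M%S)

    - name: Add idempotent ACCEPT and MASQUERADE rules for the flannel/docker bridge
      shell: >-
        (iptables-save | grep -q custom-flannel-docker-1) ||
        iptables -A DOCKER -w -p all -j ACCEPT
        -m comment --comment "custom-flannel-docker-1";
        (iptables-save | grep -q custom-flannel-docker-2) ||
        iptables -t nat -A POSTROUTING -w -o {{ flannel_interface }}
        -m comment --comment "custom-flannel-docker-2" -j MASQUERADE

    - name: Persist the rules, dropping the dynamic KUBE chains
      shell: iptables-save | grep -v KUBE > /etc/sysconfig/iptables
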
diff --git a/playbooks/openstack/openshift-cluster/scale-up.yaml b/playbooks/openstack/openshift-cluster/scale-up.yaml
deleted file mode 100644
index f99ff1349..000000000
--- a/playbooks/openstack/openshift-cluster/scale-up.yaml
+++ /dev/null
@@ -1,70 +0,0 @@
----
-# Get the needed information about the current deployment
-- hosts: masters[0]
- tasks:
- - name: Get number of app nodes
- shell: oc get nodes -l autoscaling=app --no-headers=true | wc -l
- register: oc_old_num_nodes
- - name: Get names of app nodes
- shell: oc get nodes -l autoscaling=app --no-headers=true | cut -f1 -d " "
- register: oc_old_app_nodes
-
-- hosts: localhost
- tasks:
- # Since both the number and the names of the app nodes are needed below,
- # localhost variables for these values need to be set
- - name: Store old number and names of app nodes locally (if there is an existing deployment)
- when: '"masters" in groups'
- register: set_fact_result
- set_fact:
- oc_old_num_nodes: "{{ hostvars[groups['masters'][0]]['oc_old_num_nodes'].stdout }}"
- oc_old_app_nodes: "{{ hostvars[groups['masters'][0]]['oc_old_app_nodes'].stdout_lines }}"
-
- - name: Set default values for old app nodes (if there is no existing deployment)
- when: 'set_fact_result | skipped'
- set_fact:
- oc_old_num_nodes: 0
- oc_old_app_nodes: []
-
- # Set how many nodes are to be added (1 by default)
- - name: Set how many nodes are to be added
- set_fact:
- increment_by: 1
- - name: Check that the number corresponds to scaling up (not down)
- assert:
- that: 'increment_by | int >= 1'
- msg: >
- FAIL: The value of increment_by must be at least 1
- (but it is {{ increment_by | int }}).
- - name: Update openstack_num_nodes variable
- set_fact:
- openstack_num_nodes: "{{ oc_old_num_nodes | int + increment_by | int }}"
-
-# Run provision.yml with a higher number of nodes to create a new app-node VM
-- include: provision.yml
-
-# Run config.yml to perform the OpenShift installation
-
-# Create a new deployment by running the full installation
-- include: install.yml
- when: 'not groups["new_nodes"] | list'
-
-# Scale up an existing deployment
-- include: "../../byo/openshift-node/scaleup.yml"
- vars:
- openshift_ansible_dir: ../../../../openshift-ansible
- when: 'groups["new_nodes"] | list'
-
-# Post-verification: Verify new number of nodes
-- hosts: masters[0]
- tasks:
- - name: Get number of nodes
- shell: oc get nodes -l autoscaling=app --no-headers=true | wc -l
- register: oc_new_num_nodes
- - name: Check that the actual result matches the defined value
- assert:
- that: 'oc_new_num_nodes.stdout | int == (hostvars["localhost"]["oc_old_num_nodes"] | int + hostvars["localhost"]["increment_by"] | int)'
- msg: >
- FAIL: Number of application nodes has not been increased accordingly
- (it should be {{ hostvars["localhost"]["oc_old_num_nodes"] | int + hostvars["localhost"]["increment_by"] | int }}
- but it is {{ oc_new_num_nodes.stdout | int }}).
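
For context, the removed scale-up bookkeeping is simply old count + increment = new count, guarded by two assertions. Below is a self-contained sketch of those checks, with placeholder values standing in for the oc queries above (illustration only, not part of this repository):

---
# Hypothetical illustration of the scale-up count checks (values are examples only).
- hosts: localhost
  gather_facts: false
  vars:
    oc_old_num_nodes: 3   # in the removed playbook: oc get nodes -l autoscaling=app | wc -l
    increment_by: 1       # default used by the removed playbook
    oc_new_num_nodes: 4   # re-queried after provision and install/scale-up
  tasks:
    - name: Check that the number corresponds to scaling up (not down)
      assert:
        that: increment_by | int >= 1
        msg: "increment_by must be at least 1 (but it is {{ increment_by | int }})."

    - name: Check that the node count grew by exactly increment_by
      assert:
        that: oc_new_num_nodes | int == oc_old_num_nodes | int + increment_by | int
        msg: >-
          Expected {{ oc_old_num_nodes | int + increment_by | int }} app nodes,
          found {{ oc_new_num_nodes | int }}.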