-rw-r--r--  .tito/packages/openshift-ansible  2
-rw-r--r--  openshift-ansible.spec  7
-rw-r--r--  playbooks/adhoc/contiv/delete_contiv.yml  2
-rw-r--r--  playbooks/byo/openshift-etcd/migrate.yml  124
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml  18
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_5/storage_upgrade.yml  18
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml  2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml  2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/storage_upgrade.yml  18
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml  2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml  2
-rw-r--r--  playbooks/common/openshift-master/config.yml  15
-rw-r--r--  roles/contiv/defaults/main.yml  25
-rw-r--r--  roles/contiv/meta/main.yml  2
-rw-r--r--  roles/contiv/tasks/default_network.yml  45
-rw-r--r--  roles/contiv/tasks/netmaster.yml  2
-rw-r--r--  roles/contiv/tasks/netplugin_iptables.yml  33
-rw-r--r--  roles/contiv/tasks/packageManagerInstall.yml  5
-rw-r--r--  roles/contiv/tasks/pkgMgrInstallers/centos-install.yml  18
-rw-r--r--  roles/contiv/templates/netplugin.j2  4
-rw-r--r--  roles/contiv_auth_proxy/README.md  29
-rw-r--r--  roles/contiv_auth_proxy/defaults/main.yml  11
-rw-r--r--  roles/contiv_auth_proxy/files/auth-proxy.service  13
-rw-r--r--  roles/contiv_auth_proxy/handlers/main.yml  2
-rw-r--r--  roles/contiv_auth_proxy/tasks/cleanup.yml  10
-rw-r--r--  roles/contiv_auth_proxy/tasks/main.yml  37
-rw-r--r--  roles/contiv_auth_proxy/templates/auth_proxy.j2  36
-rw-r--r--  roles/contiv_auth_proxy/tests/inventory  1
-rw-r--r--  roles/contiv_auth_proxy/tests/test.yml  5
-rw-r--r--  roles/contiv_auth_proxy/vars/main.yml  2
-rw-r--r--  roles/contiv_facts/defaults/main.yaml  3
-rw-r--r--  roles/etcd_migrate/README.md  53
-rw-r--r--  roles/etcd_migrate/defaults/main.yml  3
-rw-r--r--  roles/etcd_migrate/meta/main.yml  17
-rw-r--r--  roles/etcd_migrate/tasks/check.yml  55
-rw-r--r--  roles/etcd_migrate/tasks/check_cluster_health.yml  23
-rw-r--r--  roles/etcd_migrate/tasks/check_cluster_status.yml  32
-rw-r--r--  roles/etcd_migrate/tasks/configure.yml  13
-rw-r--r--  roles/etcd_migrate/tasks/main.yml  25
-rw-r--r--  roles/etcd_migrate/tasks/migrate.yml  53
-rw-r--r--  roles/openshift_hosted/tasks/registry/storage/glusterfs.yml  2
-rw-r--r--  roles/openshift_master/defaults/main.yml  1
-rw-r--r--  roles/openshift_master/tasks/main.yml  20
-rw-r--r--  roles/openshift_master/templates/master.yaml.v1.j2  6
-rw-r--r--  roles/openshift_storage_glusterfs/README.md  3
-rw-r--r--  roles/openshift_storage_glusterfs/defaults/main.yml  10
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml  11
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.6/glusterfs-template.yml  5
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.6/heketi-template.yml  11
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml  51
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml  1
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml  7
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml  1
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml  3
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/main.yml  2
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2  5
56 files changed, 786 insertions, 122 deletions
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index ceb11a9e9..98e277c19 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.6.116-1 ./
+3.6.117-1 ./
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index 0b9531269..7b5587294 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -9,7 +9,7 @@
%global __requires_exclude ^/usr/bin/ansible-playbook$
Name: openshift-ansible
-Version: 3.6.116
+Version: 3.6.117
Release: 1%{?dist}
Summary: Openshift and Atomic Enterprise Ansible
License: ASL 2.0
@@ -280,6 +280,11 @@ Atomic OpenShift Utilities includes
%changelog
+* Mon Jun 19 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.117-1
+- Run storage upgrade pre and post master upgrade (rteague@redhat.com)
+- Introduce etcd migrate role (jchaloup@redhat.com)
+- Add support for rhel, aci, vxlan (srampal@cisco.com)
+
* Sun Jun 18 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.116-1
- PAPR: define openshift_image_tag via command line (rhcarvalho@gmail.com)
- Ensure only one ES pod per PV (peter.portante@redhat.com)
diff --git a/playbooks/adhoc/contiv/delete_contiv.yml b/playbooks/adhoc/contiv/delete_contiv.yml
index 91948c72e..eec6c23a7 100644
--- a/playbooks/adhoc/contiv/delete_contiv.yml
+++ b/playbooks/adhoc/contiv/delete_contiv.yml
@@ -1,5 +1,5 @@
---
-- name: delete contiv
+- name: Uninstall contiv
hosts: all
gather_facts: False
tasks:
diff --git a/playbooks/byo/openshift-etcd/migrate.yml b/playbooks/byo/openshift-etcd/migrate.yml
new file mode 100644
index 000000000..fd02e066e
--- /dev/null
+++ b/playbooks/byo/openshift-etcd/migrate.yml
@@ -0,0 +1,124 @@
+---
+- include: ../openshift-cluster/initialize_groups.yml
+ tags:
+ - always
+
+- include: ../../common/openshift-cluster/evaluate_groups.yml
+ tags:
+ - always
+
+- name: Run pre-checks
+ hosts: oo_etcd_to_config
+ tags:
+ - always
+ roles:
+ - role: etcd_migrate
+ r_etcd_migrate_action: check
+ etcd_peer: "{{ ansible_default_ipv4.address }}"
+
+# TODO(jchaloup): replace the std_include with something minimal so the entire playbook is faster
+# e.g. I don't need to detect the OCP version, install deps, etc.
+- include: ../../common/openshift-cluster/std_include.yml
+ tags:
+ - always
+
+- name: Backup v2 data
+ hosts: oo_etcd_to_config
+ gather_facts: no
+ tags:
+ - always
+ roles:
+ - role: openshift_facts
+ - role: etcd_common
+ r_etcd_common_action: backup
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+ r_etcd_common_backup_tag: pre-migration
+ r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
+ r_etcd_common_backup_sufix_name: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
+
+- name: Gate on etcd backup
+ hosts: localhost
+ connection: local
+ become: no
+ tasks:
+ - set_fact:
+ etcd_backup_completed: "{{ hostvars
+ | oo_select_keys(groups.oo_etcd_to_config)
+ | oo_collect('inventory_hostname', {'r_etcd_common_backup_complete': true}) }}"
+ - set_fact:
+ etcd_backup_failed: "{{ groups.oo_etcd_to_config | difference(etcd_backup_completed) }}"
+ - fail:
+ msg: "Migration cannot continue. The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}"
+ when:
+ - etcd_backup_failed | length > 0
+
+- name: Prepare masters for etcd data migration
+ hosts: oo_masters_to_config
+ tasks:
+ - set_fact:
+ master_services:
+ - "{{ openshift.common.service_type + '-master' }}"
+ - set_fact:
+ master_services:
+ - "{{ openshift.common.service_type + '-master-controllers' }}"
+ - "{{ openshift.common.service_type + '-master-api' }}"
+ when:
+ - (openshift_master_cluster_method is defined and openshift_master_cluster_method == "native") or openshift.common.is_master_system_container | bool
+ - debug:
+ msg: "master service name: {{ master_services }}"
+ - name: Stop masters
+ service:
+ name: "{{ item }}"
+ state: stopped
+ with_items: "{{ master_services }}"
+
+- name: Migrate etcd data from v2 to v3
+ hosts: oo_etcd_to_config
+ gather_facts: no
+ tags:
+ - always
+ roles:
+ - role: etcd_migrate
+ r_etcd_migrate_action: migrate
+ etcd_peer: "{{ ansible_default_ipv4.address }}"
+
+- name: Gate on etcd migration
+ hosts: oo_masters_to_config
+ gather_facts: no
+ tasks:
+ - set_fact:
+ etcd_migration_completed: "{{ hostvars
+ | oo_select_keys(groups.oo_etcd_to_config)
+ | oo_collect('inventory_hostname', {'r_etcd_migrate_success': true}) }}"
+ - set_fact:
+ etcd_migration_failed: "{{ groups.oo_etcd_to_config | difference(etcd_migration_completed) }}"
+
+- name: Configure masters if etcd data migration is successful
+ hosts: oo_masters_to_config
+ roles:
+ - role: etcd_migrate
+ r_etcd_migrate_action: configure
+ when: etcd_migration_failed | length == 0
+ tasks:
+ - debug:
+ msg: "Skipping master re-configuration since migration failed."
+ when:
+ - etcd_migration_failed | length > 0
+
+- name: Start masters after etcd data migration
+ hosts: oo_masters_to_config
+ tasks:
+ - name: Start master services
+ service:
+ name: "{{ item }}"
+ state: started
+ register: service_status
+ # Sometimes the master-api, resp. master-controllers fails to start for the first time
+ until: service_status.state is defined and service_status.state == "started"
+ retries: 5
+ delay: 10
+ with_items: "{{ master_services[::-1] }}"
+ - fail:
+ msg: "Migration failed. The following hosts were not properly migrated: {{ etcd_migration_failed | join(',') }}"
+ when:
+ - etcd_migration_failed | length > 0
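The backup and migration gates in this playbook rely on the repository's custom `oo_select_keys`/`oo_collect` filter plugins. As a rough sketch of what that gate computes (not part of the patch), the same "collect the hosts that reported success" step can be expressed with stock Jinja2 filters:

```yaml
# Sketch only, assuming the same r_etcd_common_backup_complete host fact;
# roughly what the oo_select_keys/oo_collect pipeline above evaluates to.
- set_fact:
    etcd_backup_completed: >-
      {{ groups.oo_etcd_to_config
         | map('extract', hostvars)
         | selectattr('r_etcd_common_backup_complete', 'defined')
         | selectattr('r_etcd_common_backup_complete')
         | map(attribute='inventory_hostname')
         | list }}

- set_fact:
    etcd_backup_failed: "{{ groups.oo_etcd_to_config | difference(etcd_backup_completed) }}"
```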
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index b980909eb..5c19df4c5 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -3,6 +3,16 @@
# Upgrade Masters
###############################################################################
+# oc adm migrate storage should be run prior to etcd v3 upgrade
+# See: https://github.com/openshift/origin/pull/14625#issuecomment-308467060
+- name: Pre master upgrade - Upgrade job storage
+ hosts: oo_first_master
+ tasks:
+ - name: Upgrade job storage
+ command: >
+ {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ migrate storage --include=jobs --confirm
+
# If facts cache were for some reason deleted, this fact may not be set, and if not set
# it will always default to true. This causes problems for the etcd data dir fact detection
# so we must first make sure this is set correctly before attempting the backup.
@@ -133,6 +143,14 @@
- set_fact:
master_update_complete: True
+- name: Post master upgrade - Upgrade job storage
+ hosts: oo_first_master
+ tasks:
+ - name: Upgrade job storage
+ command: >
+ {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ migrate storage --include=jobs --confirm
+
##############################################################################
# Gate on master update complete
##############################################################################
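`oc adm migrate storage` performs no-op updates on the selected resources so the API server rewrites them in its current preferred storage format; with `--confirm` the writes are applied, without it the command only reports what would change. A hedged sketch of an optional dry-run task an operator could run first (not part of this patch):

```yaml
# Hypothetical pre-check, not part of this patch: the same command without
# --confirm, which should only report the resources that would be rewritten.
- name: Preview job storage migration (dry run)
  command: >
    {{ openshift.common.client_binary }} adm
    --config={{ openshift.common.config_base }}/master/admin.kubeconfig
    migrate storage --include=jobs
  register: migrate_storage_preview
  changed_when: false
```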
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/storage_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/storage_upgrade.yml
deleted file mode 100644
index 48c69eccd..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_5/storage_upgrade.yml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-###############################################################################
-# Post upgrade - Upgrade job storage
-###############################################################################
-- name: Upgrade job storage
- hosts: oo_first_master
- roles:
- - { role: openshift_cli }
- vars:
- # Another spot where we assume docker is running and do not want to accidentally trigger an unsafe
- # restart.
- skip_docker_role: True
- tasks:
- - name: Upgrade job storage
- command: >
- {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
- migrate storage --include=jobs --confirm
- run_once: true
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml
index e63b03e51..4e7c14e94 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml
@@ -115,5 +115,3 @@
- include: ../upgrade_nodes.yml
- include: ../post_control_plane.yml
-
-- include: storage_upgrade.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
index 74c2964aa..45b664d06 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
@@ -119,5 +119,3 @@
master_config_hook: "v3_5/master_config_upgrade.yml"
- include: ../post_control_plane.yml
-
-- include: storage_upgrade.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/storage_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/storage_upgrade.yml
deleted file mode 100644
index 48c69eccd..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/storage_upgrade.yml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-###############################################################################
-# Post upgrade - Upgrade job storage
-###############################################################################
-- name: Upgrade job storage
- hosts: oo_first_master
- roles:
- - { role: openshift_cli }
- vars:
- # Another spot where we assume docker is running and do not want to accidentally trigger an unsafe
- # restart.
- skip_docker_role: True
- tasks:
- - name: Upgrade job storage
- command: >
- {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
- migrate storage --include=jobs --confirm
- run_once: true
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
index 5d41b84d0..5b9ac9e8f 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
@@ -115,5 +115,3 @@
- include: ../upgrade_nodes.yml
- include: ../post_control_plane.yml
-
-- include: storage_upgrade.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
index a66fb51ff..a470c7595 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
@@ -119,5 +119,3 @@
master_config_hook: "v3_6/master_config_upgrade.yml"
- include: ../post_control_plane.yml
-
-- include: storage_upgrade.yml
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
index 429460b2c..70108fb7a 100644
--- a/playbooks/common/openshift-master/config.yml
+++ b/playbooks/common/openshift-master/config.yml
@@ -27,7 +27,17 @@
- name: Set clean install fact
set_fact:
- l_clean_install: "{{ not master_config_stat.stat.exists }}"
+ l_clean_install: "{{ not master_config_stat.stat.exists | bool }}"
+
+ - name: Determine if etcd3 storage is in use
+ command: grep -Pzo "storage-backend:\n.*etcd3" /etc/origin/master/master-config.yaml -q
+ register: etcd3_grep
+ failed_when: false
+ changed_when: false
+
+ - name: Set etcd3 fact
+ set_fact:
+ l_etcd3_enabled: "{{ etcd3_grep.rc == 0 | bool }}"
- set_fact:
openshift_master_pod_eviction_timeout: "{{ lookup('oo_option', 'openshift_master_pod_eviction_timeout') | default(none, true) }}"
@@ -131,7 +141,8 @@
etcd_cert_subdir: "openshift-master-{{ openshift.common.hostname }}"
etcd_cert_config_dir: "{{ openshift.common.config_base }}/master"
etcd_cert_prefix: "master.etcd-"
- r_openshift_master_clean_install: hostvars[groups.oo_first_master.0].l_clean_install
+ r_openshift_master_clean_install: "{{ hostvars[groups.oo_first_master.0].l_clean_install }}"
+ r_openshift_master_etcd3_storage: "{{ hostvars[groups.oo_first_master.0].l_etcd3_enabled }}"
- role: nuage_master
when: openshift.common.use_nuage | bool
- role: calico_master
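The `grep -Pzo "storage-backend:\n.*etcd3"` check treats the input as NUL-separated (`-z`) so the `\n` in the pattern can match across lines. Based on the `etcd_migrate` configure task and the master template changes later in this patch, the fragment of `/etc/origin/master/master-config.yaml` it is meant to detect looks roughly like:

```yaml
kubernetesMasterConfig:
  apiServerArguments:
    storage-backend:
    - etcd3
```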
diff --git a/roles/contiv/defaults/main.yml b/roles/contiv/defaults/main.yml
index 1ccae61f2..8c4d19537 100644
--- a/roles/contiv/defaults/main.yml
+++ b/roles/contiv/defaults/main.yml
@@ -1,12 +1,12 @@
---
# The version of Contiv binaries to use
-contiv_version: 1.0.0-beta.3-02-21-2017.20-52-42.UTC
+contiv_version: 1.0.1
# The version of cni binaries
cni_version: v0.4.0
-contiv_default_subnet: "20.1.1.1/24"
-contiv_default_gw: "20.1.1.254"
+contiv_default_subnet: "10.128.0.0/16"
+contiv_default_gw: "10.128.254.254"
# TCP port that Netmaster listens for network connections
netmaster_port: 9999
@@ -69,6 +69,9 @@ netplugin_fwd_mode: bridge
# Contiv fabric mode aci|default
contiv_fabric_mode: default
+# Global VLAN range
+contiv_vlan_range: "2900-3000"
+
# Encapsulation type vlan|vxlan to use for instantiating container networks
contiv_encap_mode: vlan
@@ -78,8 +81,8 @@ netplugin_driver: ovs
# Create a default Contiv network for use by pods
contiv_default_network: true
-# VLAN/ VXLAN tag value to be used for the default network
-contiv_default_network_tag: 1
+# Statically configured tag for default network (if needed)
+contiv_default_network_tag: ""
#SRFIXME (use the openshift variables)
https_proxy: ""
@@ -95,6 +98,9 @@ apic_leaf_nodes: ""
apic_phys_dom: ""
apic_contracts_unrestricted_mode: no
apic_epg_bridge_domain: not_specified
+apic_configure_default_policy: false
+apic_default_external_contract: "uni/tn-common/brc-default"
+apic_default_app_profile: "contiv-infra-app-profile"
is_atomic: False
kube_cert_dir: "/data/src/github.com/openshift/origin/openshift.local.config/master"
master_name: "{{ groups['masters'][0] }}"
@@ -104,3 +110,12 @@ kube_ca_cert: "{{ kube_cert_dir }}/ca.crt"
kube_key: "{{ kube_cert_dir }}/admin.key"
kube_cert: "{{ kube_cert_dir }}/admin.crt"
kube_master_api_port: 8443
+
+# contivh1 default subnet and gateway
+#contiv_h1_subnet_default: "132.1.1.0/24"
+#contiv_h1_gw_default: "132.1.1.1"
+contiv_h1_subnet_default: "10.129.0.0/16"
+contiv_h1_gw_default: "10.129.0.1"
+
+# contiv default private subnet for ext access
+contiv_private_ext_subnet: "10.130.0.0/16"
diff --git a/roles/contiv/meta/main.yml b/roles/contiv/meta/main.yml
index 3223afb6e..da6409f1e 100644
--- a/roles/contiv/meta/main.yml
+++ b/roles/contiv/meta/main.yml
@@ -26,3 +26,5 @@ dependencies:
etcd_url_scheme: http
etcd_peer_url_scheme: http
when: contiv_role == "netmaster"
+- role: contiv_auth_proxy
+ when: (contiv_role == "netmaster") and (contiv_enable_auth_proxy == true)
diff --git a/roles/contiv/tasks/default_network.yml b/roles/contiv/tasks/default_network.yml
index 9cf98bb80..f679443e0 100644
--- a/roles/contiv/tasks/default_network.yml
+++ b/roles/contiv/tasks/default_network.yml
@@ -6,10 +6,53 @@
retries: 9
delay: 10
+- name: Contiv | Set globals
+ command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" global set --fabric-mode {{ contiv_fabric_mode }} --vlan-range {{ contiv_vlan_range }} --fwd-mode {{ netplugin_fwd_mode }} --private-subnet {{ contiv_private_ext_subnet }}'
+
+- name: Contiv | Set arp mode to flood if ACI
+ command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" global set --arp-mode flood'
+ when: contiv_fabric_mode == "aci"
+
- name: Contiv | Check if default-net exists
command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" net ls'
register: net_result
- name: Contiv | Create default-net
- command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" net create --subnet={{ contiv_default_subnet }} -e {{ contiv_encap_mode }} -p {{ contiv_default_network_tag }} --gateway={{ contiv_default_gw }} default-net'
+ command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" net create --subnet={{ contiv_default_subnet }} -e {{ contiv_encap_mode }} -p {{ contiv_default_network_tag }} --gateway {{ contiv_default_gw }} default-net'
when: net_result.stdout.find("default-net") == -1
+
+- name: Contiv | Create host access infra network for VxLan routing case
+ command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" net create --subnet={{ contiv_h1_subnet_default }} --gateway={{ contiv_h1_gw_default }} --nw-type="infra" contivh1'
+ when: (contiv_encap_mode == "vxlan") and (netplugin_fwd_mode == "routing")
+
+#- name: Contiv | Create an allow-all policy for the default-group
+# command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" policy create ose-allow-all-policy'
+# when: contiv_fabric_mode == "aci"
+
+- name: Contiv | Set up aci external contract to consume default external contract
+ command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" external-contracts create -c -a {{ apic_default_external_contract }} oseExtToConsume'
+ when: (contiv_fabric_mode == "aci") and (apic_configure_default_policy == true)
+
+- name: Contiv | Set up aci external contract to provide default external contract
+ command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" external-contracts create -p -a {{ apic_default_external_contract }} oseExtToProvide'
+ when: (contiv_fabric_mode == "aci") and (apic_configure_default_policy == true)
+
+- name: Contiv | Create aci default-group
+ command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" group create default-net default-group'
+ when: contiv_fabric_mode == "aci"
+
+- name: Contiv | Add external contracts to the default-group
+ command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" group create -e oseExtToConsume -e oseExtToProvide default-net default-group'
+ when: (contiv_fabric_mode == "aci") and (apic_configure_default_policy == true)
+
+#- name: Contiv | Add policy rule 1 for allow-all policy
+# command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" policy rule-add -d in --action allow ose-allow-all-policy 1'
+# when: contiv_fabric_mode == "aci"
+
+#- name: Contiv | Add policy rule 2 for allow-all policy
+# command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" policy rule-add -d out --action allow ose-allow-all-policy 2'
+# when: contiv_fabric_mode == "aci"
+
+- name: Contiv | Create default aci app profile
+ command: 'netctl --netmaster "http://{{ inventory_hostname }}:{{ netmaster_port }}" app-profile create -g default-group {{ apic_default_app_profile }}'
+ when: contiv_fabric_mode == "aci"
diff --git a/roles/contiv/tasks/netmaster.yml b/roles/contiv/tasks/netmaster.yml
index 5057767b8..acaf7386e 100644
--- a/roles/contiv/tasks/netmaster.yml
+++ b/roles/contiv/tasks/netmaster.yml
@@ -23,7 +23,7 @@
line: "{{ hostvars[item]['ansible_' + netmaster_interface].ipv4.address }} netmaster"
state: present
when: hostvars[item]['ansible_' + netmaster_interface].ipv4.address is defined
- with_items: groups['masters']
+ with_items: "{{ groups['masters'] }}"
- name: Netmaster | Create netmaster symlinks
file:
diff --git a/roles/contiv/tasks/netplugin_iptables.yml b/roles/contiv/tasks/netplugin_iptables.yml
index 8c348ac67..184c595c5 100644
--- a/roles/contiv/tasks/netplugin_iptables.yml
+++ b/roles/contiv/tasks/netplugin_iptables.yml
@@ -23,7 +23,36 @@
notify: Save iptables rules
- name: Netplugin IPtables | Open vxlan port with iptables
- command: /sbin/iptables -I INPUT 1 -p udp --dport 8472 -j ACCEPT -m comment --comment "vxlan"
+ command: /sbin/iptables -I INPUT 1 -p udp --dport 8472 -j ACCEPT -m comment --comment "netplugin vxlan 8472"
+ when: iptablesrules.stdout.find("netplugin vxlan 8472") == -1
+ notify: Save iptables rules
- name: Netplugin IPtables | Open vxlan port with iptables
- command: /sbin/iptables -I INPUT 1 -p udp --dport 4789 -j ACCEPT -m comment --comment "vxlan"
+ command: /sbin/iptables -I INPUT 1 -p udp --dport 4789 -j ACCEPT -m comment --comment "netplugin vxlan 4789"
+ when: iptablesrules.stdout.find("netplugin vxlan 4789") == -1
+ notify: Save iptables rules
+
+- name: Netplugin IPtables | Allow from contivh0
+ command: /sbin/iptables -I FORWARD 1 -i contivh0 -j ACCEPT -m comment --comment "contivh0 FORWARD input"
+ when: iptablesrules.stdout.find("contivh0 FORWARD input") == -1
+ notify: Save iptables rules
+
+- name: Netplugin IPtables | Allow to contivh0
+ command: /sbin/iptables -I FORWARD 1 -o contivh0 -j ACCEPT -m comment --comment "contivh0 FORWARD output"
+ when: iptablesrules.stdout.find("contivh0 FORWARD output") == -1
+ notify: Save iptables rules
+
+- name: Netplugin IPtables | Allow from contivh1
+ command: /sbin/iptables -I FORWARD 1 -i contivh1 -j ACCEPT -m comment --comment "contivh1 FORWARD input"
+ when: iptablesrules.stdout.find("contivh1 FORWARD input") == -1
+ notify: Save iptables rules
+
+- name: Netplugin IPtables | Allow to contivh1
+ command: /sbin/iptables -I FORWARD 1 -o contivh1 -j ACCEPT -m comment --comment "contivh1 FORWARD output"
+ when: iptablesrules.stdout.find("contivh1 FORWARD output") == -1
+ notify: Save iptables rules
+
+- name: Netplugin IPtables | Allow dns
+ command: /sbin/iptables -I INPUT 1 -p udp --dport 53 -j ACCEPT -m comment --comment "contiv dns"
+ when: iptablesrules.stdout.find("contiv dns") == -1
+ notify: Save iptables rules
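All of the new rules are guarded by `iptablesrules.stdout` lookups, a register this hunk assumes is populated earlier in `netplugin_iptables.yml`. A minimal sketch of what that listing task (assumed, not shown in this hunk) would look like:

```yaml
# Assumption: a task along these lines exists earlier in the file and fills
# the `iptablesrules` register consulted by the when: guards above.
- name: Netplugin IPtables | Get current iptables rules
  command: /sbin/iptables -L --line-numbers
  register: iptablesrules
  changed_when: false
```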
diff --git a/roles/contiv/tasks/packageManagerInstall.yml b/roles/contiv/tasks/packageManagerInstall.yml
index 2eff1b85f..e0d48e643 100644
--- a/roles/contiv/tasks/packageManagerInstall.yml
+++ b/roles/contiv/tasks/packageManagerInstall.yml
@@ -4,9 +4,10 @@
did_install: false
- include: pkgMgrInstallers/centos-install.yml
- when: ansible_distribution == "CentOS" and not is_atomic
+ when: (ansible_os_family == "RedHat") and
+ not is_atomic
- name: Package Manager | Set fact saying we did CentOS package install
set_fact:
did_install: true
- when: ansible_distribution == "CentOS"
+ when: (ansible_os_family == "RedHat")
diff --git a/roles/contiv/tasks/pkgMgrInstallers/centos-install.yml b/roles/contiv/tasks/pkgMgrInstallers/centos-install.yml
index 51c3d35ac..91e6aadf3 100644
--- a/roles/contiv/tasks/pkgMgrInstallers/centos-install.yml
+++ b/roles/contiv/tasks/pkgMgrInstallers/centos-install.yml
@@ -1,13 +1,13 @@
---
-- name: PkgMgr CentOS | Install net-tools pkg for route
+- name: PkgMgr RHEL/CentOS | Install net-tools pkg for route
yum:
pkg=net-tools
state=latest
-- name: PkgMgr CentOS | Get openstack kilo rpm
+- name: PkgMgr RHEL/CentOS | Get openstack ocata rpm
get_url:
- url: https://repos.fedorapeople.org/repos/openstack/openstack-kilo/rdo-release-kilo-2.noarch.rpm
- dest: /tmp/rdo-release-kilo-2.noarch.rpm
+ url: https://repos.fedorapeople.org/repos/openstack/openstack-ocata/rdo-release-ocata-2.noarch.rpm
+ dest: /tmp/rdo-release-ocata-2.noarch.rpm
validate_certs: False
environment:
http_proxy: "{{ http_proxy|default('') }}"
@@ -16,15 +16,15 @@
tags:
- ovs_install
-- name: PkgMgr CentOS | Install openstack kilo rpm
- yum: name=/tmp/rdo-release-kilo-2.noarch.rpm state=present
+- name: PkgMgr RHEL/CentOS | Install openstack ocata rpm
+ yum: name=/tmp/rdo-release-ocata-2.noarch.rpm state=present
tags:
- ovs_install
-- name: PkgMgr CentOS | Install ovs
+- name: PkgMgr RHEL/CentOS | Install ovs
yum:
- pkg=openvswitch
- state=latest
+ pkg=openvswitch-2.5.0-2.el7.x86_64
+ state=present
environment:
http_proxy: "{{ http_proxy|default('') }}"
https_proxy: "{{ https_proxy|default('') }}"
diff --git a/roles/contiv/templates/netplugin.j2 b/roles/contiv/templates/netplugin.j2
index f3d26c037..a4928cc3d 100644
--- a/roles/contiv/templates/netplugin.j2
+++ b/roles/contiv/templates/netplugin.j2
@@ -1,9 +1,7 @@
{% if contiv_encap_mode == "vlan" %}
NETPLUGIN_ARGS='-vlan-if {{ netplugin_interface }} -ctrl-ip {{ netplugin_ctrl_ip }} -plugin-mode kubernetes -cluster-store etcd://{{ etcd_url }}'
{% endif %}
-{# Note: Commenting out vxlan encap mode support until it is fully supported
{% if contiv_encap_mode == "vxlan" %}
-NETPLUGIN_ARGS='-vtep-ip {{ netplugin_ctrl_ip }} -e {{contiv_encap_mode}} -ctrl-ip {{ netplugin_ctrl_ip }} -plugin-mode kubernetes -cluster-store etcd://{{ etcd_url }}'
+NETPLUGIN_ARGS='-vtep-ip {{ netplugin_ctrl_ip }} -ctrl-ip {{ netplugin_ctrl_ip }} -plugin-mode kubernetes -cluster-store etcd://{{ etcd_url }}'
{% endif %}
-#}
diff --git a/roles/contiv_auth_proxy/README.md b/roles/contiv_auth_proxy/README.md
new file mode 100644
index 000000000..287b6c148
--- /dev/null
+++ b/roles/contiv_auth_proxy/README.md
@@ -0,0 +1,29 @@
+contiv_auth_proxy
+=========
+
+Role to install Contiv API Proxy and UI
+
+Requirements
+------------
+
+Docker needs to be installed to run the auth proxy container.
+
+Role Variables
+--------------
+
+auth_proxy_image specifies the image with version tag to be used to spin up the auth proxy container.
+auth_proxy_cert, auth_proxy_key specify files to use for the proxy server certificates.
+auth_proxy_port is the host port and auth_proxy_datastore the cluster data store address.
+
+Dependencies
+------------
+
+docker
+
+Example Playbook
+----------------
+
+- hosts: netplugin-node
+ become: true
+ roles:
+ - { role: auth_proxy, auth_proxy_port: 10000, auth_proxy_datastore: etcd://netmaster:22379 }
diff --git a/roles/contiv_auth_proxy/defaults/main.yml b/roles/contiv_auth_proxy/defaults/main.yml
new file mode 100644
index 000000000..4e637a947
--- /dev/null
+++ b/roles/contiv_auth_proxy/defaults/main.yml
@@ -0,0 +1,11 @@
+---
+auth_proxy_image: "contiv/auth_proxy:1.0.0-beta.2"
+auth_proxy_port: 10000
+contiv_certs: "/var/contiv/certs"
+cluster_store: "{{ hostvars[groups['masters'][0]]['ansible_' + netmaster_interface].ipv4.address }}:22379"
+auth_proxy_cert: "{{ contiv_certs }}/auth_proxy_cert.pem"
+auth_proxy_key: "{{ contiv_certs }}/auth_proxy_key.pem"
+auth_proxy_datastore: "{{ cluster_store }}"
+auth_proxy_binaries: "/var/contiv_cache"
+auth_proxy_local_install: False
+auth_proxy_rule_comment: "Contiv auth proxy service"
diff --git a/roles/contiv_auth_proxy/files/auth-proxy.service b/roles/contiv_auth_proxy/files/auth-proxy.service
new file mode 100644
index 000000000..7cd2edff1
--- /dev/null
+++ b/roles/contiv_auth_proxy/files/auth-proxy.service
@@ -0,0 +1,13 @@
+[Unit]
+Description=Contiv Proxy and UI
+After=auditd.service systemd-user-sessions.service time-sync.target docker.service
+
+[Service]
+ExecStart=/usr/bin/auth_proxy.sh start
+ExecStop=/usr/bin/auth_proxy.sh stop
+KillMode=control-group
+Restart=on-failure
+RestartSec=10
+
+[Install]
+WantedBy=multi-user.target
diff --git a/roles/contiv_auth_proxy/handlers/main.yml b/roles/contiv_auth_proxy/handlers/main.yml
new file mode 100644
index 000000000..9cb9bea49
--- /dev/null
+++ b/roles/contiv_auth_proxy/handlers/main.yml
@@ -0,0 +1,2 @@
+---
+# handlers file for auth_proxy
diff --git a/roles/contiv_auth_proxy/tasks/cleanup.yml b/roles/contiv_auth_proxy/tasks/cleanup.yml
new file mode 100644
index 000000000..a29659cc9
--- /dev/null
+++ b/roles/contiv_auth_proxy/tasks/cleanup.yml
@@ -0,0 +1,10 @@
+---
+
+- name: stop auth-proxy container
+ service: name=auth-proxy state=stopped
+
+- name: cleanup iptables for auth proxy
+ shell: iptables -D INPUT -p tcp --dport {{ item }} -j ACCEPT -m comment --comment "{{ auth_proxy_rule_comment }} ({{ item }})"
+ become: true
+ with_items:
+ - "{{ auth_proxy_port }}"
diff --git a/roles/contiv_auth_proxy/tasks/main.yml b/roles/contiv_auth_proxy/tasks/main.yml
new file mode 100644
index 000000000..74e7bf794
--- /dev/null
+++ b/roles/contiv_auth_proxy/tasks/main.yml
@@ -0,0 +1,37 @@
+---
+# tasks file for auth_proxy
+- name: setup iptables for auth proxy
+ shell: >
+ ( iptables -L INPUT | grep "{{ auth_proxy_rule_comment }} ({{ item }})" ) || \
+ iptables -I INPUT 1 -p tcp --dport {{ item }} -j ACCEPT -m comment --comment "{{ auth_proxy_rule_comment }} ({{ item }})"
+ become: true
+ with_items:
+ - "{{ auth_proxy_port }}"
+
+# Load the auth-proxy-image from local tar. Ignore any errors to handle the
+# case where the image is not built in
+- name: copy auth-proxy image
+ copy: src={{ auth_proxy_binaries }}/auth-proxy-image.tar dest=/tmp/auth-proxy-image.tar
+ when: auth_proxy_local_install == True
+
+- name: load auth-proxy image
+ shell: docker load -i /tmp/auth-proxy-image.tar
+ when: auth_proxy_local_install == True
+
+- name: create cert folder for proxy
+ file: path=/var/contiv/certs state=directory
+
+- name: copy shell script for starting auth-proxy
+ template: src=auth_proxy.j2 dest=/usr/bin/auth_proxy.sh mode=u=rwx,g=rx,o=rx
+
+- name: copy cert for starting auth-proxy
+ copy: src=cert.pem dest=/var/contiv/certs/auth_proxy_cert.pem mode=u=rw,g=r,o=r
+
+- name: copy key for starting auth-proxy
+ copy: src=key.pem dest=/var/contiv/certs/auth_proxy_key.pem mode=u=rw,g=r,o=r
+
+- name: copy systemd units for auth-proxy
+ copy: src=auth-proxy.service dest=/etc/systemd/system/auth-proxy.service
+
+- name: start auth-proxy container
+ systemd: name=auth-proxy daemon_reload=yes state=started enabled=yes
diff --git a/roles/contiv_auth_proxy/templates/auth_proxy.j2 b/roles/contiv_auth_proxy/templates/auth_proxy.j2
new file mode 100644
index 000000000..e82e5b4ab
--- /dev/null
+++ b/roles/contiv_auth_proxy/templates/auth_proxy.j2
@@ -0,0 +1,36 @@
+#!/bin/bash
+
+usage="$0 start/stop"
+if [ $# -ne 1 ]; then
+ echo USAGE: $usage
+ exit 1
+fi
+
+case $1 in
+start)
+ set -e
+
+ /usr/bin/docker run --rm \
+ -p 10000:{{ auth_proxy_port }} \
+ --net=host --name=auth-proxy \
+ -e NO_NETMASTER_STARTUP_CHECK=1 \
+ -v /var/contiv:/var/contiv \
+ {{ auth_proxy_image }} \
+ --tls-key-file={{ auth_proxy_key }} \
+ --tls-certificate={{ auth_proxy_cert }} \
+ --data-store-address={{ auth_proxy_datastore }} \
+ --netmaster-address={{ service_vip }}:9999 \
+ --listen-address=:10000
+ ;;
+
+stop)
+ # don't stop on error
+ /usr/bin/docker stop auth-proxy
+ /usr/bin/docker rm -f -v auth-proxy
+ ;;
+
+*)
+ echo USAGE: $usage
+ exit 1
+ ;;
+esac
diff --git a/roles/contiv_auth_proxy/tests/inventory b/roles/contiv_auth_proxy/tests/inventory
new file mode 100644
index 000000000..d18580b3c
--- /dev/null
+++ b/roles/contiv_auth_proxy/tests/inventory
@@ -0,0 +1 @@
+localhost
\ No newline at end of file
diff --git a/roles/contiv_auth_proxy/tests/test.yml b/roles/contiv_auth_proxy/tests/test.yml
new file mode 100644
index 000000000..2af3250cd
--- /dev/null
+++ b/roles/contiv_auth_proxy/tests/test.yml
@@ -0,0 +1,5 @@
+---
+- hosts: localhost
+ remote_user: root
+ roles:
+ - auth_proxy
diff --git a/roles/contiv_auth_proxy/vars/main.yml b/roles/contiv_auth_proxy/vars/main.yml
new file mode 100644
index 000000000..9032766c4
--- /dev/null
+++ b/roles/contiv_auth_proxy/vars/main.yml
@@ -0,0 +1,2 @@
+---
+# vars file for auth_proxy
diff --git a/roles/contiv_facts/defaults/main.yaml b/roles/contiv_facts/defaults/main.yaml
index a6c08fa63..7b8150954 100644
--- a/roles/contiv_facts/defaults/main.yaml
+++ b/roles/contiv_facts/defaults/main.yaml
@@ -8,3 +8,6 @@ bin_dir: /usr/bin
ansible_temp_dir: /tmp/.ansible/files
source_type: packageManager
+
+# Whether or not to also install and enable the Contiv auth_proxy
+contiv_enable_auth_proxy: false
diff --git a/roles/etcd_migrate/README.md b/roles/etcd_migrate/README.md
new file mode 100644
index 000000000..369e78ff2
--- /dev/null
+++ b/roles/etcd_migrate/README.md
@@ -0,0 +1,53 @@
+etcd_migrate
+=========
+
+Offline etcd migration of data from v2 to v3
+
+Requirements
+------------
+
+It is expected that no consumers of the etcd data are accessing it during the migration.
+Otherwise, the migrated v3 data can end up out of sync with the v2 data and leave the etcd cluster unhealthy.
+
+The role itself is responsible for:
+- checking etcd cluster health and raft status before the migration
+- checking for the presence of any v3 data (if any is found, the migration is stopped)
+- migration of v2 data to v3 data (including attaching leases to keys prefixed with "/kubernetes.io/events" and "/kubernetes.io/masterleases")
+- validation of the migrated data (every v2 key is present as a v3 key and set to the identical value)
+
+The migration itself requires an etcd member to be down in the process. Once the migration is done, the etcd member is started.
+
+Role Variables
+--------------
+
+TBD
+
+Dependencies
+------------
+
+- etcd_common
+- lib_utils
+
+Example Playbook
+----------------
+
+```yaml
+- name: Migrate etcd data from v2 to v3
+ hosts: oo_etcd_to_config
+ gather_facts: no
+ tasks:
+ - include_role:
+ name: openshift_etcd_migrate
+ vars:
+ etcd_peer: "{{ ansible_default_ipv4.address }}"
+```
+
+License
+-------
+
+Apache License, Version 2.0
+
+Author Information
+------------------
+
+Jan Chaloupka (jchaloup@redhat.com)
diff --git a/roles/etcd_migrate/defaults/main.yml b/roles/etcd_migrate/defaults/main.yml
new file mode 100644
index 000000000..05cf41fbb
--- /dev/null
+++ b/roles/etcd_migrate/defaults/main.yml
@@ -0,0 +1,3 @@
+---
+# Default action when calling this role, choices: check, migrate, configure
+r_etcd_migrate_action: migrate
diff --git a/roles/etcd_migrate/meta/main.yml b/roles/etcd_migrate/meta/main.yml
new file mode 100644
index 000000000..f3cabbef6
--- /dev/null
+++ b/roles/etcd_migrate/meta/main.yml
@@ -0,0 +1,17 @@
+---
+galaxy_info:
+ author: Jan Chaloupka
+ description: Etcd migration
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 2.1
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+ - system
+dependencies:
+- { role: etcd_common }
+- { role: lib_utils }
diff --git a/roles/etcd_migrate/tasks/check.yml b/roles/etcd_migrate/tasks/check.yml
new file mode 100644
index 000000000..2f07713bc
--- /dev/null
+++ b/roles/etcd_migrate/tasks/check.yml
@@ -0,0 +1,55 @@
+---
+# Check the cluster is healthy
+- include: check_cluster_health.yml
+
+# Check if the member has v3 data already
+# Run the migration only if the data are v2
+- name: Check if there are any v3 data
+ command: >
+ etcdctl --cert {{ etcd_peer_cert_file }} --key {{ etcd_peer_key_file }} --cacert {{ etcd_peer_ca_file }} --endpoints 'https://{{ etcd_peer }}:2379' get "" --from-key --keys-only -w json --limit 1
+ environment:
+ ETCDCTL_API: 3
+ register: l_etcdctl_output
+
+- fail:
+ msg: "Unable to get a number of v3 keys"
+ when: l_etcdctl_output.rc != 0
+
+- fail:
+ msg: "The etcd has at least one v3 key"
+ when: "'count' in (l_etcdctl_output.stdout | from_json) and (l_etcdctl_output.stdout | from_json).count != 0"
+
+
+# TODO(jchaloup): once the until loop can be used over include/block,
+# remove the repetitive code
+# - until loop not supported over include statement (nor block)
+# https://github.com/ansible/ansible/issues/17098
+# - with_items not supported over block
+
+# Check the cluster status for the first time
+- include: check_cluster_status.yml
+
+# Check the cluster status for the second time
+- block:
+ - debug:
+ msg: "l_etcd_cluster_status_ok: {{ l_etcd_cluster_status_ok }}"
+ - name: Wait a while before another check
+ pause:
+ seconds: 5
+ when: not l_etcd_cluster_status_ok | bool
+
+ - include: check_cluster_status.yml
+ when: not l_etcd_cluster_status_ok | bool
+
+
+# Check the cluster status for the third time
+- block:
+ - debug:
+ msg: "l_etcd_cluster_status_ok: {{ l_etcd_cluster_status_ok }}"
+ - name: Wait a while before another check
+ pause:
+ seconds: 5
+ when: not l_etcd_cluster_status_ok | bool
+
+ - include: check_cluster_status.yml
+ when: not l_etcd_cluster_status_ok | bool
diff --git a/roles/etcd_migrate/tasks/check_cluster_health.yml b/roles/etcd_migrate/tasks/check_cluster_health.yml
new file mode 100644
index 000000000..1abd6a32f
--- /dev/null
+++ b/roles/etcd_migrate/tasks/check_cluster_health.yml
@@ -0,0 +1,23 @@
+---
+- name: Check cluster health
+ command: >
+ etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt --endpoint https://{{ etcd_peer }}:2379 cluster-health
+ register: etcd_cluster_health
+ changed_when: false
+ failed_when: false
+
+- name: Assume a member is not healthy
+ set_fact:
+ etcd_member_healthy: false
+
+- name: Get member item health status
+ set_fact:
+ etcd_member_healthy: true
+ with_items: "{{ etcd_cluster_health.stdout_lines }}"
+ when: "(etcd_peer in item) and ('is healthy' in item)"
+
+- name: Check the etcd cluster health
+ # TODO(jchaloup): should we fail or ask user if he wants to continue? Or just wait until the cluster is healthy?
+ fail:
+ msg: "Etcd member {{ etcd_peer }} is not healthy"
+ when: not etcd_member_healthy
diff --git a/roles/etcd_migrate/tasks/check_cluster_status.yml b/roles/etcd_migrate/tasks/check_cluster_status.yml
new file mode 100644
index 000000000..90fe385c1
--- /dev/null
+++ b/roles/etcd_migrate/tasks/check_cluster_status.yml
@@ -0,0 +1,32 @@
+---
+# etcd_ip originates from etcd_common role
+- name: Check cluster status
+ command: >
+ etcdctl --cert /etc/etcd/peer.crt --key /etc/etcd/peer.key --cacert /etc/etcd/ca.crt --endpoints 'https://{{ etcd_peer }}:2379' -w json endpoint status
+ environment:
+ ETCDCTL_API: 3
+ register: l_etcd_cluster_status
+
+- name: Retrieve raftIndex
+ set_fact:
+ etcd_member_raft_index: "{{ (l_etcd_cluster_status.stdout | from_json)[0]['Status']['raftIndex'] }}"
+
+- block:
+ # http://docs.ansible.com/ansible/playbooks_filters.html#extracting-values-from-containers
+ - name: Group all raftIndices into a list
+ set_fact:
+ etcd_members_raft_indices: "{{ groups['oo_etcd_to_config'] | map('extract', hostvars, 'etcd_member_raft_index') | list | unique }}"
+
+ - name: Check the minimum and the maximum of raftIndices is at most 1
+ set_fact:
+ etcd_members_raft_indices_diff: "{{ ((etcd_members_raft_indices | max | int) - (etcd_members_raft_indices | min | int)) | int }}"
+
+ - debug:
+ msg: "Raft indices difference: {{ etcd_members_raft_indices_diff }}"
+
+ when: inventory_hostname in groups.oo_etcd_to_config[0]
+
+# The cluster raft status is ok if the difference of the max and min raft index is at most 1
+- name: capture the status
+ set_fact:
+ l_etcd_cluster_status_ok: "{{ hostvars[groups.oo_etcd_to_config[0]]['etcd_members_raft_indices_diff'] | int < 2 }}"
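The raftIndex comes from the JSON printed by `etcdctl -w json endpoint status`, which is a list with one entry per endpoint. Only the field the task above actually dereferences is shown below; values are illustrative:

```yaml
# Trimmed, illustrative shape of the `endpoint status -w json` output
# (other fields omitted); the task above reads [0].Status.raftIndex.
- Status:
    raftIndex: 1201
```

With per-member indices of, say, 1200, 1201 and 1201, `etcd_members_raft_indices_diff` is 1 and `l_etcd_cluster_status_ok` evaluates to true.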
diff --git a/roles/etcd_migrate/tasks/configure.yml b/roles/etcd_migrate/tasks/configure.yml
new file mode 100644
index 000000000..a305d5bf3
--- /dev/null
+++ b/roles/etcd_migrate/tasks/configure.yml
@@ -0,0 +1,13 @@
+---
+- name: Configure master to use etcd3 storage backend
+ yedit:
+ src: /etc/origin/master/master-config.yaml
+ key: "{{ item.key }}"
+ value: "{{ item.value }}"
+ with_items:
+ - key: kubernetesMasterConfig.apiServerArguments.storage-backend
+ value:
+ - etcd3
+ - key: kubernetesMasterConfig.apiServerArguments.storage-media-type
+ value:
+ - application/vnd.kubernetes.protobuf
diff --git a/roles/etcd_migrate/tasks/main.yml b/roles/etcd_migrate/tasks/main.yml
new file mode 100644
index 000000000..409b0b613
--- /dev/null
+++ b/roles/etcd_migrate/tasks/main.yml
@@ -0,0 +1,25 @@
+---
+- name: Fail if invalid r_etcd_migrate_action provided
+ fail:
+ msg: "etcd_migrate role can only be called with 'check' or 'migrate' or 'configure'"
+ when: r_etcd_migrate_action not in ['check', 'migrate', 'configure']
+
+- name: Include main action task file
+ include: "{{ r_etcd_migrate_action }}.yml"
+
+# 2. migrate v2 datadir into v3:
+# ETCDCTL_API=3 ./etcdctl migrate --data-dir=${data_dir} --no-ttl
+# backup the etcd datadir first
+# Provide a way for an operator to specify transformer
+
+# 3. re-configure OpenShift master at /etc/origin/master/master-config.yml
+# set storage-backend to “etcd3”
+# 4. we could leave the master restart to current logic (there is already the code ready (single vs. HA master))
+
+# Run
+# etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt --endpoint https://172.16.186.45:2379 cluster-health
+# to check the cluster health (from the etcdctl.sh aliases file)
+
+# Another assumption:
+# - in order to migrate all etcd v2 data into v3, we need to shut down the cluster (let's verify that on Wednesday meeting)
+# -
diff --git a/roles/etcd_migrate/tasks/migrate.yml b/roles/etcd_migrate/tasks/migrate.yml
new file mode 100644
index 000000000..cb479b0cc
--- /dev/null
+++ b/roles/etcd_migrate/tasks/migrate.yml
@@ -0,0 +1,53 @@
+---
+# Should this be run in a serial manner?
+- set_fact:
+ l_etcd_service: "{{ 'etcd_container' if openshift.common.is_containerized else 'etcd' }}"
+
+- name: Disable etcd members
+ service:
+ name: "{{ l_etcd_service }}"
+ state: stopped
+
+# Should we skip all TTL keys? https://bugzilla.redhat.com/show_bug.cgi?id=1389773
+- name: Migrate etcd data
+ command: >
+ etcdctl migrate --data-dir={{ etcd_data_dir }}
+ environment:
+ ETCDCTL_API: 3
+ register: l_etcdctl_migrate
+
+# TODO(jchaloup): If any of the members fails, we need to restore all members to v2 from the pre-migrate backup
+- name: Check the etcd v2 data are correctly migrated
+ fail:
+ msg: "Failed to migrate a member"
+ when: "'finished transforming keys' not in l_etcdctl_migrate.stdout"
+
+# TODO(jchaloup): start etcd on a different port so no one can access it
+# Once the validation is done
+- name: Enable etcd member
+ service:
+ name: "{{ l_etcd_service }}"
+ state: started
+
+- name: Re-introduce leases (as a replacement for key TTLs)
+ command: >
+ oadm migrate etcd-ttl \
+ --cert {{ etcd_peer_cert_file }} \
+ --key {{ etcd_peer_key_file }} \
+ --cacert {{ etcd_peer_ca_file }} \
+ --etcd-address 'https://{{ etcd_peer }}:2379' \
+ --ttl-keys-prefix {{ item }} \
+ --lease-duration 1h
+ environment:
+ ETCDCTL_API: 3
+ with_items:
+ - "/kubernetes.io/events"
+ - "/kubernetes.io/masterleases"
+
+- set_fact:
+ r_etcd_migrate_success: true
+
+- name: Enable etcd member
+ service:
+ name: "{{ l_etcd_service }}"
+ state: started
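Once the member is back up, the result can be spot-checked with the same v3 key listing used by `check.yml` earlier in this patch; a sketch (not part of the role):

```yaml
# Sketch only: after migration, the check.yml-style listing should now
# return at least one v3 key instead of an empty result.
- name: List a migrated v3 key
  command: >
    etcdctl --cert {{ etcd_peer_cert_file }} --key {{ etcd_peer_key_file }}
    --cacert {{ etcd_peer_ca_file }} --endpoints 'https://{{ etcd_peer }}:2379'
    get "" --from-key --keys-only -w json --limit 1
  environment:
    ETCDCTL_API: 3
  register: l_v3_key_check
  changed_when: false
```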
diff --git a/roles/openshift_hosted/tasks/registry/storage/glusterfs.yml b/roles/openshift_hosted/tasks/registry/storage/glusterfs.yml
index e6bb196b8..c504bfb80 100644
--- a/roles/openshift_hosted/tasks/registry/storage/glusterfs.yml
+++ b/roles/openshift_hosted/tasks/registry/storage/glusterfs.yml
@@ -35,7 +35,7 @@
mount:
state: mounted
fstype: glusterfs
- src: "{{ groups.oo_glusterfs_to_config[0] }}:/{{ openshift.hosted.registry.storage.glusterfs.path }}"
+ src: "{% if 'glusterfs_registry' in groups %}{{ groups.glusterfs_registry[0] }}{% else %}{{ groups.glusterfs[0] }}{% endif %}:/{{ openshift.hosted.registry.storage.glusterfs.path }}"
name: "{{ mktemp.stdout }}"
- name: Set registry volume permissions
diff --git a/roles/openshift_master/defaults/main.yml b/roles/openshift_master/defaults/main.yml
index 6a082d71a..2d3ce5bcd 100644
--- a/roles/openshift_master/defaults/main.yml
+++ b/roles/openshift_master/defaults/main.yml
@@ -1,3 +1,4 @@
---
openshift_node_ips: []
r_openshift_master_clean_install: false
+r_openshift_master_etcd3_storage: false
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index 035c15fef..aed5598c0 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -164,26 +164,6 @@
- restart master api
- restart master controllers
-- name: Configure master to use etcd3 storage backend on 3.6 clean installs
- yedit:
- src: /etc/origin/master/master-config.yaml
- key: "{{ item.key }}"
- value: "{{ item.value }}"
- with_items:
- - key: kubernetesMasterConfig.apiServerArguments.storage-backend
- value:
- - etcd3
- - key: kubernetesMasterConfig.apiServerArguments.storage-media-type
- value:
- - application/vnd.kubernetes.protobuf
- when:
- - r_openshift_master_clean_install
- - openshift.common.version_gte_3_6
- notify:
- - restart master
- - restart master api
- - restart master controllers
-
- include: set_loopback_context.yml
when: openshift.common.version_gte_3_2_or_1_2
diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2
index 1935d9592..6c26e5092 100644
--- a/roles/openshift_master/templates/master.yaml.v1.j2
+++ b/roles/openshift_master/templates/master.yaml.v1.j2
@@ -139,6 +139,12 @@ kubernetesMasterConfig:
- v1
{% endif %}
apiServerArguments: {{ openshift.master.api_server_args | default(None) | to_padded_yaml( level=2 ) }}
+{% if r_openshift_master_etcd3_storage or ( r_openshift_master_clean_install and openshift.common.version_gte_3_6 ) %}
+ storage-backend:
+ - etcd3
+ storage-media-type:
+ - application/vnd.kubernetes.protobuf
+{% endif %}
controllerArguments: {{ openshift.master.controller_args | default(None) | to_padded_yaml( level=2 ) }}
masterCount: {{ openshift.master.master_count if openshift.master.cluster_method | default(None) == 'native' else 1 }}
masterIP: {{ openshift.common.ip }}
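When `r_openshift_master_etcd3_storage` is true, or on a clean install of 3.6 or later, the template now emits the two extra API server arguments; the rendered fragment of `master-config.yaml` should look roughly like:

```yaml
kubernetesMasterConfig:
  apiServerArguments:
    storage-backend:
    - etcd3
    storage-media-type:
    - application/vnd.kubernetes.protobuf
```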
diff --git a/roles/openshift_storage_glusterfs/README.md b/roles/openshift_storage_glusterfs/README.md
index 62fc35299..da4e348b4 100644
--- a/roles/openshift_storage_glusterfs/README.md
+++ b/roles/openshift_storage_glusterfs/README.md
@@ -90,7 +90,8 @@ GlusterFS cluster into a new or existing OpenShift cluster:
| openshift_storage_glusterfs_heketi_admin_key | auto-generated | String to use as secret key for performing heketi commands as admin
| openshift_storage_glusterfs_heketi_user_key | auto-generated | String to use as secret key for performing heketi commands as user that can only view or modify volumes
| openshift_storage_glusterfs_heketi_topology_load | True | Load the GlusterFS topology information into heketi
-| openshift_storage_glusterfs_heketi_url | Undefined | URL for the heketi REST API, dynamically determined in native mode
+| openshift_storage_glusterfs_heketi_url | Undefined | When heketi is native, this sets the hostname portion of the final heketi route URL. When heketi is external, this is the full URL to the heketi service.
+| openshift_storage_glusterfs_heketi_port | 8080 | TCP port for external heketi service **NOTE:** This has no effect in native mode
| openshift_storage_glusterfs_heketi_wipe | False | Destroy any existing heketi resources, defaults to the value of `openshift_storage_glusterfs_wipe`
Each role variable also has a corresponding variable to optionally configure a
diff --git a/roles/openshift_storage_glusterfs/defaults/main.yml b/roles/openshift_storage_glusterfs/defaults/main.yml
index 468877e57..4ff56af9e 100644
--- a/roles/openshift_storage_glusterfs/defaults/main.yml
+++ b/roles/openshift_storage_glusterfs/defaults/main.yml
@@ -13,11 +13,12 @@ openshift_storage_glusterfs_heketi_is_missing: True
openshift_storage_glusterfs_heketi_deploy_is_missing: True
openshift_storage_glusterfs_heketi_image: "{{ 'rhgs3/rhgs-volmanager-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'heketi/heketi' | quote }}"
openshift_storage_glusterfs_heketi_version: 'latest'
-openshift_storage_glusterfs_heketi_admin_key: "{{ 32 | oo_generate_secret }}"
-openshift_storage_glusterfs_heketi_user_key: "{{ 32 | oo_generate_secret }}"
+openshift_storage_glusterfs_heketi_admin_key: "{{ omit }}"
+openshift_storage_glusterfs_heketi_user_key: "{{ omit }}"
openshift_storage_glusterfs_heketi_topology_load: True
openshift_storage_glusterfs_heketi_wipe: "{{ openshift_storage_glusterfs_wipe }}"
openshift_storage_glusterfs_heketi_url: "{{ omit }}"
+openshift_storage_glusterfs_heketi_port: 8080
openshift_storage_glusterfs_registry_timeout: "{{ openshift_storage_glusterfs_timeout }}"
openshift_storage_glusterfs_registry_namespace: "{{ openshift.hosted.registry.namespace | default('default') }}"
@@ -33,8 +34,9 @@ openshift_storage_glusterfs_registry_heketi_is_missing: "{{ openshift_storage_gl
openshift_storage_glusterfs_registry_heketi_deploy_is_missing: "{{ openshift_storage_glusterfs_heketi_deploy_is_missing }}"
openshift_storage_glusterfs_registry_heketi_image: "{{ openshift_storage_glusterfs_heketi_image }}"
openshift_storage_glusterfs_registry_heketi_version: "{{ openshift_storage_glusterfs_heketi_version }}"
-openshift_storage_glusterfs_registry_heketi_admin_key: "{{ 32 | oo_generate_secret }}"
-openshift_storage_glusterfs_registry_heketi_user_key: "{{ 32 | oo_generate_secret }}"
+openshift_storage_glusterfs_registry_heketi_admin_key: "{{ omit }}"
+openshift_storage_glusterfs_registry_heketi_user_key: "{{ omit }}"
openshift_storage_glusterfs_registry_heketi_topology_load: "{{ openshift_storage_glusterfs_heketi_topology_load }}"
openshift_storage_glusterfs_registry_heketi_wipe: "{{ openshift_storage_glusterfs_heketi_wipe }}"
openshift_storage_glusterfs_registry_heketi_url: "{{ openshift_storage_glusterfs_heketi_url | default(omit) }}"
+openshift_storage_glusterfs_registry_heketi_port: 8080
diff --git a/roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml b/roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml
index 81b4fa5dc..4434f750c 100644
--- a/roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml
+++ b/roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml
@@ -29,7 +29,7 @@ objects:
- kind: Route
apiVersion: v1
metadata:
- name: deploy-heketi-${CLUSTER_NAME}
+ name: ${HEKETI_ROUTE}
labels:
glusterfs: deploy-heketi-${CLUSTER_NAME}-route
deploy-heketi: support
@@ -115,14 +115,19 @@ parameters:
displayName: Namespace
description: Set the namespace where the GlusterFS pods reside
value: default
+- name: HEKETI_ROUTE
+ displayName: heketi route name
+ description: Set the hostname for the route URL
+ value: "heketi-glusterfs"
- name: IMAGE_NAME
- displayName: heketi container name
+ displayName: heketi container image name
required: True
- name: IMAGE_VERSION
- displayName: heketi container versiona
+ displayName: heketi container image version
required: True
- name: CLUSTER_NAME
displayName: GlusterFS cluster name
+ description: A unique name to identify this heketi service, useful for running multiple heketi instances
value: glusterfs
- name: TOPOLOGY_PATH
displayName: heketi topology file location
diff --git a/roles/openshift_storage_glusterfs/files/v3.6/glusterfs-template.yml b/roles/openshift_storage_glusterfs/files/v3.6/glusterfs-template.yml
index dc3d2250a..8c5e1ded3 100644
--- a/roles/openshift_storage_glusterfs/files/v3.6/glusterfs-template.yml
+++ b/roles/openshift_storage_glusterfs/files/v3.6/glusterfs-template.yml
@@ -125,11 +125,12 @@ parameters:
description: Labels which define the daemonset node selector. Must contain at least one label of the format \'glusterfs=<CLUSTER_NAME>-host\'
value: '{ "glusterfs": "storage-host" }'
- name: IMAGE_NAME
- displayName: GlusterFS container name
+ displayName: GlusterFS container image name
required: True
- name: IMAGE_VERSION
- displayName: GlusterFS container versiona
+ displayName: GlusterFS container image version
required: True
- name: CLUSTER_NAME
displayName: GlusterFS cluster name
+ description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances
value: storage
diff --git a/roles/openshift_storage_glusterfs/files/v3.6/heketi-template.yml b/roles/openshift_storage_glusterfs/files/v3.6/heketi-template.yml
index 1d8f1abdf..e3fa0a9fb 100644
--- a/roles/openshift_storage_glusterfs/files/v3.6/heketi-template.yml
+++ b/roles/openshift_storage_glusterfs/files/v3.6/heketi-template.yml
@@ -27,7 +27,7 @@ objects:
- kind: Route
apiVersion: v1
metadata:
- name: heketi-${CLUSTER_NAME}
+ name: ${HEKETI_ROUTE}
labels:
glusterfs: heketi-${CLUSTER_NAME}-route
spec:
@@ -109,12 +109,17 @@ parameters:
displayName: Namespace
description: Set the namespace where the GlusterFS pods reside
value: default
+- name: HEKETI_ROUTE
+ displayName: heketi route name
+ description: Set the hostname for the route URL
+ value: "heketi-glusterfs"
- name: IMAGE_NAME
- displayName: heketi container name
+ displayName: heketi container image name
required: True
- name: IMAGE_VERSION
- displayName: heketi container versiona
+ displayName: heketi container image version
required: True
- name: CLUSTER_NAME
displayName: GlusterFS cluster name
+ description: A unique name to identify this heketi service, useful for running multiple heketi instances
value: glusterfs
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
index 829c1f51b..4406ef28b 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
@@ -123,21 +123,32 @@
when:
- glusterfs_heketi_topology_load
-- include: heketi_deploy_part1.yml
+- name: Generate heketi admin key
+ set_fact:
+ glusterfs_heketi_admin_key: "{{ 32 | oo_generate_secret }}"
when:
- glusterfs_heketi_is_native
- - glusterfs_heketi_deploy_is_missing
- - glusterfs_heketi_is_missing
+ - glusterfs_heketi_admin_key is undefined
-- name: Set heketi URL
+- name: Generate heketi user key
set_fact:
- glusterfs_heketi_url: "localhost:8080"
+ glusterfs_heketi_user_key: "{{ 32 | oo_generate_secret }}"
+ until: "glusterfs_heketi_user_key != glusterfs_heketi_admin_key"
+ delay: 1
+ retries: 10
+ when:
+ - glusterfs_heketi_is_native
+ - glusterfs_heketi_user_key is undefined
+
+- include: heketi_deploy_part1.yml
when:
- glusterfs_heketi_is_native
+ - glusterfs_heketi_deploy_is_missing
+ - glusterfs_heketi_is_missing
- name: Set heketi-cli command
set_fact:
- glusterfs_heketi_client: "{% if glusterfs_heketi_is_native %}oc rsh {{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} {% endif %}heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}'"
+ glusterfs_heketi_client: "{% if glusterfs_heketi_is_native %}oc rsh {{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} {% endif %}heketi-cli -s http://{% if glusterfs_heketi_is_native %}localhost:8080{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %} --user admin --secret '{{ glusterfs_heketi_admin_key }}'"
- name: Verify heketi service
command: "{{ glusterfs_heketi_client }} cluster list"
@@ -155,21 +166,43 @@
- glusterfs_heketi_is_native
- glusterfs_heketi_is_missing
-- name: Create heketi user secret
+- name: Create heketi secret
oc_secret:
namespace: "{{ glusterfs_namespace }}"
state: present
- name: "heketi-{{ glusterfs_name }}-user-secret"
+ name: "heketi-{{ glusterfs_name }}-secret"
type: "kubernetes.io/glusterfs"
force: True
contents:
- path: key
- data: "{{ glusterfs_heketi_user_key }}"
+ data: "{{ glusterfs_heketi_admin_key }}"
+ when:
+ - glusterfs_storageclass
+
+- name: Get heketi route
+ oc_obj:
+ namespace: "{{ glusterfs_namespace }}"
+ kind: route
+ state: list
+ name: "heketi-{{ glusterfs_name }}"
+ register: heketi_route
+ when:
+ - glusterfs_storageclass
+ - glusterfs_heketi_is_native
+
+- name: Determine StorageClass heketi URL
+ set_fact:
+ glusterfs_heketi_route: "{{ heketi_route.results.results[0]['spec']['host'] }}"
+ when:
+ - glusterfs_storageclass
+ - glusterfs_heketi_is_native
- name: Generate GlusterFS StorageClass file
template:
src: "{{ openshift.common.examples_content_version }}/glusterfs-storageclass.yml.j2"
dest: "{{ mktemp.stdout }}/glusterfs-storageclass.yml"
+ when:
+ - glusterfs_storageclass
- name: Create GlusterFS StorageClass
oc_obj:
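
The hunk above replaces fixed heketi keys with values generated by the oo_generate_secret filter and reuses the admin key for the StorageClass secret. As a rough illustration, the "Create heketi secret" task would end up producing a Secret like the following sketch; the names assume glusterfs_name=storage and glusterfs_namespace=glusterfs, which are hypothetical here, and the key value is whatever oo_generate_secret returned.

kind: Secret
apiVersion: v1
metadata:
  name: heketi-storage-secret          # "heketi-{{ glusterfs_name }}-secret"
  namespace: glusterfs                 # "{{ glusterfs_namespace }}"
type: kubernetes.io/glusterfs
data:
  key: "<base64 of the generated admin key>"
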
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
index aa303d126..dbfe126a4 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
@@ -19,6 +19,7 @@
glusterfs_heketi_topology_load: "{{ openshift_storage_glusterfs_heketi_topology_load }}"
glusterfs_heketi_wipe: "{{ openshift_storage_glusterfs_heketi_wipe }}"
glusterfs_heketi_url: "{{ openshift_storage_glusterfs_heketi_url }}"
+ glusterfs_heketi_port: "{{ openshift_storage_glusterfs_heketi_port }}"
glusterfs_nodes: "{{ groups.glusterfs }}"
- include: glusterfs_common.yml
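
The config path now forwards a dedicated port variable instead of hard-coding one. When pointing the role at an external (standalone) heketi, a group_vars entry along these lines would exercise it; the host and port are hypothetical, and the _is_native toggle is the role's existing switch for external heketi:

# group_vars sketch for an external heketi endpoint
openshift_storage_glusterfs_heketi_is_native: False
openshift_storage_glusterfs_heketi_url: heketi.example.com   # host only; no scheme or port
openshift_storage_glusterfs_heketi_port: 8080
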
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
index 4c6891eeb..0849f2a2e 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
@@ -19,12 +19,13 @@
glusterfs_heketi_topology_load: "{{ openshift_storage_glusterfs_registry_heketi_topology_load }}"
glusterfs_heketi_wipe: "{{ openshift_storage_glusterfs_registry_heketi_wipe }}"
glusterfs_heketi_url: "{{ openshift_storage_glusterfs_registry_heketi_url }}"
- glusterfs_nodes: "{{ groups.glusterfs_registry }}"
+ glusterfs_heketi_port: "{{ openshift_storage_glusterfs_registry_heketi_port }}"
+ glusterfs_nodes: "{{ groups.glusterfs_registry | default(groups.glusterfs) }}"
- include: glusterfs_common.yml
when:
- - groups.glusterfs_registry | default([]) | count > 0
- - "'glusterfs' not in groups or groups.glusterfs_registry != groups.glusterfs"
+ - glusterfs_nodes | default([]) | count > 0
+ - "'glusterfs' not in groups or glusterfs_nodes != groups.glusterfs"
- name: Delete pre-existing GlusterFS registry resources
oc_obj:
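
With the fallback above, the registry tasks reuse the glusterfs hosts when no separate glusterfs_registry group is defined, and the second when-clause then skips a redundant run of glusterfs_common.yml for an identical node set. A minimal sketch of such an inventory in YAML form, with hypothetical hostnames:

all:
  children:
    glusterfs:
      hosts:
        node1.example.com:
        node2.example.com:
        node3.example.com:
    # no glusterfs_registry group: the registry tasks fall back to the hosts above
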
diff --git a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml
index 318d34b5d..ea9b1fe1f 100644
--- a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml
+++ b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml
@@ -33,6 +33,7 @@
params:
IMAGE_NAME: "{{ glusterfs_heketi_image }}"
IMAGE_VERSION: "{{ glusterfs_heketi_version }}"
+ HEKETI_ROUTE: "{{ glusterfs_heketi_url | default(['heketi-',glusterfs_name]|join) }}"
HEKETI_USER_KEY: "{{ glusterfs_heketi_user_key }}"
HEKETI_ADMIN_KEY: "{{ glusterfs_heketi_admin_key }}"
HEKETI_KUBE_NAMESPACE: "{{ glusterfs_namespace }}"
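
The HEKETI_ROUTE value falls back to "heketi-" plus the cluster name when no explicit URL is given. Roughly how the default expression resolves, with illustrative values only:

# glusterfs_heketi_url undefined, glusterfs_name: storage
#   ['heketi-', glusterfs_name] | join  ->  "heketi-storage"
# glusterfs_heketi_url: heketi.example.com   (hypothetical external host)
#   the URL is passed through unchanged as the route hostname
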
diff --git a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
index 3a9619d9d..26343b909 100644
--- a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
+++ b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
@@ -103,6 +103,7 @@
params:
IMAGE_NAME: "{{ glusterfs_heketi_image }}"
IMAGE_VERSION: "{{ glusterfs_heketi_version }}"
+ HEKETI_ROUTE: "{{ glusterfs_heketi_url | default(['heketi-',glusterfs_name]|join) }}"
HEKETI_USER_KEY: "{{ glusterfs_heketi_user_key }}"
HEKETI_ADMIN_KEY: "{{ glusterfs_heketi_admin_key }}"
HEKETI_KUBE_NAMESPACE: "{{ glusterfs_namespace }}"
@@ -124,7 +125,7 @@
- name: Set heketi-cli command
set_fact:
- glusterfs_heketi_client: "{% if glusterfs_heketi_is_native %}oc rsh {{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} {% endif %}heketi-cli -s http://localhost:8080 --user admin --secret '{{ glusterfs_heketi_admin_key }}'"
+ glusterfs_heketi_client: "oc rsh {{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} heketi-cli -s http://localhost:8080 --user admin --secret '{{ glusterfs_heketi_admin_key }}'"
- name: Verify heketi service
command: "{{ glusterfs_heketi_client }} cluster list"
diff --git a/roles/openshift_storage_glusterfs/tasks/main.yml b/roles/openshift_storage_glusterfs/tasks/main.yml
index c9bfdd1cd..d2d8c6c10 100644
--- a/roles/openshift_storage_glusterfs/tasks/main.yml
+++ b/roles/openshift_storage_glusterfs/tasks/main.yml
@@ -11,7 +11,7 @@
- include: glusterfs_registry.yml
when:
- - "groups.glusterfs_registry | default([]) | count > 0 or openshift.hosted.registry.storage.kind == 'glusterfs' or openshift.hosted.registry.glusterfs.swap"
+ - "groups.glusterfs_registry | default([]) | count > 0 or openshift.hosted.registry.storage.kind == 'glusterfs' or openshift.hosted.registry.storage.glusterfs.swap"
- name: Delete temp directory
file:
diff --git a/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2
index 9b8fae310..5ea801e60 100644
--- a/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2
+++ b/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2
@@ -5,6 +5,7 @@ metadata:
name: glusterfs-{{ glusterfs_name }}
provisioner: kubernetes.io/glusterfs
parameters:
- resturl: "http://{{ glusterfs_heketi_url }}:8081"
+ resturl: "http://{% if glusterfs_heketi_is_native %}{{ glusterfs_heketi_route }}{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %}"
+ restuser: "admin"
secretNamespace: "{{ glusterfs_namespace }}"
- secretName: "heketi-{{ glusterfs_name }}-user-secret"
+ secretName: "heketi-{{ glusterfs_name }}-secret"
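
Taken together, for a native heketi deployment the rendered StorageClass parameters would look roughly like the sketch below; the route host and names are hypothetical, and in the external case resturl instead becomes http://<glusterfs_heketi_url>:<glusterfs_heketi_port>.

parameters:
  resturl: "http://heketi-storage.example.com"   # from the heketi Route's spec.host
  restuser: "admin"
  secretNamespace: "glusterfs"
  secretName: "heketi-storage-secret"
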