-rw-r--r-- .tito/packages/openshift-ansible | 2
-rw-r--r-- openshift-ansible.spec | 58
-rw-r--r-- playbooks/byo/openshift-cluster/config.yml | 17
-rw-r--r-- playbooks/byo/openshift-cluster/service-catalog.yml | 12
-rw-r--r-- playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml | 2
-rw-r--r-- playbooks/byo/openshift-etcd/migrate.yml | 118
-rw-r--r-- playbooks/common/openshift-checks/health.yml | 5
-rw-r--r-- playbooks/common/openshift-checks/pre-install.yml | 5
-rw-r--r-- playbooks/common/openshift-cluster/config.yml | 25
-rw-r--r-- playbooks/common/openshift-cluster/evaluate_groups.yml | 9
-rw-r--r-- playbooks/common/openshift-cluster/redeploy-certificates/masters.yml | 10
-rw-r--r-- playbooks/common/openshift-cluster/service_catalog.yml | 8
-rw-r--r-- playbooks/common/openshift-etcd/migrate.yml | 120
-rw-r--r-- playbooks/common/openshift-master/config.yml | 39
-rw-r--r-- roles/ansible_service_broker/defaults/main.yml | 6
-rw-r--r-- roles/ansible_service_broker/meta/main.yml | 15
-rw-r--r-- roles/ansible_service_broker/tasks/install.yml | 269
-rw-r--r-- roles/ansible_service_broker/tasks/main.yml | 8
-rw-r--r-- roles/ansible_service_broker/tasks/remove.yml | 65
-rw-r--r-- roles/ansible_service_broker/tasks/validate_facts.yml | 15
-rw-r--r-- roles/ansible_service_broker/vars/default_images.yml | 14
-rw-r--r-- roles/ansible_service_broker/vars/openshift-enterprise.yml | 14
-rw-r--r-- roles/etcd_migrate/tasks/check.yml | 2
-rw-r--r-- roles/etcd_migrate/tasks/check_cluster_health.yml | 2
-rw-r--r-- roles/etcd_migrate/tasks/check_cluster_status.yml | 8
-rw-r--r-- roles/etcd_migrate/tasks/migrate.yml | 20
-rw-r--r-- roles/lib_openshift/library/oc_atomic_container.py | 2
-rw-r--r-- roles/lib_openshift/src/ansible/oc_atomic_container.py | 2
-rw-r--r-- roles/openshift_ca/tasks/main.yml | 59
-rw-r--r-- roles/openshift_certificate_expiry/library/openshift_cert_expiry.py | 21
-rw-r--r-- roles/openshift_certificate_expiry/test/conftest.py | 5
-rwxr-xr-x roles/openshift_facts/library/openshift_facts.py | 36
-rw-r--r-- roles/openshift_hosted/tasks/registry/registry.yml | 4
-rw-r--r-- roles/openshift_hosted/tasks/router/router.yml | 6
-rw-r--r-- roles/openshift_logging/defaults/main.yml | 2
-rw-r--r-- roles/openshift_logging_elasticsearch/tasks/main.yaml | 10
-rw-r--r-- roles/openshift_logging_elasticsearch/templates/es.j2 | 3
-rw-r--r-- roles/openshift_logging_elasticsearch/templates/pvc.j2 | 3
-rw-r--r-- roles/openshift_logging_fluentd/tasks/main.yaml | 2
-rw-r--r-- roles/openshift_logging_fluentd/templates/fluentd.j2 | 10
-rw-r--r-- roles/openshift_logging_mux/templates/mux.j2 | 12
-rw-r--r-- roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 | 2
-rw-r--r-- roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 | 2
-rw-r--r-- roles/openshift_master/templates/master_docker/master.docker.service.j2 | 2
-rw-r--r-- roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2 | 3
-rw-r--r-- roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2 | 3
-rw-r--r-- roles/openshift_master_certificates/tasks/main.yml | 2
-rw-r--r-- roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml | 3
-rw-r--r-- roles/openshift_metrics/tasks/install_cassandra.yaml | 3
-rw-r--r-- roles/openshift_metrics/tasks/install_hosa.yaml | 2
-rw-r--r-- roles/openshift_metrics/tasks/install_support.yaml | 2
-rw-r--r-- roles/openshift_metrics/tasks/main.yaml | 2
-rw-r--r-- roles/openshift_metrics/templates/pvc.j2 | 3
-rw-r--r-- roles/openshift_named_certificates/filter_plugins/openshift_named_certificates.py | 21
-rw-r--r-- roles/openshift_node/templates/openshift.docker.node.service | 2
-rw-r--r-- roles/openshift_node_upgrade/tasks/restart.yml | 6
-rw-r--r-- roles/openshift_service_catalog/defaults/main.yml | 3
-rw-r--r-- roles/openshift_service_catalog/files/kubeservicecatalog_roles_bindings.yml | 161
-rw-r--r-- roles/openshift_service_catalog/files/kubesystem_roles_bindings.yml | 38
-rw-r--r-- roles/openshift_service_catalog/meta/main.yml | 17
-rw-r--r-- roles/openshift_service_catalog/tasks/generate_certs.yml | 70
-rw-r--r-- roles/openshift_service_catalog/tasks/install.yml | 181
-rw-r--r-- roles/openshift_service_catalog/tasks/main.yml | 8
-rw-r--r-- roles/openshift_service_catalog/tasks/remove.yml | 56
-rw-r--r-- roles/openshift_service_catalog/tasks/start_api_server.yml | 22
-rw-r--r-- roles/openshift_service_catalog/tasks/wire_aggregator.yml | 86
-rw-r--r-- roles/openshift_service_catalog/templates/api_server.j2 | 80
-rw-r--r-- roles/openshift_service_catalog/templates/api_server_route.j2 | 14
-rw-r--r-- roles/openshift_service_catalog/templates/api_server_service.j2 | 13
-rw-r--r-- roles/openshift_service_catalog/templates/controller_manager.j2 | 46
-rw-r--r-- roles/openshift_service_catalog/templates/controller_manager_service.j2 | 13
-rw-r--r-- roles/openshift_service_catalog/vars/default_images.yml | 3
-rw-r--r-- roles/openshift_service_catalog/vars/openshift-enterprise.yml | 3
-rw-r--r-- roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml | 8
-rw-r--r-- roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml | 4
-rw-r--r-- roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2 | 2
-rw-r--r-- roles/openshift_version/tasks/main.yml | 196
-rw-r--r-- test/integration/openshift_health_checker/setup_container.yml | 3
78 files changed, 1802 insertions(+), 328 deletions(-)
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index a83752c29..65e17d2d9 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.6.123-1 ./
+3.6.128-1 ./
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index 15b892219..97e17412f 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -9,7 +9,7 @@
%global __requires_exclude ^/usr/bin/ansible-playbook$
Name: openshift-ansible
-Version: 3.6.123
+Version: 3.6.128
Release: 1%{?dist}
Summary: Openshift and Atomic Enterprise Ansible
License: ASL 2.0
@@ -280,6 +280,62 @@ Atomic OpenShift Utilities includes
%changelog
+* Thu Jun 29 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.128-1
+- parameterize etcd binary path (fabian@fabianism.us)
+- attach leases via the first master only and only once (jchaloup@redhat.com)
+- evaluate groups when running etcd upgrade from byo/openshift-
+ cluster/upgrades/upgrade_etcd.yml (jchaloup@redhat.com)
+- Bug 1465168 - mux doesn't recognize ansible boolean parameters correctly
+ (rmeggins@redhat.com)
+
+* Tue Jun 27 2017 Scott Dodson <sdodson@redhat.com> 3.6.123.1003-1
+- Generate loopback kubeconfig separately to preserve OpenShift CA certificate.
+ (abutcher@redhat.com)
+- registry: look for the oc executable in /usr/local/bin and ~/bin
+ (gscrivan@redhat.com)
+- router: look for the oc executable in /usr/local/bin and ~/bin
+ (gscrivan@redhat.com)
+- Retry docker startup once (sdodson@redhat.com)
+
+* Tue Jun 27 2017 Scott Dodson <sdodson@redhat.com> 3.6.123.1002-1
+- Fix typo in fluentd_secureforward_contents variable
+ (Andreas.Dembach@dg-i.net)
+- Reverting quotation change in ansible_service_broker install for etcd
+ (ewolinet@redhat.com)
+
+* Mon Jun 26 2017 Scott Dodson <sdodson@redhat.com> 3.6.123.1001-1
+- oc_atomic_container: use rpm to check the version. (gscrivan@redhat.com)
+- Fix .spec for stagecut (jupierce@redhat.com)
+- Picking change from sdodson (ewolinet@redhat.com)
+- openshift_version: skip nfs and lb hosts (smilner@redhat.com)
+- openshift_checks: eval groups before including role (lmeyer@redhat.com)
+- Adding volume fact for etcd for openshift ansible service broker
+ (ewolinet@redhat.com)
+- Updating to label node and wait for apiservice to be healthy and started
+ (ewolinet@redhat.com)
+- Also configure default registry on HA masters (sdodson@redhat.com)
+- Fix parsing certs with very large serial numbers (tbielawa@redhat.com)
+- fix yamllint issues (fabian@fabianism.us)
+- openshift_logging: use empty default for storage labels (fsimonce@redhat.com)
+- Set clean install and etcd storage on first master to fix scaleup
+ (sdodson@redhat.com)
+- images, syscontainer: change default value for ANSIBLE_CONFIG
+ (gscrivan@redhat.com)
+- Cleanup/updates for env variables and etcd image (fabian@fabianism.us)
+- Sync 3.5 cfme templates over to 3.6 (sdodson@redhat.com)
+- Moving checks down after required initialization happens.
+ (kwoodson@redhat.com)
+- add play and role to install ansible-service-broker (fabian@fabianism.us)
+- Creation of service_catalog and placeholder broker roles
+ (ewolinet@redhat.com)
+- GlusterFS: Use proper namespace for heketi command and service account
+ (jarrpa@redhat.com)
+- Fixing quote issue. (kwoodson@redhat.com)
+- GlusterFS: Fix heketi secret name (jarrpa@redhat.com)
+- Fix for dynamic pvs when using storageclasses. (kwoodson@redhat.com)
+- Ensure that host pki tree is mounted in containerized components
+ (sdodson@redhat.com)
+
* Fri Jun 23 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.6.123-1
- releases: enable build/push with multiple tags (lmeyer@redhat.com)
- Update template examples for 3.6 (rteague@redhat.com)
diff --git a/playbooks/byo/openshift-cluster/config.yml b/playbooks/byo/openshift-cluster/config.yml
index 9c5948552..acf5469bf 100644
--- a/playbooks/byo/openshift-cluster/config.yml
+++ b/playbooks/byo/openshift-cluster/config.yml
@@ -3,23 +3,6 @@
tags:
- always
-- name: Verify Requirements
- hosts: OSEv3
- roles:
- - openshift_health_checker
- vars:
- - r_openshift_health_checker_playbook_context: "install"
- post_tasks:
- - action: openshift_health_check
- args:
- checks:
- - disk_availability
- - memory_availability
- - package_availability
- - package_version
- - docker_image_availability
- - docker_storage
-
- include: ../../common/openshift-cluster/std_include.yml
tags:
- always
diff --git a/playbooks/byo/openshift-cluster/service-catalog.yml b/playbooks/byo/openshift-cluster/service-catalog.yml
new file mode 100644
index 000000000..a9fc18958
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/service-catalog.yml
@@ -0,0 +1,12 @@
+---
+#
+# This playbook is a preview of upcoming changes for installing the
+# Service Catalog. See inventory/byo/hosts.*.example for the
+# currently supported method.
+#
+- include: initialize_groups.yml
+
+- include: ../../common/openshift-cluster/service_catalog.yml
+ vars:
+ openshift_cluster_id: "{{ cluster_id | default('default') }}"
+ openshift_debug_level: "{{ debug_level | default(2) }}"
diff --git a/playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml b/playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml
index 8005a17a3..5bd5d64ab 100644
--- a/playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml
@@ -1,4 +1,6 @@
---
- include: ../initialize_groups.yml
+- include: ../../../common/openshift-cluster/evaluate_groups.yml
+
- include: ../../../common/openshift-cluster/upgrades/etcd/main.yml
diff --git a/playbooks/byo/openshift-etcd/migrate.yml b/playbooks/byo/openshift-etcd/migrate.yml
index fd02e066e..143016159 100644
--- a/playbooks/byo/openshift-etcd/migrate.yml
+++ b/playbooks/byo/openshift-etcd/migrate.yml
@@ -3,122 +3,6 @@
tags:
- always
-- include: ../../common/openshift-cluster/evaluate_groups.yml
+- include: ../../common/openshift-etcd/migrate.yml
tags:
- always
-
-- name: Run pre-checks
- hosts: oo_etcd_to_config
- tags:
- - always
- roles:
- - role: etcd_migrate
- r_etcd_migrate_action: check
- etcd_peer: "{{ ansible_default_ipv4.address }}"
-
-# TODO(jchaloup): replace the std_include with something minimal so the entire playbook is faster
-# e.g. I don't need to detect the OCP version, install deps, etc.
-- include: ../../common/openshift-cluster/std_include.yml
- tags:
- - always
-
-- name: Backup v2 data
- hosts: oo_etcd_to_config
- gather_facts: no
- tags:
- - always
- roles:
- - role: openshift_facts
- - role: etcd_common
- r_etcd_common_action: backup
- r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
- r_etcd_common_backup_tag: pre-migration
- r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
- r_etcd_common_backup_sufix_name: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
-
-- name: Gate on etcd backup
- hosts: localhost
- connection: local
- become: no
- tasks:
- - set_fact:
- etcd_backup_completed: "{{ hostvars
- | oo_select_keys(groups.oo_etcd_to_config)
- | oo_collect('inventory_hostname', {'r_etcd_common_backup_complete': true}) }}"
- - set_fact:
- etcd_backup_failed: "{{ groups.oo_etcd_to_config | difference(etcd_backup_completed) }}"
- - fail:
- msg: "Migration cannot continue. The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}"
- when:
- - etcd_backup_failed | length > 0
-
-- name: Prepare masters for etcd data migration
- hosts: oo_masters_to_config
- tasks:
- - set_fact:
- master_services:
- - "{{ openshift.common.service_type + '-master' }}"
- - set_fact:
- master_services:
- - "{{ openshift.common.service_type + '-master-controllers' }}"
- - "{{ openshift.common.service_type + '-master-api' }}"
- when:
- - (openshift_master_cluster_method is defined and openshift_master_cluster_method == "native") or openshift.common.is_master_system_container | bool
- - debug:
- msg: "master service name: {{ master_services }}"
- - name: Stop masters
- service:
- name: "{{ item }}"
- state: stopped
- with_items: "{{ master_services }}"
-
-- name: Migrate etcd data from v2 to v3
- hosts: oo_etcd_to_config
- gather_facts: no
- tags:
- - always
- roles:
- - role: etcd_migrate
- r_etcd_migrate_action: migrate
- etcd_peer: "{{ ansible_default_ipv4.address }}"
-
-- name: Gate on etcd migration
- hosts: oo_masters_to_config
- gather_facts: no
- tasks:
- - set_fact:
- etcd_migration_completed: "{{ hostvars
- | oo_select_keys(groups.oo_etcd_to_config)
- | oo_collect('inventory_hostname', {'r_etcd_migrate_success': true}) }}"
- - set_fact:
- etcd_migration_failed: "{{ groups.oo_etcd_to_config | difference(etcd_migration_completed) }}"
-
-- name: Configure masters if etcd data migration is successful
- hosts: oo_masters_to_config
- roles:
- - role: etcd_migrate
- r_etcd_migrate_action: configure
- when: etcd_migration_failed | length == 0
- tasks:
- - debug:
- msg: "Skipping master re-configuration since migration failed."
- when:
- - etcd_migration_failed | length > 0
-
-- name: Start masters after etcd data migration
- hosts: oo_masters_to_config
- tasks:
- - name: Start master services
- service:
- name: "{{ item }}"
- state: started
- register: service_status
- # Sometimes the master-api or master-controllers service fails to start on the first attempt
- until: service_status.state is defined and service_status.state == "started"
- retries: 5
- delay: 10
- with_items: "{{ master_services[::-1] }}"
- - fail:
- msg: "Migration failed. The following hosts were not properly migrated: {{ etcd_migration_failed | join(',') }}"
- when:
- - etcd_migration_failed | length > 0
diff --git a/playbooks/common/openshift-checks/health.yml b/playbooks/common/openshift-checks/health.yml
index 1bee460e8..c7766ff04 100644
--- a/playbooks/common/openshift-checks/health.yml
+++ b/playbooks/common/openshift-checks/health.yml
@@ -1,4 +1,9 @@
---
+# openshift_health_checker depends on openshift_version which now requires group eval.
+- include: ../openshift-cluster/evaluate_groups.yml
+ tags:
+ - always
+
- name: Run OpenShift health checks
hosts: OSEv3
roles:
diff --git a/playbooks/common/openshift-checks/pre-install.yml b/playbooks/common/openshift-checks/pre-install.yml
index e01c6f38d..7ca9f7e8b 100644
--- a/playbooks/common/openshift-checks/pre-install.yml
+++ b/playbooks/common/openshift-checks/pre-install.yml
@@ -1,4 +1,9 @@
---
+# openshift_health_checker depends on openshift_version which now requires group eval.
+- include: ../openshift-cluster/evaluate_groups.yml
+ tags:
+ - always
+
- hosts: OSEv3
name: run OpenShift pre-install checks
roles:
diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml
index 1482b3a3f..7224ae712 100644
--- a/playbooks/common/openshift-cluster/config.yml
+++ b/playbooks/common/openshift-cluster/config.yml
@@ -1,4 +1,23 @@
---
+# TODO: refactor this into its own include
+# and pass a variable for ctx
+- name: Verify Requirements
+ hosts: oo_all_hosts
+ roles:
+ - openshift_health_checker
+ vars:
+ - r_openshift_health_checker_playbook_context: "install"
+ post_tasks:
+ - action: openshift_health_check
+ args:
+ checks:
+ - disk_availability
+ - memory_availability
+ - package_availability
+ - package_version
+ - docker_image_availability
+ - docker_storage
+
- include: initialize_oo_option_facts.yml
tags:
- always
@@ -45,6 +64,12 @@
tags:
- hosted
+- include: service_catalog.yml
+ when:
+ - openshift_enable_service_catalog | default(false) | bool
+ tags:
+ - servicecatalog
+
- name: Re-enable excluder if it was previously enabled
hosts: oo_masters_to_config:oo_nodes_to_config
tags:
diff --git a/playbooks/common/openshift-cluster/evaluate_groups.yml b/playbooks/common/openshift-cluster/evaluate_groups.yml
index c28ce4c14..baca72c58 100644
--- a/playbooks/common/openshift-cluster/evaluate_groups.yml
+++ b/playbooks/common/openshift-cluster/evaluate_groups.yml
@@ -157,3 +157,12 @@
ansible_become: "{{ g_sudo | default(omit) }}"
with_items: "{{ g_glusterfs_hosts | union(g_glusterfs_registry_hosts | default([])) }}"
changed_when: no
+
+ - name: Evaluate oo_etcd_to_migrate
+ add_host:
+ name: "{{ item }}"
+ groups: oo_etcd_to_migrate
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_become: "{{ g_sudo | default(omit) }}"
+ with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config | default([]) | length != 0 else groups.oo_first_master }}"
+ changed_when: no
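
The with_items expression above selects the dedicated etcd hosts when any are configured and otherwise falls back to the first master (the embedded-etcd case). A minimal Python sketch of that selection logic, with made-up group contents:

    # Hypothetical inventory groups; the Jinja expression picks the etcd
    # hosts when any exist, otherwise the first master (embedded etcd).
    groups = {'oo_etcd_to_config': [], 'oo_first_master': ['master1.example.com']}

    oo_etcd_to_migrate = (groups['oo_etcd_to_config']
                          if len(groups.get('oo_etcd_to_config', [])) != 0
                          else groups['oo_first_master'])
    assert oo_etcd_to_migrate == ['master1.example.com']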
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/masters.yml b/playbooks/common/openshift-cluster/redeploy-certificates/masters.yml
index c30889d64..51b196299 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/masters.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/masters.yml
@@ -51,3 +51,13 @@
| oo_collect('openshift.common.hostname')
| default(none, true) }}"
openshift_certificates_redeploy: true
+ - role: lib_utils
+ post_tasks:
+ - yedit:
+ src: "{{ openshift.common.config_base }}/master/master-config.yaml"
+ key: servingInfo.namedCertificates
+ value: "{{ openshift.master.named_certificates | default([]) | oo_named_certificates_list }}"
+ when:
+ - ('named_certificates' in openshift.master)
+ - openshift.master.named_certificates | default([]) | length > 0
+ - openshift_master_overwrite_named_certificates | default(false) | bool
diff --git a/playbooks/common/openshift-cluster/service_catalog.yml b/playbooks/common/openshift-cluster/service_catalog.yml
new file mode 100644
index 000000000..c42e8781a
--- /dev/null
+++ b/playbooks/common/openshift-cluster/service_catalog.yml
@@ -0,0 +1,8 @@
+---
+- include: evaluate_groups.yml
+
+- name: Service Catalog
+ hosts: oo_first_master
+ roles:
+ - openshift_service_catalog
+ - ansible_service_broker
diff --git a/playbooks/common/openshift-etcd/migrate.yml b/playbooks/common/openshift-etcd/migrate.yml
new file mode 100644
index 000000000..c655449fa
--- /dev/null
+++ b/playbooks/common/openshift-etcd/migrate.yml
@@ -0,0 +1,120 @@
+---
+- include: ../openshift-cluster/evaluate_groups.yml
+ tags:
+ - always
+
+- name: Run pre-checks
+ hosts: oo_etcd_to_migrate
+ tags:
+ - always
+ roles:
+ - role: etcd_migrate
+ r_etcd_migrate_action: check
+ r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
+ etcd_peer: "{{ ansible_default_ipv4.address }}"
+
+- include: ../openshift-cluster/initialize_facts.yml
+ tags:
+ - always
+
+- name: Backup v2 data
+ hosts: oo_etcd_to_migrate
+ gather_facts: no
+ tags:
+ - always
+ roles:
+ - role: openshift_facts
+ - role: etcd_common
+ r_etcd_common_action: backup
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+ r_etcd_common_backup_tag: pre-migration
+ r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
+ r_etcd_common_backup_sufix_name: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
+
+- name: Gate on etcd backup
+ hosts: localhost
+ connection: local
+ become: no
+ tasks:
+ - set_fact:
+ etcd_backup_completed: "{{ hostvars
+ | oo_select_keys(groups.oo_etcd_to_migrate)
+ | oo_collect('inventory_hostname', {'r_etcd_common_backup_complete': true}) }}"
+ - set_fact:
+ etcd_backup_failed: "{{ groups.oo_etcd_to_migrate | difference(etcd_backup_completed) }}"
+ - fail:
+ msg: "Migration cannot continue. The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}"
+ when:
+ - etcd_backup_failed | length > 0
+
+- name: Prepare masters for etcd data migration
+ hosts: oo_masters_to_config
+ tasks:
+ - set_fact:
+ master_services:
+ - "{{ openshift.common.service_type + '-master' }}"
+ - set_fact:
+ master_services:
+ - "{{ openshift.common.service_type + '-master-controllers' }}"
+ - "{{ openshift.common.service_type + '-master-api' }}"
+ when:
+ - (openshift_master_cluster_method is defined and openshift_master_cluster_method == "native") or openshift.common.is_master_system_container | bool
+ - debug:
+ msg: "master service name: {{ master_services }}"
+ - name: Stop masters
+ service:
+ name: "{{ item }}"
+ state: stopped
+ with_items: "{{ master_services }}"
+
+- name: Migrate etcd data from v2 to v3
+ hosts: oo_etcd_to_migrate
+ gather_facts: no
+ tags:
+ - always
+ roles:
+ - role: etcd_migrate
+ r_etcd_migrate_action: migrate
+ r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
+ etcd_peer: "{{ ansible_default_ipv4.address }}"
+
+- name: Gate on etcd migration
+ hosts: oo_masters_to_config
+ gather_facts: no
+ tasks:
+ - set_fact:
+ etcd_migration_completed: "{{ hostvars
+ | oo_select_keys(groups.oo_etcd_to_migrate)
+ | oo_collect('inventory_hostname', {'r_etcd_migrate_success': true}) }}"
+ - set_fact:
+ etcd_migration_failed: "{{ groups.oo_etcd_to_migrate | difference(etcd_migration_completed) }}"
+
+- name: Configure masters if etcd data migration is successful
+ hosts: oo_masters_to_config
+ roles:
+ - role: etcd_migrate
+ r_etcd_migrate_action: configure
+ when: etcd_migration_failed | length == 0
+ tasks:
+ - debug:
+ msg: "Skipping master re-configuration since migration failed."
+ when:
+ - etcd_migration_failed | length > 0
+
+- name: Start masters after etcd data migration
+ hosts: oo_masters_to_config
+ tasks:
+ - name: Start master services
+ service:
+ name: "{{ item }}"
+ state: started
+ register: service_status
+ # Sometimes the master-api or master-controllers service fails to start on the first attempt
+ until: service_status.state is defined and service_status.state == "started"
+ retries: 5
+ delay: 10
+ with_items: "{{ master_services[::-1] }}"
+ - fail:
+ msg: "Migration failed. The following hosts were not properly migrated: {{ etcd_migration_failed | join(',') }}"
+ when:
+ - etcd_migration_failed | length > 0
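
The two "Gate on ..." plays above work by set difference: collect the hosts that reported a success fact, subtract them from the group, and fail if anything remains. A rough Python equivalent of what the oo_select_keys/oo_collect filter chain computes (host names and facts here are illustrative):

    # Illustrative hostvars: etcd2 never set the success fact.
    hostvars = {
        'etcd1': {'r_etcd_common_backup_complete': True},
        'etcd2': {},
    }
    group = ['etcd1', 'etcd2']

    etcd_backup_completed = [h for h in group
                             if hostvars[h].get('r_etcd_common_backup_complete') is True]
    etcd_backup_failed = [h for h in group if h not in etcd_backup_completed]
    if etcd_backup_failed:
        raise SystemExit('Migration cannot continue. The following hosts did '
                         'not complete etcd backup: ' + ','.join(etcd_backup_failed))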
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
index 70108fb7a..7d3a371e3 100644
--- a/playbooks/common/openshift-master/config.yml
+++ b/playbooks/common/openshift-master/config.yml
@@ -20,25 +20,6 @@
- node
- .config_managed
- - name: Check for existing configuration
- stat:
- path: /etc/origin/master/master-config.yaml
- register: master_config_stat
-
- - name: Set clean install fact
- set_fact:
- l_clean_install: "{{ not master_config_stat.stat.exists | bool }}"
-
- - name: Determine if etcd3 storage is in use
- command: grep -Pzo "storage-backend:\n.*etcd3" /etc/origin/master/master-config.yaml -q
- register: etcd3_grep
- failed_when: false
- changed_when: false
-
- - name: Set etcd3 fact
- set_fact:
- l_etcd3_enabled: "{{ etcd3_grep.rc == 0 | bool }}"
-
- set_fact:
openshift_master_pod_eviction_timeout: "{{ lookup('oo_option', 'openshift_master_pod_eviction_timeout') | default(none, true) }}"
when: openshift_master_pod_eviction_timeout is not defined
@@ -88,7 +69,7 @@
ha: "{{ openshift_master_ha | default(groups.oo_masters | length > 1) }}"
master_count: "{{ openshift_master_count | default(groups.oo_masters | length) }}"
-- name: Determine if session secrets must be generated
+- name: Inspect state of first master session secrets and config
hosts: oo_first_master
roles:
- role: openshift_facts
@@ -98,6 +79,24 @@
local_facts:
session_auth_secrets: "{{ openshift_master_session_auth_secrets | default(openshift.master.session_auth_secrets | default(None)) }}"
session_encryption_secrets: "{{ openshift_master_session_encryption_secrets | default(openshift.master.session_encryption_secrets | default(None)) }}"
+ - name: Check for existing configuration
+ stat:
+ path: /etc/origin/master/master-config.yaml
+ register: master_config_stat
+
+ - name: Set clean install fact
+ set_fact:
+ l_clean_install: "{{ not master_config_stat.stat.exists | bool }}"
+
+ - name: Determine if etcd3 storage is in use
+ command: grep -Pzo "storage-backend:\n.*etcd3" /etc/origin/master/master-config.yaml -q
+ register: etcd3_grep
+ failed_when: false
+ changed_when: false
+
+ - name: Set etcd3 fact
+ set_fact:
+ l_etcd3_enabled: "{{ etcd3_grep.rc == 0 | bool }}"
- name: Generate master session secrets
hosts: oo_first_master
diff --git a/roles/ansible_service_broker/defaults/main.yml b/roles/ansible_service_broker/defaults/main.yml
new file mode 100644
index 000000000..4a7252679
--- /dev/null
+++ b/roles/ansible_service_broker/defaults/main.yml
@@ -0,0 +1,6 @@
+---
+
+ansible_service_broker_remove: false
+ansible_service_broker_log_level: info
+# Enabling this is not recommended for now
+ansible_service_broker_launch_apb_on_bind: false
diff --git a/roles/ansible_service_broker/meta/main.yml b/roles/ansible_service_broker/meta/main.yml
new file mode 100644
index 000000000..ec4aafb79
--- /dev/null
+++ b/roles/ansible_service_broker/meta/main.yml
@@ -0,0 +1,15 @@
+---
+galaxy_info:
+ author: Fabian von Feilitzsch
+ description: OpenShift Ansible Service Broker
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 2.1
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+dependencies:
+- role: lib_openshift
diff --git a/roles/ansible_service_broker/tasks/install.yml b/roles/ansible_service_broker/tasks/install.yml
new file mode 100644
index 000000000..81c3f8e5b
--- /dev/null
+++ b/roles/ansible_service_broker/tasks/install.yml
@@ -0,0 +1,269 @@
+---
+
+# Fact setting and validations
+- name: Set default image variables based on deployment type
+ include_vars: "{{ item }}"
+ with_first_found:
+ - "{{ openshift_deployment_type | default(deployment_type) }}.yml"
+ - "default_images.yml"
+
+- name: set ansible_service_broker facts
+ set_fact:
+ ansible_service_broker_image_prefix: "{{ ansible_service_broker_image_prefix | default(__ansible_service_broker_image_prefix) }}"
+ ansible_service_broker_image_tag: "{{ ansible_service_broker_image_tag | default(__ansible_service_broker_image_tag) }}"
+
+ ansible_service_broker_etcd_image_prefix: "{{ ansible_service_broker_etcd_image_prefix | default(__ansible_service_broker_etcd_image_prefix) }}"
+ ansible_service_broker_etcd_image_tag: "{{ ansible_service_broker_etcd_image_tag | default(__ansible_service_broker_etcd_image_tag) }}"
+ ansible_service_broker_etcd_image_etcd_path: "{{ ansible_service_broker_etcd_image_etcd_path | default(__ansible_service_broker_etcd_image_etcd_path) }}"
+
+ ansible_service_broker_registry_type: "{{ ansible_service_broker_registry_type | default(__ansible_service_broker_registry_type) }}"
+ ansible_service_broker_registry_url: "{{ ansible_service_broker_registry_url | default(__ansible_service_broker_registry_url) }}"
+ ansible_service_broker_registry_user: "{{ ansible_service_broker_registry_user | default(__ansible_service_broker_registry_user) }}"
+ ansible_service_broker_registry_password: "{{ ansible_service_broker_registry_password | default(__ansible_service_broker_registry_password) }}"
+ ansible_service_broker_registry_organization: "{{ ansible_service_broker_registry_organization | default(__ansible_service_broker_registry_organization) }}"
+
+- name: set ansible-service-broker image facts using set prefix and tag
+ set_fact:
+ ansible_service_broker_image: "{{ ansible_service_broker_image_prefix }}ansible-service-broker:{{ ansible_service_broker_image_tag }}"
+ ansible_service_broker_etcd_image: "{{ ansible_service_broker_etcd_image_prefix }}etcd:{{ ansible_service_broker_etcd_image_tag }}"
+
+- include: validate_facts.yml
+
+
+# Deployment of ansible-service-broker starts here
+- name: create openshift-ansible-service-broker project
+ oc_project:
+ name: openshift-ansible-service-broker
+ state: present
+
+- name: create ansible-service-broker serviceaccount
+ oc_serviceaccount:
+ name: asb
+ namespace: openshift-ansible-service-broker
+ state: present
+
+- name: create ansible-service-broker service
+ oc_service:
+ name: asb
+ namespace: openshift-ansible-service-broker
+ state: present
+ labels:
+ app: ansible-service-broker
+ service: asb
+ ports:
+ - name: port-1338
+ port: 1338
+ selector:
+ app: ansible-service-broker
+ service: asb
+
+- name: create etcd service
+ oc_service:
+ name: etcd
+ namespace: openshift-ansible-service-broker
+ state: present
+ ports:
+ - name: etcd-advertise
+ port: 2379
+ selector:
+ app: ansible-service-broker
+ service: etcd
+
+- name: create route for ansible-service-broker service
+ oc_route:
+ name: asb-1338
+ namespace: openshift-ansible-service-broker
+ state: present
+ service_name: asb
+ port: 1338
+ register: asb_route_out
+
+- name: get ansible-service-broker route name
+ set_fact:
+ ansible_service_broker_route: "{{ asb_route_out.results.results[0].spec.host }}"
+
+- name: create persistent volume claim for etcd
+ oc_obj:
+ name: etcd
+ namespace: openshift-ansible-service-broker
+ state: present
+ kind: PersistentVolumeClaim
+ content:
+ path: /tmp/dcout
+ data:
+ apiVersion: v1
+ kind: PersistentVolumeClaim
+ metadata:
+ name: etcd
+ namespace: openshift-ansible-service-broker
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 1Gi
+
+- name: create etcd deployment
+ oc_obj:
+ name: etcd
+ namespace: openshift-ansible-service-broker
+ state: present
+ kind: Deployment
+ content:
+ path: /tmp/dcout
+ data:
+ apiVersion: extensions/v1beta1
+ kind: Deployment
+ metadata:
+ name: etcd
+ namespace: openshift-ansible-service-broker
+ labels:
+ app: ansible-service-broker
+ service: etcd
+ spec:
+ selector:
+ matchLabels:
+ app: ansible-service-broker
+ service: etcd
+ strategy:
+ type: RollingUpdate
+ rollingUpdate:
+ maxSurge: 1
+ maxUnavailable: 1
+ replicas: 1
+ template:
+ metadata:
+ labels:
+ app: ansible-service-broker
+ service: etcd
+ spec:
+ restartPolicy: Always
+ containers:
+ - image: "{{ ansible_service_broker_etcd_image }}"
+ name: etcd
+ imagePullPolicy: IfNotPresent
+ terminationMessagePath: /tmp/termination-log
+ workingDir: /etcd
+ args:
+ - '{{ ansible_service_broker_etcd_image_etcd_path }}'
+ - --data-dir=/data
+ - "--listen-client-urls=http://0.0.0.0:2379"
+ - "--advertise-client-urls=http://0.0.0.0:2379"
+ ports:
+ - containerPort: 2379
+ protocol: TCP
+ env:
+ - name: ETCDCTL_API
+ value: "3"
+ volumeMounts:
+ - mountPath: /data
+ name: etcd
+ volumes:
+ - name: etcd
+ persistentVolumeClaim:
+ claimName: etcd
+
+- name: create ansible-service-broker deployment
+ oc_obj:
+ name: asb
+ namespace: openshift-ansible-service-broker
+ state: present
+ kind: Deployment
+ content:
+ path: /tmp/dcout
+ data:
+ apiVersion: extensions/v1beta1
+ kind: Deployment
+ metadata:
+ name: asb
+ namespace: openshift-ansible-service-broker
+ labels:
+ app: openshift-ansible-service-broker
+ service: asb
+ spec:
+ strategy:
+ type: Recreate
+ replicas: 1
+ template:
+ metadata:
+ labels:
+ app: openshift-ansible-service-broker
+ service: asb
+ spec:
+ serviceAccount: asb
+ restartPolicy: Always
+ containers:
+ - image: "{{ ansible_service_broker_image }}"
+ name: asb
+ imagePullPolicy: IfNotPresent
+ volumeMounts:
+ - name: config-volume
+ mountPath: /etc/ansible-service-broker
+ ports:
+ - containerPort: 1338
+ protocol: TCP
+ env:
+ - name: BROKER_CONFIG
+ value: /etc/ansible-service-broker/config.yaml
+ terminationMessagePath: /tmp/termination-log
+ volumes:
+ - name: config-volume
+ configMap:
+ name: broker-config
+ items:
+ - key: broker-config
+ path: config.yaml
+
+
+# TODO: saw an oc_configmap in the library, but didn't understand how to get it to do the following:
+- name: Create config map for ansible-service-broker
+ oc_obj:
+ name: broker-config
+ namespace: openshift-ansible-service-broker
+ state: present
+ kind: ConfigMap
+ content:
+ path: /tmp/cmout
+ data:
+ apiVersion: v1
+ kind: ConfigMap
+ metadata:
+ name: broker-config
+ namespace: openshift-ansible-service-broker
+ labels:
+ app: ansible-service-broker
+ data:
+ broker-config: |
+ registry:
+ name: "{{ ansible_service_broker_registry_type }}"
+ url: "{{ ansible_service_broker_registry_url }}"
+ user: "{{ ansible_service_broker_registry_user }}"
+ pass: "{{ ansible_service_broker_registry_password }}"
+ org: "{{ ansible_service_broker_registry_organization }}"
+ dao:
+ etcd_host: etcd
+ etcd_port: 2379
+ log:
+ logfile: /var/log/ansible-service-broker/asb.log
+ stdout: true
+ level: "{{ ansible_service_broker_log_level }}"
+ color: true
+ openshift: {}
+ broker:
+ devbroker: false
+ launchapbonbind: "{{ ansible_service_broker_launch_apb_on_bind }}"
+
+- name: Create the Broker resource in the catalog
+ oc_obj:
+ name: ansible-service-broker
+ state: present
+ kind: Broker
+ content:
+ path: /tmp/brokerout
+ data:
+ apiVersion: servicecatalog.k8s.io/v1alpha1
+ kind: Broker
+ metadata:
+ name: ansible-service-broker
+ spec:
+ url: http://{{ ansible_service_broker_route }}
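
The set_fact tasks at the top of install.yml resolve each variable as the user-supplied value with the __-prefixed vars-file value as fallback, then concatenate prefix, image name, and tag into a pullable reference. A small Python sketch of that resolution, using the upstream defaults from vars/default_images.yml shown later in this diff:

    # Defaults from vars/default_images.yml; a user-supplied inventory
    # value would win, mirroring `x | default(__x)` in the role.
    defaults = {'prefix': 'ansibleplaybookbundle/', 'tag': 'latest'}
    user_vars = {}  # nothing overridden in this example

    prefix = user_vars.get('prefix', defaults['prefix'])
    tag = user_vars.get('tag', defaults['tag'])
    image = '{}ansible-service-broker:{}'.format(prefix, tag)
    assert image == 'ansibleplaybookbundle/ansible-service-broker:latest'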
diff --git a/roles/ansible_service_broker/tasks/main.yml b/roles/ansible_service_broker/tasks/main.yml
new file mode 100644
index 000000000..b46ce8233
--- /dev/null
+++ b/roles/ansible_service_broker/tasks/main.yml
@@ -0,0 +1,8 @@
+---
+# do any asserts here
+
+- include: install.yml
+ when: not ansible_service_broker_remove|default(false) | bool
+
+- include: remove.yml
+ when: ansible_service_broker_remove|default(false) | bool
diff --git a/roles/ansible_service_broker/tasks/remove.yml b/roles/ansible_service_broker/tasks/remove.yml
new file mode 100644
index 000000000..2519f9f4c
--- /dev/null
+++ b/roles/ansible_service_broker/tasks/remove.yml
@@ -0,0 +1,65 @@
+---
+
+- name: remove openshift-ansible-service-broker project
+ oc_project:
+ name: openshift-ansible-service-broker
+ state: absent
+
+- name: remove ansible-service-broker serviceaccount
+ oc_serviceaccount:
+ name: asb
+ namespace: openshift-ansible-service-broker
+ state: absent
+
+- name: remove ansible-service-broker service
+ oc_service:
+ name: asb
+ namespace: openshift-ansible-service-broker
+ state: absent
+
+- name: remove etcd service
+ oc_service:
+ name: etcd
+ namespace: openshift-ansible-service-broker
+ state: absent
+
+- name: remove route for ansible-service-broker service
+ oc_route:
+ name: asb-1338
+ namespace: openshift-ansible-service-broker
+ state: absent
+
+- name: remove persistent volume claim for etcd
+ oc_pvc:
+ name: etcd
+ namespace: openshift-ansible-service-broker
+ state: absent
+
+- name: remove etcd deployment
+ oc_obj:
+ name: etcd
+ namespace: openshift-ansible-service-broker
+ state: absent
+ kind: Deployment
+
+- name: remove ansible-service-broker deployment
+ oc_obj:
+ name: asb
+ namespace: openshift-ansible-service-broker
+ state: absent
+ kind: Deployment
+
+# TODO: saw an oc_configmap in the library, but didn't understand how to get it to do the following:
+- name: remove config map for ansible-service-broker
+ oc_obj:
+ name: broker-config
+ namespace: openshift-ansible-service-broker
+ state: absent
+ kind: ConfigMap
+
+# TODO: Is this going to work?
+- name: remove broker object from the catalog
+ oc_obj:
+ name: ansible-service-broker
+ state: absent
+ kind: Broker
diff --git a/roles/ansible_service_broker/tasks/validate_facts.yml b/roles/ansible_service_broker/tasks/validate_facts.yml
new file mode 100644
index 000000000..604d24e1d
--- /dev/null
+++ b/roles/ansible_service_broker/tasks/validate_facts.yml
@@ -0,0 +1,15 @@
+---
+- name: validate Dockerhub registry settings
+ fail: msg="To use the dockerhub registry, you must provide the ansible_service_broker_registry_user, ansible_service_broker_registry_password, and ansible_service_broker_registry_organization parameters"
+ when:
+ - ansible_service_broker_registry_type == 'dockerhub'
+ - not (ansible_service_broker_registry_user and
+ ansible_service_broker_registry_password and
+ ansible_service_broker_registry_organization)
+
+
+- name: validate RHCC registry settings
+ fail: msg="To use the Red Hat Container Catalog registry, you must provide the ansible_service_broker_registry_url"
+ when:
+ - ansible_service_broker_registry_type == 'rhcc'
+ - not ansible_service_broker_registry_url
diff --git a/roles/ansible_service_broker/vars/default_images.yml b/roles/ansible_service_broker/vars/default_images.yml
new file mode 100644
index 000000000..15e448515
--- /dev/null
+++ b/roles/ansible_service_broker/vars/default_images.yml
@@ -0,0 +1,14 @@
+---
+
+__ansible_service_broker_image_prefix: ansibleplaybookbundle/
+__ansible_service_broker_image_tag: latest
+
+__ansible_service_broker_etcd_image_prefix: quay.io/coreos/
+__ansible_service_broker_etcd_image_tag: latest
+__ansible_service_broker_etcd_image_etcd_path: /usr/local/bin/etcd
+
+__ansible_service_broker_registry_type: dockerhub
+__ansible_service_broker_registry_url: null
+__ansible_service_broker_registry_user: null
+__ansible_service_broker_registry_password: null
+__ansible_service_broker_registry_organization: null
diff --git a/roles/ansible_service_broker/vars/openshift-enterprise.yml b/roles/ansible_service_broker/vars/openshift-enterprise.yml
new file mode 100644
index 000000000..19b4a5147
--- /dev/null
+++ b/roles/ansible_service_broker/vars/openshift-enterprise.yml
@@ -0,0 +1,14 @@
+---
+
+__ansible_service_broker_image_prefix: openshift3/
+__ansible_service_broker_image_tag: latest
+
+__ansible_service_broker_etcd_image_prefix: rhel7/
+__ansible_service_broker_etcd_image_tag: latest
+__ansible_service_broker_etcd_image_etcd_path: /bin/etcd
+
+__ansible_service_broker_registry_type: rhcc
+__ansible_service_broker_registry_url: "https://registry.access.redhat.com"
+__ansible_service_broker_registry_user: null
+__ansible_service_broker_registry_password: null
+__ansible_service_broker_registry_organization: null
diff --git a/roles/etcd_migrate/tasks/check.yml b/roles/etcd_migrate/tasks/check.yml
index 2f07713bc..800073873 100644
--- a/roles/etcd_migrate/tasks/check.yml
+++ b/roles/etcd_migrate/tasks/check.yml
@@ -6,7 +6,7 @@
# Run the migration only if the data are v2
- name: Check if there are any v3 data
command: >
- etcdctl --cert {{ etcd_peer_cert_file }} --key {{ etcd_peer_key_file }} --cacert {{ etcd_peer_ca_file }} --endpoints 'https://{{ etcd_peer }}:2379' get "" --from-key --keys-only -w json --limit 1
+ etcdctl --cert {{ etcd_peer_cert_file }} --key {{ etcd_peer_key_file }} --cacert {{ etcd_peer_ca_file }} --endpoints 'https://{{ etcd_peer }}:{{ etcd_client_port }}' get "" --from-key --keys-only -w json --limit 1
environment:
ETCDCTL_API: 3
register: l_etcdctl_output
diff --git a/roles/etcd_migrate/tasks/check_cluster_health.yml b/roles/etcd_migrate/tasks/check_cluster_health.yml
index 1abd6a32f..201d83f99 100644
--- a/roles/etcd_migrate/tasks/check_cluster_health.yml
+++ b/roles/etcd_migrate/tasks/check_cluster_health.yml
@@ -1,7 +1,7 @@
---
- name: Check cluster health
command: >
- etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt --endpoint https://{{ etcd_peer }}:2379 cluster-health
+ etcdctl --cert-file {{ etcd_peer_cert_file }} --key-file {{ etcd_peer_key_file }} --ca-file {{ etcd_peer_ca_file }} --endpoint https://{{ etcd_peer }}:{{ etcd_client_port }} cluster-health
register: etcd_cluster_health
changed_when: false
failed_when: false
diff --git a/roles/etcd_migrate/tasks/check_cluster_status.yml b/roles/etcd_migrate/tasks/check_cluster_status.yml
index 90fe385c1..b69fb5a52 100644
--- a/roles/etcd_migrate/tasks/check_cluster_status.yml
+++ b/roles/etcd_migrate/tasks/check_cluster_status.yml
@@ -2,7 +2,7 @@
# etcd_ip originates from etcd_common role
- name: Check cluster status
command: >
- etcdctl --cert /etc/etcd/peer.crt --key /etc/etcd/peer.key --cacert /etc/etcd/ca.crt --endpoints 'https://{{ etcd_peer }}:2379' -w json endpoint status
+ etcdctl --cert {{ etcd_peer_cert_file }} --key {{ etcd_peer_key_file }} --cacert {{ etcd_peer_ca_file }} --endpoints 'https://{{ etcd_peer }}:{{ etcd_client_port }}' -w json endpoint status
environment:
ETCDCTL_API: 3
register: l_etcd_cluster_status
@@ -15,7 +15,7 @@
# http://docs.ansible.com/ansible/playbooks_filters.html#extracting-values-from-containers
- name: Group all raftIndices into a list
set_fact:
- etcd_members_raft_indices: "{{ groups['oo_etcd_to_config'] | map('extract', hostvars, 'etcd_member_raft_index') | list | unique }}"
+ etcd_members_raft_indices: "{{ groups['oo_etcd_to_migrate'] | map('extract', hostvars, 'etcd_member_raft_index') | list | unique }}"
- name: Check the minimum and the maximum of raftIndices is at most 1
set_fact:
@@ -24,9 +24,9 @@
- debug:
msg: "Raft indices difference: {{ etcd_members_raft_indices_diff }}"
- when: inventory_hostname in groups.oo_etcd_to_config[0]
+ when: inventory_hostname in groups.oo_etcd_to_migrate[0]
# The cluster raft status is ok if the difference of the max and min raft index is at most 1
- name: capture the status
set_fact:
- l_etcd_cluster_status_ok: "{{ hostvars[groups.oo_etcd_to_config[0]]['etcd_members_raft_indices_diff'] | int < 2 }}"
+ l_etcd_cluster_status_ok: "{{ hostvars[groups.oo_etcd_to_migrate[0]]['etcd_members_raft_indices_diff'] | int < 2 }}"
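
As the comment above notes, the cluster status is considered ok when the spread between the largest and smallest member raftIndex is at most 1. In Python terms (the indices are made up):

    # Made-up raft indices gathered from each member's `endpoint status`.
    etcd_members_raft_indices = [20512, 20512, 20513]

    diff = max(etcd_members_raft_indices) - min(etcd_members_raft_indices)
    l_etcd_cluster_status_ok = diff < 2
    assert l_etcd_cluster_status_ok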
diff --git a/roles/etcd_migrate/tasks/migrate.yml b/roles/etcd_migrate/tasks/migrate.yml
index cb479b0cc..7f441568a 100644
--- a/roles/etcd_migrate/tasks/migrate.yml
+++ b/roles/etcd_migrate/tasks/migrate.yml
@@ -20,34 +20,36 @@
- name: Check the etcd v2 data are correctly migrated
fail:
msg: "Failed to migrate a member"
- when: "'finished transforming keys' not in l_etcdctl_migrate.stdout"
+ when: "'finished transforming keys' not in l_etcdctl_migrate.stdout and 'no v2 keys to migrate' not in l_etcdctl_migrate.stdout"
+
+- name: Migration message
+ debug:
+ msg: "Etcd migration finished with: {{ l_etcdctl_migrate.stdout }}"
-# TODO(jchaloup): start the etcd on a different port so no one can access it
-# Once the validation is done
- name: Enable etcd member
service:
name: "{{ l_etcd_service }}"
state: started
+# NOTE: /usr/local/bin may be removed from the PATH by ansible, which is
+# why it's added to the environment in this task.
- name: Re-introduce leases (as a replacement for key TTLs)
command: >
oadm migrate etcd-ttl \
--cert {{ etcd_peer_cert_file }} \
--key {{ etcd_peer_key_file }} \
--cacert {{ etcd_peer_ca_file }} \
- --etcd-address 'https://{{ etcd_peer }}:2379' \
+ --etcd-address 'https://{{ etcd_peer }}:{{ etcd_client_port }}' \
--ttl-keys-prefix {{ item }} \
--lease-duration 1h
environment:
ETCDCTL_API: 3
+ PATH: "/usr/local/bin:/var/usrlocal/bin:{{ ansible_env.PATH }}"
with_items:
- "/kubernetes.io/events"
- "/kubernetes.io/masterleases"
+ delegate_to: "{{ groups.oo_first_master[0] }}"
+ run_once: true
- set_fact:
r_etcd_migrate_success: true
-
-- name: Enable etcd member
- service:
- name: "{{ l_etcd_service }}"
- state: started
diff --git a/roles/lib_openshift/library/oc_atomic_container.py b/roles/lib_openshift/library/oc_atomic_container.py
index 91c0d752f..955c6313e 100644
--- a/roles/lib_openshift/library/oc_atomic_container.py
+++ b/roles/lib_openshift/library/oc_atomic_container.py
@@ -194,7 +194,7 @@ def main():
)
# Verify that the platform supports atomic command
- rc, version_out, err = module.run_command('atomic -v', check_rc=False)
+ rc, version_out, err = module.run_command('rpm -q --queryformat "%{VERSION}\n" atomic', check_rc=False)
if rc != 0:
module.fail_json(msg="Error in running atomic command", err=err)
# This module requires atomic version 1.17.2 or later
diff --git a/roles/lib_openshift/src/ansible/oc_atomic_container.py b/roles/lib_openshift/src/ansible/oc_atomic_container.py
index 16848e9c6..7b81760df 100644
--- a/roles/lib_openshift/src/ansible/oc_atomic_container.py
+++ b/roles/lib_openshift/src/ansible/oc_atomic_container.py
@@ -130,7 +130,7 @@ def main():
)
# Verify that the platform supports atomic command
- rc, version_out, err = module.run_command('atomic -v', check_rc=False)
+ rc, version_out, err = module.run_command('rpm -q --queryformat "%{VERSION}\n" atomic', check_rc=False)
if rc != 0:
module.fail_json(msg="Error in running atomic command", err=err)
# This module requires atomic version 1.17.2 or later
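
Both copies of the module now ask rpm for the installed atomic version instead of running `atomic -v`, which older builds do not support. A standalone Python 3 sketch of that probe, outside the AnsibleModule plumbing:

    import subprocess
    from distutils.version import LooseVersion

    # Query rpm for the installed version of the atomic package, as the
    # module now does via module.run_command().
    proc = subprocess.run(
        ['rpm', '-q', '--queryformat', '%{VERSION}\n', 'atomic'],
        stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
    if proc.returncode != 0:
        raise SystemExit('Error in running atomic command: ' + proc.stderr)

    # The module requires atomic version 1.17.2 or later.
    if LooseVersion(proc.stdout.strip()) < LooseVersion('1.17.2'):
        raise SystemExit('atomic 1.17.2 or later is required')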
diff --git a/roles/openshift_ca/tasks/main.yml b/roles/openshift_ca/tasks/main.yml
index b9a7ec32f..419679bc2 100644
--- a/roles/openshift_ca/tasks/main.yml
+++ b/roles/openshift_ca/tasks/main.yml
@@ -117,25 +117,46 @@
delegate_to: "{{ openshift_ca_host }}"
run_once: true
-- name: Generate the loopback master client config
- command: >
- {{ hostvars[openshift_ca_host].openshift.common.client_binary }} adm create-api-client-config
- {% for named_ca_certificate in openshift.master.named_certificates | default([]) | oo_collect('cafile') %}
- --certificate-authority {{ named_ca_certificate }}
- {% endfor %}
- --certificate-authority={{ openshift_ca_cert }}
- --client-dir={{ openshift_ca_config_dir }}
- --groups=system:masters,system:openshift-master
- --master={{ hostvars[openshift_ca_host].openshift.master.loopback_api_url }}
- --public-master={{ hostvars[openshift_ca_host].openshift.master.loopback_api_url }}
- --signer-cert={{ openshift_ca_cert }}
- --signer-key={{ openshift_ca_key }}
- --signer-serial={{ openshift_ca_serial }}
- --user=system:openshift-master
- --basename=openshift-master
- {% if openshift_version | oo_version_gte_3_5_or_1_5(openshift.common.deployment_type) | bool %}
- --expire-days={{ openshift_master_cert_expire_days }}
- {% endif %}
+# create-api-client-config generates a ca.crt file which will
+# overwrite the OpenShift CA certificate. Generate the loopback
+# kubeconfig in a temporary directory and then copy files into the
+# master config dir to avoid overwriting ca.crt.
+- block:
+ - name: Create temp directory for loopback master client config
+ command: mktemp -d /tmp/openshift-ansible-XXXXXX
+ register: openshift_ca_loopback_tmpdir
+ - name: Generate the loopback master client config
+ command: >
+ {{ hostvars[openshift_ca_host].openshift.common.client_binary }} adm create-api-client-config
+ --certificate-authority={{ openshift_ca_cert }}
+ {% for named_ca_certificate in openshift.master.named_certificates | default([]) | oo_collect('cafile') %}
+ --certificate-authority {{ named_ca_certificate }}
+ {% endfor %}
+ --client-dir={{ openshift_ca_loopback_tmpdir.stdout }}
+ --groups=system:masters,system:openshift-master
+ --master={{ hostvars[openshift_ca_host].openshift.master.loopback_api_url }}
+ --public-master={{ hostvars[openshift_ca_host].openshift.master.loopback_api_url }}
+ --signer-cert={{ openshift_ca_cert }}
+ --signer-key={{ openshift_ca_key }}
+ --signer-serial={{ openshift_ca_serial }}
+ --user=system:openshift-master
+ --basename=openshift-master
+ {% if openshift_version | oo_version_gte_3_5_or_1_5(openshift.common.deployment_type) | bool %}
+ --expire-days={{ openshift_master_cert_expire_days }}
+ {% endif %}
+ - name: Copy generated loopback master client config to master config dir
+ copy:
+ src: "{{ openshift_ca_loopback_tmpdir.stdout }}/{{ item }}"
+ dest: "{{ openshift_ca_config_dir }}"
+ remote_src: true
+ with_items:
+ - openshift-master.crt
+ - openshift-master.key
+ - openshift-master.kubeconfig
+ - name: Delete temp directory
+ file:
+ name: "{{ openshift_ca_loopback_tmpdir.stdout }}"
+ state: absent
when: loopback_context_string not in loopback_config.stdout
delegate_to: "{{ openshift_ca_host }}"
run_once: true
diff --git a/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py b/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py
index 0242f5b43..44a8fa29b 100644
--- a/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py
+++ b/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py
@@ -104,6 +104,7 @@ platforms missing the Python OpenSSL library.
self.extensions = []
PARSING_ALT_NAMES = False
+ PARSING_HEX_SERIAL = False
for line in self.cert_string.split('\n'):
l = line.strip()
if PARSING_ALT_NAMES:
@@ -114,10 +115,26 @@ platforms missing the Python OpenSSL library.
PARSING_ALT_NAMES = False
continue
+ if PARSING_HEX_SERIAL:
+ # Hex serials arrive colon-delimited
+ serial_raw = l.replace(':', '')
+ # Convert to decimal
+ self.serial = int('0x' + serial_raw, base=16)
+ PARSING_HEX_SERIAL = False
+ continue
+
# parse out the bits that we can
if l.startswith('Serial Number:'):
- # Serial Number: 11 (0xb)
- # => 11
+ # Decimal format:
+ # Serial Number: 11 (0xb)
+ # => 11
+ # Hex Format (large serials):
+ # Serial Number:
+ # 0a:de:eb:24:04:75:ab:56:39:14:e9:5a:22:e2:85:bf
+ # => 14449739080294792594019643629255165375
+ if l.endswith(':'):
+ PARSING_HEX_SERIAL = True
+ continue
self.serial = int(l.split()[-2])
elif l.startswith('Not After :'):
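
The parser now handles both serial layouts; the expected values below are taken from the examples in the comment above:

    # Small serials arrive inline, in decimal:
    line = 'Serial Number: 11 (0xb)'
    assert int(line.split()[-2]) == 11

    # Large serials arrive on the following line as colon-delimited hex:
    hex_line = '0a:de:eb:24:04:75:ab:56:39:14:e9:5a:22:e2:85:bf'
    serial = int('0x' + hex_line.replace(':', ''), base=16)
    assert serial == 14449739080294792594019643629255165375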
diff --git a/roles/openshift_certificate_expiry/test/conftest.py b/roles/openshift_certificate_expiry/test/conftest.py
index 4ca35ecbc..df948fff0 100644
--- a/roles/openshift_certificate_expiry/test/conftest.py
+++ b/roles/openshift_certificate_expiry/test/conftest.py
@@ -23,7 +23,10 @@ VALID_CERTIFICATE_PARAMS = [
{
'short_name': 'combined',
'cn': 'combined.example.com',
- 'serial': 6,
+ # Verify that HUGE serials parse correctly.
+ # Frobs PARSING_HEX_SERIAL in _parse_cert
+ # See https://bugzilla.redhat.com/show_bug.cgi?id=1464240
+ 'serial': 14449739080294792594019643629255165375,
'uses': b'clientAuth, serverAuth',
'dns': ['etcd'],
'ip': ['10.0.0.2', '192.168.0.2']
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index 0788ddfb0..cc2a1d2eb 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -537,6 +537,7 @@ def set_node_schedulability(facts):
return facts
+# pylint: disable=too-many-branches
def set_selectors(facts):
""" Set selectors facts if not already present in facts dict
Args:
@@ -570,6 +571,10 @@ def set_selectors(facts):
facts['hosted']['logging'] = {}
if 'selector' not in facts['hosted']['logging'] or facts['hosted']['logging']['selector'] in [None, 'None']:
facts['hosted']['logging']['selector'] = None
+ if 'etcd' not in facts['hosted']:
+ facts['hosted']['etcd'] = {}
+ if 'selector' not in facts['hosted']['etcd'] or facts['hosted']['etcd']['selector'] in [None, 'None']:
+ facts['hosted']['etcd']['selector'] = None
return facts
@@ -907,17 +912,17 @@ def set_version_facts_if_unset(facts):
version_gte_3_1_1_or_1_1_1 = version >= LooseVersion('1.1.1')
version_gte_3_2_or_1_2 = version >= LooseVersion('1.2.0')
version_gte_3_3_or_1_3 = version >= LooseVersion('1.3.0')
- version_gte_3_4_or_1_4 = version >= LooseVersion('1.4.0')
- version_gte_3_5_or_1_5 = version >= LooseVersion('1.5.0')
- version_gte_3_6 = version >= LooseVersion('3.6.0')
+ version_gte_3_4_or_1_4 = version >= LooseVersion('1.4')
+ version_gte_3_5_or_1_5 = version >= LooseVersion('1.5')
+ version_gte_3_6 = version >= LooseVersion('3.6')
else:
version_gte_3_1_or_1_1 = version >= LooseVersion('3.0.2.905')
version_gte_3_1_1_or_1_1_1 = version >= LooseVersion('3.1.1')
version_gte_3_2_or_1_2 = version >= LooseVersion('3.1.1.901')
version_gte_3_3_or_1_3 = version >= LooseVersion('3.3.0')
- version_gte_3_4_or_1_4 = version >= LooseVersion('3.4.0')
- version_gte_3_5_or_1_5 = version >= LooseVersion('3.5.0')
- version_gte_3_6 = version >= LooseVersion('3.6.0')
+ version_gte_3_4_or_1_4 = version >= LooseVersion('3.4')
+ version_gte_3_5_or_1_5 = version >= LooseVersion('3.5')
+ version_gte_3_6 = version >= LooseVersion('3.6')
else:
# 'Latest' version is set to True, 'Next' versions set to False
version_gte_3_1_or_1_1 = True
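
Dropping the trailing .0 from the thresholds matters because LooseVersion compares component lists, so a short-form version fact like '3.6' sorts below '3.6.0' and the old comparison wrongly failed. A quick demonstration:

    from distutils.version import LooseVersion

    # '3.6' parses to [3, 6], which list-compares below [3, 6, 0].
    assert LooseVersion('3.6') < LooseVersion('3.6.0')
    # Against the shortened threshold, both forms compare as expected.
    assert LooseVersion('3.6') >= LooseVersion('3.6')
    assert LooseVersion('3.6.1') >= LooseVersion('3.6')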
@@ -2157,6 +2162,25 @@ class OpenShiftFacts(object):
create_pvc=False
)
),
+ etcd=dict(
+ storage=dict(
+ kind=None,
+ volume=dict(
+ name='etcd',
+ size='1Gi'
+ ),
+ nfs=dict(
+ directory='/exports',
+ options='*(rw,root_squash)'
+ ),
+ host=None,
+ access=dict(
+ modes=['ReadWriteOnce']
+ ),
+ create_pv=True,
+ create_pvc=False
+ )
+ ),
registry=dict(
storage=dict(
kind=None,
diff --git a/roles/openshift_hosted/tasks/registry/registry.yml b/roles/openshift_hosted/tasks/registry/registry.yml
index d895e9a68..2eeb2e7ce 100644
--- a/roles/openshift_hosted/tasks/registry/registry.yml
+++ b/roles/openshift_hosted/tasks/registry/registry.yml
@@ -135,7 +135,7 @@
- name: Determine the latest version of the OpenShift registry deployment
command: |
- oc get deploymentconfig {{ openshift_hosted_registry_name }} \
+ {{ openshift.common.client_binary }} get deploymentconfig {{ openshift_hosted_registry_name }} \
--namespace {{ openshift_hosted_registry_namespace }} \
--config {{ openshift.common.config_base }}/master/admin.kubeconfig \
-o jsonpath='{ .status.latestVersion }'
@@ -143,7 +143,7 @@
- name: Sanity-check that the OpenShift registry rolled out correctly
command: |
- oc get replicationcontroller {{ openshift_hosted_registry_name }}-{{ openshift_hosted_registry_latest_version.stdout }} \
+ {{ openshift.common.client_binary }} get replicationcontroller {{ openshift_hosted_registry_name }}-{{ openshift_hosted_registry_latest_version.stdout }} \
--namespace {{ openshift_hosted_registry_namespace }} \
--config {{ openshift.common.config_base }}/master/admin.kubeconfig \
-o jsonpath='{ .metadata.annotations.openshift\.io/deployment\.phase }'
diff --git a/roles/openshift_hosted/tasks/router/router.yml b/roles/openshift_hosted/tasks/router/router.yml
index 160ae2f5e..c60b67862 100644
--- a/roles/openshift_hosted/tasks/router/router.yml
+++ b/roles/openshift_hosted/tasks/router/router.yml
@@ -92,7 +92,7 @@
- name: Ensure OpenShift router correctly rolls out (best-effort today)
command: |
- oc rollout status deploymentconfig {{ item.name }} \
+ {{ openshift.common.client_binary }} rollout status deploymentconfig {{ item.name }} \
--namespace {{ item.namespace | default('default') }} \
--config {{ openshift.common.config_base }}/master/admin.kubeconfig
async: 600
@@ -102,7 +102,7 @@
- name: Determine the latest version of the OpenShift router deployment
command: |
- oc get deploymentconfig {{ item.name }} \
+ {{ openshift.common.client_binary }} get deploymentconfig {{ item.name }} \
--namespace {{ item.namespace }} \
--config {{ openshift.common.config_base }}/master/admin.kubeconfig \
-o jsonpath='{ .status.latestVersion }'
@@ -111,7 +111,7 @@
- name: Poll for OpenShift router deployment success
command: |
- oc get replicationcontroller {{ item.0.name }}-{{ item.1.stdout }} \
+ {{ openshift.common.client_binary }} get replicationcontroller {{ item.0.name }}-{{ item.1.stdout }} \
--namespace {{ item.0.namespace }} \
--config {{ openshift.common.config_base }}/master/admin.kubeconfig \
-o jsonpath='{ .metadata.annotations.openshift\.io/deployment\.phase }'
diff --git a/roles/openshift_logging/defaults/main.yml b/roles/openshift_logging/defaults/main.yml
index 66d880d23..9b7767ccd 100644
--- a/roles/openshift_logging/defaults/main.yml
+++ b/roles/openshift_logging/defaults/main.yml
@@ -89,7 +89,7 @@ openshift_logging_es_cpu_limit: null
# the logging appenders for the root loggers to write ES logs. Valid values: 'file', 'console'
openshift_logging_es_log_appenders: ['file']
openshift_logging_es_memory_limit: "{{ openshift_hosted_logging_elasticsearch_instance_ram | default('8Gi') }}"
-openshift_logging_es_pv_selector: "{{ openshift_hosted_logging_storage_labels | default(null) }}"
+openshift_logging_es_pv_selector: "{{ openshift_hosted_logging_storage_labels | default('') }}"
openshift_logging_es_pvc_dynamic: "{{ openshift_hosted_logging_elasticsearch_pvc_dynamic | default(False) }}"
openshift_logging_es_pvc_size: "{{ openshift_hosted_logging_elasticsearch_pvc_size | default('') }}"
openshift_logging_es_pvc_prefix: "{{ openshift_hosted_logging_elasticsearch_pvc_prefix | default('logging-es') }}"
diff --git a/roles/openshift_logging_elasticsearch/tasks/main.yaml b/roles/openshift_logging_elasticsearch/tasks/main.yaml
index 684dbe0a0..d9ac52cb7 100644
--- a/roles/openshift_logging_elasticsearch/tasks/main.yaml
+++ b/roles/openshift_logging_elasticsearch/tasks/main.yaml
@@ -194,7 +194,9 @@
- port: 9200
targetPort: "restapi"
-- name: Creating ES storage template
+# Storage classes are used by default; for static storage we disable them
+# by rendering storageClassName as "" in pvc.j2
+- name: Creating ES storage template - static
template:
src: pvc.j2
dest: "{{ tempdir }}/templates/logging-es-pvc.yml"
@@ -203,11 +205,13 @@
size: "{{ openshift_logging_elasticsearch_pvc_size }}"
access_modes: "{{ openshift_logging_elasticsearch_pvc_access_modes | list }}"
pv_selector: "{{ openshift_logging_elasticsearch_pvc_pv_selector }}"
+ storage_class_name: "{{ openshift_logging_elasticsearch_pvc_storage_class_name | default('', true) }}"
when:
- openshift_logging_elasticsearch_storage_type == "pvc"
- not openshift_logging_elasticsearch_pvc_dynamic
-- name: Creating ES storage template
+# Storage classes are used by default when dynamic provisioning is configured
+- name: Creating ES storage template - dynamic
template:
src: pvc.j2
dest: "{{ tempdir }}/templates/logging-es-pvc.yml"
@@ -216,8 +220,6 @@
size: "{{ openshift_logging_elasticsearch_pvc_size }}"
access_modes: "{{ openshift_logging_elasticsearch_pvc_access_modes | list }}"
pv_selector: "{{ openshift_logging_elasticsearch_pvc_pv_selector }}"
- annotations:
- volume.beta.kubernetes.io/storage-class: "dynamic"
when:
- openshift_logging_elasticsearch_storage_type == "pvc"
- openshift_logging_elasticsearch_pvc_dynamic
diff --git a/roles/openshift_logging_elasticsearch/templates/es.j2 b/roles/openshift_logging_elasticsearch/templates/es.j2
index 844dbc8c2..1ca4220a3 100644
--- a/roles/openshift_logging_elasticsearch/templates/es.j2
+++ b/roles/openshift_logging_elasticsearch/templates/es.j2
@@ -78,6 +78,9 @@ spec:
name: "INSTANCE_RAM"
value: "{{openshift_logging_elasticsearch_memory_limit}}"
-
+ name: "HEAP_DUMP_LOCATION"
+ value: "/elasticsearch/persistent/heapdump.hprof"
+ -
name: "NODE_QUORUM"
value: "{{es_node_quorum | int}}"
-
diff --git a/roles/openshift_logging_elasticsearch/templates/pvc.j2 b/roles/openshift_logging_elasticsearch/templates/pvc.j2
index f19a3a750..063f9c5ae 100644
--- a/roles/openshift_logging_elasticsearch/templates/pvc.j2
+++ b/roles/openshift_logging_elasticsearch/templates/pvc.j2
@@ -25,3 +25,6 @@ spec:
resources:
requests:
storage: {{size}}
+{% if storage_class_name is defined %}
+ storageClassName: {{ storage_class_name }}
+{% endif %}
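
With the static branch above passing storage_class_name as an empty string, this template renders an explicit storageClassName: "", which opts the claim out of dynamic provisioning so it binds only to pre-created PVs. A hypothetical rendering (name and size illustrative):

# Sketch of pvc.j2 output for the static case (values hypothetical)
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: logging-es-0
spec:
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 10Gi
  storageClassName: ""
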
diff --git a/roles/openshift_logging_fluentd/tasks/main.yaml b/roles/openshift_logging_fluentd/tasks/main.yaml
index 8194223e8..30b596e22 100644
--- a/roles/openshift_logging_fluentd/tasks/main.yaml
+++ b/roles/openshift_logging_fluentd/tasks/main.yaml
@@ -100,7 +100,7 @@
- copy:
src: secure-forward.conf
dest: "{{ tempdir }}/secure-forward.conf"
- when: fluentd_securefoward_contents is undefined
+ when: fluentd_secureforward_contents is undefined
changed_when: no
diff --git a/roles/openshift_logging_fluentd/templates/fluentd.j2 b/roles/openshift_logging_fluentd/templates/fluentd.j2
index a5695ee26..d9814370f 100644
--- a/roles/openshift_logging_fluentd/templates/fluentd.j2
+++ b/roles/openshift_logging_fluentd/templates/fluentd.j2
@@ -98,9 +98,15 @@ spec:
- name: "BUFFER_SIZE_LIMIT"
value: "{{ openshift_logging_fluentd_buffer_size_limit }}"
- name: "FLUENTD_CPU_LIMIT"
- value: "{{ openshift_logging_fluentd_cpu_limit }}"
+ valueFrom:
+ resourceFieldRef:
+ containerName: "{{ daemonset_container_name }}"
+ resource: limits.cpu
- name: "FLUENTD_MEMORY_LIMIT"
- value: "{{ openshift_logging_fluentd_memory_limit }}"
+ valueFrom:
+ resourceFieldRef:
+ containerName: "{{ daemonset_container_name }}"
+ resource: limits.memory
volumes:
- name: runlogjournal
hostPath:
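
Switching from interpolated values to valueFrom/resourceFieldRef means the kubelet injects the container's actual limits, so the env vars can never drift from the resources stanza. A self-contained sketch under assumed names:

# Sketch (hypothetical container) of the downward-API pattern used above
containers:
- name: example
  resources:
    limits:
      cpu: 100m
      memory: 512Mi
  env:
  - name: CPU_LIMIT
    valueFrom:
      resourceFieldRef:
        containerName: example
        resource: limits.cpu
  - name: MEMORY_LIMIT
    valueFrom:
      resourceFieldRef:
        containerName: example
        resource: limits.memory
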
diff --git a/roles/openshift_logging_mux/templates/mux.j2 b/roles/openshift_logging_mux/templates/mux.j2
index 243698c6a..c3f9b3433 100644
--- a/roles/openshift_logging_mux/templates/mux.j2
+++ b/roles/openshift_logging_mux/templates/mux.j2
@@ -102,15 +102,21 @@ spec:
- name: USE_MUX
value: "true"
- name: MUX_ALLOW_EXTERNAL
- value: "{{ openshift_logging_mux_allow_external | default('false') }}"
+ value: "{{ openshift_logging_mux_allow_external | default('false') | lower }}"
- name: "BUFFER_QUEUE_LIMIT"
value: "{{ openshift_logging_mux_buffer_queue_limit }}"
- name: "BUFFER_SIZE_LIMIT"
value: "{{ openshift_logging_mux_buffer_size_limit }}"
- name: "MUX_CPU_LIMIT"
- value: "{{ openshift_logging_mux_cpu_limit }}"
+ valueFrom:
+ resourceFieldRef:
+ containerName: "mux"
+ resource: limits.cpu
- name: "MUX_MEMORY_LIMIT"
- value: "{{ openshift_logging_mux_memory_limit }}"
+ valueFrom:
+ resourceFieldRef:
+ containerName: "mux"
+ resource: limits.memory
volumes:
- name: config
configMap:
diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
index 897ee7285..e8f7c47b0 100644
--- a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
+++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
@@ -12,7 +12,7 @@ Requires={{ openshift.docker.service_name }}.service
EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-api
Environment=GOTRACEBACK=crash
ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type}}-master-api
-ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master-api --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-api -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v /var/log:/var/log -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} {{ openshift.master.master_image }}:${IMAGE_VERSION} start master api --config=${CONFIG_FILE} $OPTIONS
+ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master-api --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-api -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v /var/log:/var/log -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/pki:/etc/pki:ro {{ openshift.master.master_image }}:${IMAGE_VERSION} start master api --config=${CONFIG_FILE} $OPTIONS
ExecStartPost=/usr/bin/sleep 10
ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-master-api
LimitNOFILE=131072
diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2
index 451f3436a..69db62f16 100644
--- a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2
+++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2
@@ -11,7 +11,7 @@ PartOf={{ openshift.docker.service_name }}.service
EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
Environment=GOTRACEBACK=crash
ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type}}-master-controllers
-ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master-controllers --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} {{ openshift.master.master_image }}:${IMAGE_VERSION} start master controllers --config=${CONFIG_FILE} $OPTIONS
+ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master-controllers --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/pki:/etc/pki:ro {{ openshift.master.master_image }}:${IMAGE_VERSION} start master controllers --config=${CONFIG_FILE} $OPTIONS
ExecStartPost=/usr/bin/sleep 10
ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-master-controllers
LimitNOFILE=131072
diff --git a/roles/openshift_master/templates/master_docker/master.docker.service.j2 b/roles/openshift_master/templates/master_docker/master.docker.service.j2
index 7f40cb042..31c1dfc33 100644
--- a/roles/openshift_master/templates/master_docker/master.docker.service.j2
+++ b/roles/openshift_master/templates/master_docker/master.docker.service.j2
@@ -8,7 +8,7 @@ Wants=etcd_container.service
[Service]
EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master
ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type }}-master
-ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v /var/log:/var/log -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} {{ openshift.master.master_image }}:${IMAGE_VERSION} start master --config=${CONFIG_FILE} $OPTIONS
+ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v /var/log:/var/log -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/pki:/etc/pki:ro {{ openshift.master.master_image }}:${IMAGE_VERSION} start master --config=${CONFIG_FILE} $OPTIONS
ExecStartPost=/usr/bin/sleep 10
ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-master
Restart=always
diff --git a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2
index c484d23cc..c05a27559 100644
--- a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2
+++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2
@@ -1,5 +1,8 @@
OPTIONS=--loglevel={{ openshift.master.debug_level }} --listen={{ 'https' if openshift.master.api_use_ssl else 'http' }}://{{ openshift.master.bind_addr }}:{{ openshift.master.api_port }} --master={{ openshift.master.loopback_api_url }}
CONFIG_FILE={{ openshift_master_config_file }}
+{% if openshift_push_via_dns | default(false) %}
+OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000
+{% endif %}
{% if openshift.common.is_containerized | bool %}
IMAGE_VERSION={{ openshift_image_tag }}
{% endif %}
diff --git a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2
index e0adbbf52..a153fb33d 100644
--- a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2
+++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2
@@ -1,5 +1,8 @@
OPTIONS=--loglevel={{ openshift.master.debug_level }} --listen={{ 'https' if openshift.master.api_use_ssl else 'http' }}://{{ openshift.master.bind_addr }}:{{ openshift.master.controllers_port }}
CONFIG_FILE={{ openshift_master_config_file }}
+{% if openshift_push_via_dns | default(false) %}
+OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000
+{% endif %}
{% if openshift.common.is_containerized | bool %}
IMAGE_VERSION={{ openshift_image_tag }}
{% endif %}
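
When openshift_push_via_dns is enabled, both the api and controllers sysconfig files gain the registry's stable cluster-DNS address, so pushes go through the service name rather than a hardcoded IP. A hypothetical rendering of the fragment above (loglevel and listen address illustrative):

# Sketch of the rendered sysconfig fragment, values hypothetical
OPTIONS=--loglevel=2 --listen=https://0.0.0.0:8444
CONFIG_FILE=/etc/origin/master/master-config.yaml
OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000
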
diff --git a/roles/openshift_master_certificates/tasks/main.yml b/roles/openshift_master_certificates/tasks/main.yml
index 62413536b..d9ffb1b6f 100644
--- a/roles/openshift_master_certificates/tasks/main.yml
+++ b/roles/openshift_master_certificates/tasks/main.yml
@@ -74,10 +74,10 @@
- name: Generate the loopback master client config
command: >
{{ hostvars[openshift_ca_host].openshift.common.client_binary }} adm create-api-client-config
+ --certificate-authority={{ openshift_ca_cert }}
{% for named_ca_certificate in openshift.master.named_certificates | default([]) | oo_collect('cafile') %}
--certificate-authority {{ named_ca_certificate }}
{% endfor %}
- --certificate-authority={{ openshift_ca_cert }}
--client-dir={{ openshift_generated_configs_dir }}/master-{{ hostvars[item].openshift.common.hostname }}
--groups=system:masters,system:openshift-master
--master={{ hostvars[item].openshift.master.loopback_api_url }}
diff --git a/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml b/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml
index 7b81b3c10..8d7ee00ed 100644
--- a/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml
+++ b/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml
@@ -17,14 +17,17 @@
local_action: copy dest="{{ local_tmp.stdout }}/{{ item }}.pwd" content="{{ 15 | oo_random_word }}"
with_items:
- hawkular-metrics
+ become: false
- local_action: slurp src="{{ local_tmp.stdout }}/hawkular-metrics.pwd"
register: hawkular_metrics_pwd
no_log: true
+ become: false
- name: generate htpasswd file for hawkular metrics
local_action: htpasswd path="{{ local_tmp.stdout }}/hawkular-metrics.htpasswd" name=hawkular password="{{ hawkular_metrics_pwd.content | b64decode }}"
no_log: true
+ become: false
- name: copy local generated passwords to target
copy:
diff --git a/roles/openshift_metrics/tasks/install_cassandra.yaml b/roles/openshift_metrics/tasks/install_cassandra.yaml
index 62b7f52cb..7928a0346 100644
--- a/roles/openshift_metrics/tasks/install_cassandra.yaml
+++ b/roles/openshift_metrics/tasks/install_cassandra.yaml
@@ -36,6 +36,7 @@
access_modes: "{{ openshift_metrics_cassandra_pvc_access | list }}"
size: "{{ openshift_metrics_cassandra_pvc_size }}"
pv_selector: "{{ openshift_metrics_cassandra_pv_selector }}"
+ storage_class_name: "{{ openshift_metrics_cassandra_pvc_storage_class_name | default('', true) }}"
with_sequence: count={{ openshift_metrics_cassandra_replicas }}
when:
- openshift_metrics_cassandra_storage_type != 'emptydir'
@@ -50,8 +51,6 @@
obj_name: "{{ openshift_metrics_cassandra_pvc_prefix }}-{{ item }}"
labels:
metrics-infra: hawkular-cassandra
- annotations:
- volume.beta.kubernetes.io/storage-class: dynamic
access_modes: "{{ openshift_metrics_cassandra_pvc_access | list }}"
size: "{{ openshift_metrics_cassandra_pvc_size }}"
pv_selector: "{{ openshift_metrics_cassandra_pv_selector }}"
diff --git a/roles/openshift_metrics/tasks/install_hosa.yaml b/roles/openshift_metrics/tasks/install_hosa.yaml
index cc533a68b..7c9bc26d0 100644
--- a/roles/openshift_metrics/tasks/install_hosa.yaml
+++ b/roles/openshift_metrics/tasks/install_hosa.yaml
@@ -28,7 +28,7 @@
- name: Generate role binding for the hawkular-openshift-agent service account
template:
src: rolebinding.j2
- dest: "{{ mktemp.stdout }}/templates/metrics-hawkular-agent-rolebinding.yaml"
+ dest: "{{ mktemp.stdout }}/templates/metrics-hawkular-openshift-agent-rolebinding.yaml"
vars:
cluster: True
obj_name: hawkular-openshift-agent-rb
diff --git a/roles/openshift_metrics/tasks/install_support.yaml b/roles/openshift_metrics/tasks/install_support.yaml
index 5cefb273d..584e3be05 100644
--- a/roles/openshift_metrics/tasks/install_support.yaml
+++ b/roles/openshift_metrics/tasks/install_support.yaml
@@ -4,6 +4,7 @@
register: htpasswd_check
failed_when: no
changed_when: no
+ become: false
- fail: msg="'htpasswd' is unavailable. Please install httpd-tools on the control node"
when: htpasswd_check.rc == 1
@@ -13,6 +14,7 @@
register: keytool_check
failed_when: no
changed_when: no
+ become: false
- fail: msg="'keytool' is unavailable. Please install java-1.8.0-openjdk-headless on the control node"
when: keytool_check.rc == 1
diff --git a/roles/openshift_metrics/tasks/main.yaml b/roles/openshift_metrics/tasks/main.yaml
index 0b5f23c24..eaabdd20f 100644
--- a/roles/openshift_metrics/tasks/main.yaml
+++ b/roles/openshift_metrics/tasks/main.yaml
@@ -1,6 +1,7 @@
---
- local_action: shell python -c 'import passlib' 2>/dev/null || echo not installed
register: passlib_result
+ become: false
- name: Check that python-passlib is available on the control host
assert:
@@ -52,3 +53,4 @@
tags: metrics_cleanup
changed_when: False
check_mode: no
+ become: false
diff --git a/roles/openshift_metrics/templates/pvc.j2 b/roles/openshift_metrics/templates/pvc.j2
index 0b801b33f..b4e6a1503 100644
--- a/roles/openshift_metrics/templates/pvc.j2
+++ b/roles/openshift_metrics/templates/pvc.j2
@@ -32,3 +32,6 @@ spec:
resources:
requests:
storage: {{size}}
+{% if storage_class_name is defined %}
+ storageClassName: {{ storage_class_name }}
+{% endif %}
diff --git a/roles/openshift_named_certificates/filter_plugins/openshift_named_certificates.py b/roles/openshift_named_certificates/filter_plugins/openshift_named_certificates.py
new file mode 100644
index 000000000..6ed6d404c
--- /dev/null
+++ b/roles/openshift_named_certificates/filter_plugins/openshift_named_certificates.py
@@ -0,0 +1,21 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+'''
+Custom filters for use with openshift named certificates
+'''
+
+
+class FilterModule(object):
+ ''' Custom ansible filters for use with openshift named certificates'''
+
+ @staticmethod
+ def oo_named_certificates_list(named_certificates):
+ ''' Returns named certificates list with correct fields for the master
+ config file.'''
+ return [{'certFile': named_certificate['certfile'],
+ 'keyFile': named_certificate['keyfile'],
+ 'names': named_certificate['names']} for named_certificate in named_certificates]
+
+ def filters(self):
+ ''' returns a mapping of filters to methods '''
+ return {"oo_named_certificates_list": self.oo_named_certificates_list}
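
A hedged sketch of how the new filter might be invoked; the certificate paths and hostname are hypothetical, and the output simply renames certfile/keyfile to the camelCase keys the master config expects:

# Hypothetical usage of oo_named_certificates_list
- set_fact:
    example_named_certs: "{{ [{'certfile': '/etc/origin/master/custom.crt', 'keyfile': '/etc/origin/master/custom.key', 'names': ['api.example.com']}] | oo_named_certificates_list }}"
# yields: [{'certFile': '/etc/origin/master/custom.crt', 'keyFile': '/etc/origin/master/custom.key', 'names': ['api.example.com']}]
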
diff --git a/roles/openshift_node/templates/openshift.docker.node.service b/roles/openshift_node/templates/openshift.docker.node.service
index d89b64b06..cd0a1a60b 100644
--- a/roles/openshift_node/templates/openshift.docker.node.service
+++ b/roles/openshift_node/templates/openshift.docker.node.service
@@ -17,7 +17,7 @@ After={{ openshift.common.service_type }}-node-dep.service
EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node
EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node-dep
ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type }}-node
-ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node -v /:/rootfs:ro,rslave -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} -e HOST=/rootfs -e HOST_ETC=/host-etc -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro -v /run:/run -v /sys:/sys:rw -v /sys/fs/cgroup:/sys/fs/cgroup:rw -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch -v /etc/origin/sdn:/etc/openshift-sdn -v /var/lib/cni:/var/lib/cni -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS {{ openshift.node.node_image }}:${IMAGE_VERSION}
+ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node -v /:/rootfs:ro,rslave -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} -e HOST=/rootfs -e HOST_ETC=/host-etc -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro -v /run:/run -v /sys:/sys:rw -v /sys/fs/cgroup:/sys/fs/cgroup:rw -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch -v /etc/origin/sdn:/etc/openshift-sdn -v /var/lib/cni:/var/lib/cni -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS -v /etc/pki:/etc/pki:ro {{ openshift.node.node_image }}:${IMAGE_VERSION}
ExecStartPost=/usr/bin/sleep 10
ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-node
SyslogIdentifier={{ openshift.common.service_type }}-node
diff --git a/roles/openshift_node_upgrade/tasks/restart.yml b/roles/openshift_node_upgrade/tasks/restart.yml
index 508eb9358..b24d8cec7 100644
--- a/roles/openshift_node_upgrade/tasks/restart.yml
+++ b/roles/openshift_node_upgrade/tasks/restart.yml
@@ -16,7 +16,11 @@
- name: Restart docker
service:
name: "{{ openshift.docker.service_name }}"
- state: restarted
+ state: started
+ register: docker_start_result
+ until: not docker_start_result | failed
+ retries: 1
+ delay: 30
- name: Update docker facts
openshift_facts:
diff --git a/roles/openshift_service_catalog/defaults/main.yml b/roles/openshift_service_catalog/defaults/main.yml
new file mode 100644
index 000000000..01ee2544d
--- /dev/null
+++ b/roles/openshift_service_catalog/defaults/main.yml
@@ -0,0 +1,3 @@
+---
+openshift_service_catalog_remove: false
+openshift_service_catalog_nodeselector: {"openshift-infra": "apiserver"}
diff --git a/roles/openshift_service_catalog/files/kubeservicecatalog_roles_bindings.yml b/roles/openshift_service_catalog/files/kubeservicecatalog_roles_bindings.yml
new file mode 100644
index 000000000..880146ca4
--- /dev/null
+++ b/roles/openshift_service_catalog/files/kubeservicecatalog_roles_bindings.yml
@@ -0,0 +1,161 @@
+apiVersion: v1
+kind: Template
+metadata:
+ name: service-catalog
+objects:
+
+- kind: ClusterRole
+ apiVersion: v1
+ metadata:
+ name: servicecatalog-serviceclass-viewer
+ rules:
+ - apiGroups:
+ - servicecatalog.k8s.io
+ resources:
+ - serviceclasses
+ verbs:
+ - list
+ - watch
+ - get
+
+- kind: ClusterRoleBinding
+ apiVersion: v1
+ metadata:
+ name: servicecatalog-serviceclass-viewer-binding
+ roleRef:
+ name: servicecatalog-serviceclass-viewer
+ groupNames:
+ - system:authenticated
+
+- kind: ServiceAccount
+ apiVersion: v1
+ metadata:
+ name: service-catalog-controller
+
+- kind: ServiceAccount
+ apiVersion: v1
+ metadata:
+ name: service-catalog-apiserver
+
+- kind: ClusterRole
+ apiVersion: v1
+ metadata:
+ name: sar-creator
+ rules:
+ - apiGroups:
+ - ""
+ resources:
+ - subjectaccessreviews.authorization.k8s.io
+ verbs:
+ - create
+
+- kind: ClusterRoleBinding
+ apiVersion: v1
+ metadata:
+ name: service-catalog-sar-creator-binding
+ roleRef:
+ name: sar-creator
+ userNames:
+ - system:serviceaccount:kube-service-catalog:service-catalog-apiserver
+
+- kind: ClusterRole
+ apiVersion: v1
+ metadata:
+ name: namespace-viewer
+ rules:
+ - apiGroups:
+ - ""
+ resources:
+ - namespaces
+ verbs:
+ - list
+ - watch
+ - get
+
+- kind: ClusterRoleBinding
+ apiVersion: v1
+ metadata:
+ name: service-catalog-namespace-viewer-binding
+ roleRef:
+ name: namespace-viewer
+ userNames:
+ - system:serviceaccount:kube-service-catalog:service-catalog-apiserver
+
+- kind: ClusterRoleBinding
+ apiVersion: v1
+ metadata:
+ name: service-catalog-controller-namespace-viewer-binding
+ roleRef:
+ name: namespace-viewer
+ userNames:
+ - system:serviceaccount:kube-service-catalog:service-catalog-controller
+
+- kind: ClusterRole
+ apiVersion: v1
+ metadata:
+ name: service-catalog-controller
+ rules:
+ - apiGroups:
+ - ""
+ resources:
+ - secrets
+ - podpresets
+ verbs:
+ - create
+ - update
+ - delete
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - servicecatalog.k8s.io
+ resources:
+ - brokers/status
+ - instances/status
+ - bindings/status
+ verbs:
+ - update
+
+- kind: ClusterRoleBinding
+ apiVersion: v1
+ metadata:
+ name: service-catalog-controller-binding
+ roleRef:
+ name: service-catalog-controller
+ userNames:
+ - system:serviceaccount:kube-service-catalog:service-catalog-controller
+
+- kind: Role
+ apiVersion: v1
+ metadata:
+ name: endpoint-accessor
+ rules:
+ - apiGroups:
+ - ""
+ resources:
+ - endpoints
+ verbs:
+ - list
+ - watch
+ - get
+ - create
+ - update
+
+- kind: RoleBinding
+ apiVersion: v1
+ metadata:
+ name: endpoint-accessor-binding
+ roleRef:
+ name: endpoint-accessor
+ namespace: kube-service-catalog
+ userNames:
+ - system:serviceaccount:kube-service-catalog:service-catalog-controller
+
+- kind: ClusterRoleBinding
+ apiVersion: v1
+ metadata:
+ name: system:auth-delegator-binding
+ roleRef:
+ name: system:auth-delegator
+ userNames:
+ - system:serviceaccount:kube-service-catalog:service-catalog-apiserver
diff --git a/roles/openshift_service_catalog/files/kubesystem_roles_bindings.yml b/roles/openshift_service_catalog/files/kubesystem_roles_bindings.yml
new file mode 100644
index 000000000..f6ee0955d
--- /dev/null
+++ b/roles/openshift_service_catalog/files/kubesystem_roles_bindings.yml
@@ -0,0 +1,38 @@
+apiVersion: v1
+kind: Template
+metadata:
+ name: kube-system-service-catalog
+objects:
+
+- kind: Role
+ apiVersion: v1
+ metadata:
+ name: extension-apiserver-authentication-reader
+ namespace: ${KUBE_SYSTEM_NAMESPACE}
+ rules:
+ - apiGroups:
+ - ""
+ resourceNames:
+ - extension-apiserver-authentication
+ resources:
+ - configmaps
+ verbs:
+ - get
+
+- kind: RoleBinding
+ apiVersion: v1
+ metadata:
+ name: extension-apiserver-authentication-reader-binding
+ namespace: ${KUBE_SYSTEM_NAMESPACE}
+ roleRef:
+ name: extension-apiserver-authentication-reader
+ namespace: kube-system
+ userNames:
+ - system:serviceaccount:kube-service-catalog:service-catalog-apiserver
+
+parameters:
+- description: Do not change this value.
+ displayName: Name of the kube-system namespace
+ name: KUBE_SYSTEM_NAMESPACE
+ required: true
+ value: kube-system
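
The KUBE_SYSTEM_NAMESPACE parameter exists only so the objects can reference the namespace by name; install.yml instantiates the template through oc_process, for which the task below is a rough CLI sketch (a hedged equivalent, not the role's literal code):

# Sketch: CLI equivalent of the oc_process call that consumes this template
- name: Example - process and create the kube-system template
  shell: >
    oc process kube-system-service-catalog -n kube-system
    | oc create -n kube-system -f -
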
diff --git a/roles/openshift_service_catalog/meta/main.yml b/roles/openshift_service_catalog/meta/main.yml
new file mode 100644
index 000000000..1e6b837cd
--- /dev/null
+++ b/roles/openshift_service_catalog/meta/main.yml
@@ -0,0 +1,17 @@
+---
+galaxy_info:
+ author: OpenShift Red Hat
+ description: OpenShift Service Catalog
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 2.2
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+dependencies:
+- role: lib_openshift
+- role: openshift_facts
+- role: lib_utils
diff --git a/roles/openshift_service_catalog/tasks/generate_certs.yml b/roles/openshift_service_catalog/tasks/generate_certs.yml
new file mode 100644
index 000000000..cc897b032
--- /dev/null
+++ b/roles/openshift_service_catalog/tasks/generate_certs.yml
@@ -0,0 +1,70 @@
+---
+- name: Create service catalog cert directory
+ file:
+ path: "{{ openshift.common.config_base }}/service-catalog"
+ state: directory
+ mode: 0755
+ changed_when: False
+ check_mode: no
+
+- set_fact:
+ generated_certs_dir: "{{ openshift.common.config_base }}/service-catalog"
+
+- name: Generate signing cert
+ command: >
+ {{ openshift.common.client_binary }} adm --config=/etc/origin/master/admin.kubeconfig ca create-signer-cert
+ --key={{ generated_certs_dir }}/ca.key --cert={{ generated_certs_dir }}/ca.crt
+ --serial={{ generated_certs_dir }}/apiserver.serial.txt --name=service-catalog-signer
+
+- name: Generating server keys
+ oc_adm_ca_server_cert:
+ cert: "{{ generated_certs_dir }}/apiserver.crt"
+ key: "{{ generated_certs_dir }}/apiserver.key"
+ hostnames: "apiserver.kube-service-catalog.svc,apiserver.kube-service-catalog.svc.cluster.local,apiserver.kube-service-catalog"
+ signer_cert: "{{ generated_certs_dir }}/ca.crt"
+ signer_key: "{{ generated_certs_dir }}/ca.key"
+ signer_serial: "{{ generated_certs_dir }}/apiserver.serial.txt"
+
+- name: Create apiserver-ssl secret
+ oc_secret:
+ state: present
+ name: apiserver-ssl
+ namespace: kube-service-catalog
+ files:
+ - name: tls.crt
+ path: "{{ generated_certs_dir }}/apiserver.crt"
+ - name: tls.key
+ path: "{{ generated_certs_dir }}/apiserver.key"
+
+- slurp:
+ src: "{{ generated_certs_dir }}/ca.crt"
+ register: apiserver_ca
+
+- shell: >
+ oc get apiservices.apiregistration.k8s.io/v1alpha1.servicecatalog.k8s.io -n kube-service-catalog || echo "not found"
+ register: get_apiservices
+ changed_when: no
+
+- name: Create api service
+ oc_obj:
+ state: present
+ name: v1alpha1.servicecatalog.k8s.io
+ kind: apiservices.apiregistration.k8s.io
+ namespace: "kube-service-catalog"
+ content:
+ path: /tmp/apisvcout
+ data:
+ apiVersion: apiregistration.k8s.io/v1beta1
+ kind: APIService
+ metadata:
+ name: v1alpha1.servicecatalog.k8s.io
+ spec:
+ group: servicecatalog.k8s.io
+ version: v1alpha1
+ service:
+ namespace: "kube-service-catalog"
+ name: apiserver
+ caBundle: "{{ apiserver_ca.content }}"
+ groupPriorityMinimum: 20
+ versionPriority: 10
+ when: "'not found' in get_apiservices.stdout"
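
The slurp above can feed the APIService directly because slurp returns file content base64-encoded, which is exactly the encoding caBundle expects. A hedged illustration:

# Sketch: the registered content is already base64, no b64encode needed
- slurp:
    src: "{{ generated_certs_dir }}/ca.crt"
  register: example_ca
- debug:
    msg: "caBundle prefix: {{ example_ca.content[:12] }} (base64)"
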
diff --git a/roles/openshift_service_catalog/tasks/install.yml b/roles/openshift_service_catalog/tasks/install.yml
new file mode 100644
index 000000000..c1773b5f6
--- /dev/null
+++ b/roles/openshift_service_catalog/tasks/install.yml
@@ -0,0 +1,181 @@
+---
+# do any asserts here
+
+- name: Create temp directory for doing work in
+ command: mktemp -d /tmp/openshift-service-catalog-ansible-XXXXXX
+ register: mktemp
+ changed_when: False
+
+
+- include: wire_aggregator.yml
+
+- name: Set default image variables based on deployment_type
+ include_vars: "{{ item }}"
+ with_first_found:
+ - "{{ openshift_deployment_type | default(deployment_type) }}.yml"
+ - "default_images.yml"
+
+- name: Set service_catalog image facts
+ set_fact:
+ openshift_service_catalog_image_prefix: "{{ openshift_service_catalog_image_prefix | default(__openshift_service_catalog_image_prefix) }}"
+ openshift_service_catalog_image_version: "{{ openshift_service_catalog_image_version | default(__openshift_service_catalog_image_version) }}"
+
+- name: Set Service Catalog namespace
+ oc_project:
+ state: present
+ name: "kube-service-catalog"
+# node_selector: "{{ openshift_service_catalog_nodeselector | default(null) }}"
+
+- include: generate_certs.yml
+
+- copy:
+ src: kubeservicecatalog_roles_bindings.yml
+ dest: "{{ mktemp.stdout }}/kubeservicecatalog_roles_bindings.yml"
+
+- oc_obj:
+ name: service-catalog
+ kind: template
+ namespace: "kube-service-catalog"
+ files:
+ - "{{ mktemp.stdout }}/kubeservicecatalog_roles_bindings.yml"
+ delete_after: yes
+
+- oc_process:
+ create: True
+ template_name: service-catalog
+ namespace: "kube-service-catalog"
+
+- copy:
+ src: kubesystem_roles_bindings.yml
+ dest: "{{ mktemp.stdout }}/kubesystem_roles_bindings.yml"
+
+- oc_obj:
+ name: kube-system-service-catalog
+ kind: template
+ namespace: kube-system
+ files:
+ - "{{ mktemp.stdout }}/kubesystem_roles_bindings.yml"
+ delete_after: yes
+
+- oc_process:
+ create: True
+ template_name: kube-system-service-catalog
+ namespace: kube-system
+
+- shell: >
+ oc get policybindings/kube-system:default -n kube-system || echo "not found"
+ register: get_kube_system
+ changed_when: no
+
+- command: >
+ oc create policybinding kube-system -n kube-system
+ when: "'not found' in get_kube_system.stdout"
+
+- oc_adm_policy_user:
+ namespace: kube-service-catalog
+ resource_kind: scc
+ resource_name: hostmount-anyuid
+ state: present
+ user: "system:serviceaccount:kube-service-catalog:service-catalog-apiserver"
+
+- name: Set SA cluster-role
+ oc_adm_policy_user:
+ state: present
+ namespace: "kube-service-catalog"
+ resource_kind: cluster-role
+ resource_name: admin
+ user: "system:serviceaccount:kube-service-catalog:default"
+
+## api server
+- template:
+ src: api_server.j2
+ dest: "{{ mktemp.stdout }}/service_catalog_api_server.yml"
+ vars:
+ image: ""
+ namespace: ""
+ cpu_limit: none
+ memory_limit: none
+ cpu_requests: none
+ memory_request: none
+ cors_allowed_origin: localhost
+ node_selector: "{{ openshift_service_catalog_nodeselector | default ({'openshift-infra': 'apiserver'}) }}"
+
+- name: Set Service Catalog API Server daemonset
+ oc_obj:
+ state: present
+ namespace: "kube-service-catalog"
+ kind: daemonset
+ name: apiserver
+ files:
+ - "{{ mktemp.stdout }}/service_catalog_api_server.yml"
+ delete_after: yes
+
+- template:
+ src: api_server_service.j2
+ dest: "{{ mktemp.stdout }}/service_catalog_api_service.yml"
+
+- name: Set Service Catalog API Server service
+ oc_obj:
+ state: present
+ namespace: "kube-service-catalog"
+ kind: service
+ name: apiserver
+ files:
+ - "{{ mktemp.stdout }}/service_catalog_api_service.yml"
+ delete_after: yes
+
+- template:
+ src: api_server_route.j2
+ dest: "{{ mktemp.stdout }}/service_catalog_api_route.yml"
+
+- name: Set Service Catalog API Server route
+ oc_obj:
+ state: present
+ namespace: "kube-service-catalog"
+ kind: route
+ name: apiserver
+ files:
+ - "{{ mktemp.stdout }}/service_catalog_api_route.yml"
+ delete_after: yes
+
+## controller manager
+- template:
+ src: controller_manager.j2
+ dest: "{{ mktemp.stdout }}/controller_manager.yml"
+ vars:
+ image: ""
+ cpu_limit: none
+ memory_limit: none
+ node_selector: "{{ openshift_service_catalog_nodeselector | default ({'openshift-infra': 'apiserver'}) }}"
+
+- name: Set Controller Manager deployment
+ oc_obj:
+ state: present
+ namespace: "kube-service-catalog"
+ kind: daemonset
+ name: controller-manager
+ files:
+ - "{{ mktemp.stdout }}/controller_manager.yml"
+ delete_after: yes
+
+- template:
+ src: controller_manager_service.j2
+ dest: "{{ mktemp.stdout }}/controller_manager_service.yml"
+
+- name: Set Controller Manager service
+ oc_obj:
+ state: present
+ namespace: "kube-service-catalog"
+ kind: service
+ name: controller-manager
+ files:
+ - "{{ mktemp.stdout }}/controller_manager_service.yml"
+ delete_after: yes
+
+- include: start_api_server.yml
+
+- name: Delete temp directory
+ file:
+ name: "{{ mktemp.stdout }}"
+ state: absent
+ changed_when: False
diff --git a/roles/openshift_service_catalog/tasks/main.yml b/roles/openshift_service_catalog/tasks/main.yml
new file mode 100644
index 000000000..dc0d6a370
--- /dev/null
+++ b/roles/openshift_service_catalog/tasks/main.yml
@@ -0,0 +1,8 @@
+---
+# do any asserts here
+
+- include: install.yml
+ when: not openshift_service_catalog_remove | default(false) | bool
+
+- include: remove.yml
+ when: openshift_service_catalog_remove | default(false) | bool
diff --git a/roles/openshift_service_catalog/tasks/remove.yml b/roles/openshift_service_catalog/tasks/remove.yml
new file mode 100644
index 000000000..2fb1ec440
--- /dev/null
+++ b/roles/openshift_service_catalog/tasks/remove.yml
@@ -0,0 +1,56 @@
+---
+- name: Remove Service Catalog APIServer
+ command: >
+ oc delete apiservices.apiregistration.k8s.io/v1alpha1.servicecatalog.k8s.io --ignore-not-found -n kube-service-catalog
+
+- name: Remove Policy Binding
+ command: >
+ oc delete policybindings/kube-system:default -n kube-system --ignore-not-found
+
+# TODO: this module doesn't currently remove this
+#- name: Remove service catalog api service
+# oc_obj:
+# state: absent
+# namespace: "kube-service-catalog"
+# kind: apiservices.apiregistration.k8s.io
+# name: v1alpha1.servicecatalog.k8s.io
+
+- name: Remove Service Catalog API Server route
+ oc_obj:
+ state: absent
+ namespace: "kube-service-catalog"
+ kind: route
+ name: apiserver
+
+- name: Remove Service Catalog API Server service
+ oc_obj:
+ state: absent
+ namespace: "kube-service-catalog"
+ kind: service
+ name: apiserver
+
+- name: Remove Service Catalog API Server daemonset
+ oc_obj:
+ state: absent
+ namespace: "kube-service-catalog"
+ kind: daemonset
+ name: apiserver
+
+- name: Remove Controller Manager service
+ oc_obj:
+ state: absent
+ namespace: "kube-service-catalog"
+ kind: service
+ name: controller-manager
+
+- name: Remove Controller Manager deployment
+ oc_obj:
+ state: absent
+ namespace: "kube-service-catalog"
+ kind: deployment
+ name: controller-manager
+
+- name: Remove Service Catalog namespace
+ oc_project:
+ state: absent
+ name: "kube-service-catalog"
diff --git a/roles/openshift_service_catalog/tasks/start_api_server.yml b/roles/openshift_service_catalog/tasks/start_api_server.yml
new file mode 100644
index 000000000..b143292b6
--- /dev/null
+++ b/roles/openshift_service_catalog/tasks/start_api_server.yml
@@ -0,0 +1,22 @@
+---
+# Label nodes and wait for apiserver and controller to be running (at least one)
+- name: Label {{ openshift.node.nodename }} for APIServer and controller deployment
+ oc_label:
+ name: "{{ openshift.node.nodename }}"
+ kind: node
+ state: add
+ labels: "{{ openshift_service_catalog_nodeselector | default ({'openshift-infra': 'apiserver'}) | oo_dict_to_list_of_dict }}"
+
+# wait to see that the apiserver is available
+- name: wait for api server to be ready
+ command: >
+ curl -k https://apiserver.kube-service-catalog.svc/healthz
+ args:
+ # Disables the following warning:
+ # Consider using get_url or uri module rather than running curl
+ warn: no
+ register: api_health
+ until: api_health.stdout == 'ok'
+ retries: 120
+ delay: 1
+ changed_when: false
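
The curl loop sidesteps the uri module's python-httplib2 dependency noted elsewhere in this role; where that library is present on the target, a roughly equivalent (hypothetical) formulation would be:

# Sketch: uri-module variant of the health check, assuming python-httplib2
- name: wait for api server to be ready (uri variant)
  uri:
    url: https://apiserver.kube-service-catalog.svc/healthz
    validate_certs: no
    return_content: yes
  register: example_api_health
  until: example_api_health.content == 'ok'
  retries: 120
  delay: 1
  changed_when: false
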
diff --git a/roles/openshift_service_catalog/tasks/wire_aggregator.yml b/roles/openshift_service_catalog/tasks/wire_aggregator.yml
new file mode 100644
index 000000000..3e5897ba4
--- /dev/null
+++ b/roles/openshift_service_catalog/tasks/wire_aggregator.yml
@@ -0,0 +1,86 @@
+---
+# TODO: this currently has a bug where hostnames are required
+- name: Creating Aggregator signer certs
+ command: >
+ oc adm ca create-signer-cert
+ --cert=/etc/origin/master/front-proxy-ca.crt
+ --key=/etc/origin/master/front-proxy-ca.key
+ --serial=/etc/origin/master/ca.serial.txt
+# oc_adm_ca_server_cert:
+# cert: /etc/origin/master/front-proxy-ca.crt
+# key: /etc/origin/master/front-proxy-ca.key
+
+- name: Create api-client config for Aggregator
+ command: >
+ oc adm create-api-client-config
+ --certificate-authority=/etc/origin/master/front-proxy-ca.crt
+ --signer-cert=/etc/origin/master/front-proxy-ca.crt
+ --signer-key=/etc/origin/master/front-proxy-ca.key
+ --user aggregator-front-proxy
+ --client-dir=/etc/origin/master
+ --signer-serial=/etc/origin/master/ca.serial.txt
+
+- name: Update master config
+ yedit:
+ state: present
+ src: /etc/origin/master/master-config.yaml
+ edits:
+ - key: aggregatorConfig.proxyClientInfo.certFile
+ value: aggregator-front-proxy.crt
+ - key: aggregatorConfig.proxyClientInfo.keyFile
+ value: aggregator-front-proxy.key
+ - key: authConfig.requestHeader.clientCA
+ value: front-proxy-ca.crt
+ - key: authConfig.requestHeader.clientCommonNames
+ value: [aggregator-front-proxy]
+ - key: authConfig.requestHeader.usernameHeaders
+ value: [X-Remote-User]
+ - key: authConfig.requestHeader.groupHeaders
+ value: [X-Remote-Group]
+ - key: authConfig.requestHeader.extraHeaderPrefixes
+ value: [X-Remote-Extra-]
+ register: yedit_output
+
+#restart master serially here
+- name: restart master
+ systemd: name={{ openshift.common.service_type }}-master state=restarted
+ when:
+ - yedit_output.changed
+ - openshift.master.ha is not defined or not openshift.master.ha | bool
+
+- name: restart master api
+ systemd: name={{ openshift.common.service_type }}-master-api state=restarted
+ when:
+ - yedit_output.changed
+ - openshift.master.ha is defined and openshift.master.ha | bool
+ - openshift.master.cluster_method == 'native'
+
+- name: restart master controllers
+ systemd: name={{ openshift.common.service_type }}-master-controllers state=restarted
+ when:
+ - yedit_output.changed
+ - openshift.master.ha is defined and openshift.master.ha | bool
+ - openshift.master.cluster_method == 'native'
+
+- name: Verify API Server
+ # Using curl here since the uri module requires python-httplib2 and
+ # wait_for port doesn't provide health information.
+ command: >
+ curl --silent --tlsv1.2
+ {% if openshift.common.version_gte_3_2_or_1_2 | bool %}
+ --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt
+ {% else %}
+ --cacert {{ openshift.common.config_base }}/master/ca.crt
+ {% endif %}
+ {{ openshift.master.api_url }}/healthz/ready
+ args:
+ # Disables the following warning:
+ # Consider using get_url or uri module rather than running curl
+ warn: no
+ register: api_available_output
+ until: api_available_output.stdout == 'ok'
+ retries: 120
+ delay: 1
+ changed_when: false
+ when:
+ - yedit_output.changed
diff --git a/roles/openshift_service_catalog/templates/api_server.j2 b/roles/openshift_service_catalog/templates/api_server.j2
new file mode 100644
index 000000000..8ae6b6c8d
--- /dev/null
+++ b/roles/openshift_service_catalog/templates/api_server.j2
@@ -0,0 +1,80 @@
+apiVersion: extensions/v1beta1
+kind: DaemonSet
+metadata:
+ labels:
+ app: apiserver
+ name: apiserver
+spec:
+ selector:
+ matchLabels:
+ app: apiserver
+ updateStrategy:
+ rollingUpdate:
+ maxUnavailable: 1
+ type: RollingUpdate
+ template:
+ metadata:
+ labels:
+ app: apiserver
+ spec:
+ serviceAccountName: service-catalog-apiserver
+ nodeSelector:
+{% for key, value in node_selector.iteritems() %}
+ {{key}}: "{{value}}"
+{% endfor %}
+ containers:
+ - args:
+ - --storage-type
+ - etcd
+ - --secure-port
+ - "6443"
+ - --etcd-servers
+# TODO: come back and get openshift.common.hostname to work
+ - https://{{ openshift.common.ip }}:{{ openshift.master.etcd_port }}
+ - --etcd-cafile
+ - /etc/origin/master/master.etcd-ca.crt
+ - --etcd-certfile
+ - /etc/origin/master/master.etcd-client.crt
+ - --etcd-keyfile
+ - /etc/origin/master/master.etcd-client.key
+ - -v
+ - "10"
+ - --cors-allowed-origins
+ - {{ cors_allowed_origin }}
+ - --admission-control
+ - "KubernetesNamespaceLifecycle"
+ image: {{ openshift_service_catalog_image_prefix }}service-catalog:{{ openshift_service_catalog_image_version }}
+ command: ["/usr/bin/apiserver"]
+ imagePullPolicy: Always
+ name: apiserver
+ ports:
+ - containerPort: 6443
+ protocol: TCP
+ resources: {}
+ terminationMessagePath: /dev/termination-log
+ volumeMounts:
+ - mountPath: /var/run/kubernetes-service-catalog
+ name: apiserver-ssl
+ readOnly: true
+ - mountPath: /etc/origin/master
+ name: etcd-host-cert
+ readOnly: true
+ dnsPolicy: ClusterFirst
+ restartPolicy: Always
+ securityContext: {}
+ terminationGracePeriodSeconds: 30
+ volumes:
+ - name: apiserver-ssl
+ secret:
+ defaultMode: 420
+ secretName: apiserver-ssl
+ items:
+ - key: tls.crt
+ path: apiserver.crt
+ - key: tls.key
+ path: apiserver.key
+ - hostPath:
+ path: /etc/origin/master
+ name: etcd-host-cert
+ - emptyDir: {}
+ name: data-dir
diff --git a/roles/openshift_service_catalog/templates/api_server_route.j2 b/roles/openshift_service_catalog/templates/api_server_route.j2
new file mode 100644
index 000000000..3c3da254d
--- /dev/null
+++ b/roles/openshift_service_catalog/templates/api_server_route.j2
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Route
+metadata:
+ name: apiserver
+spec:
+ port:
+ targetPort: secure
+ tls:
+ termination: passthrough
+ to:
+ kind: Service
+ name: apiserver
+ weight: 100
+ wildcardPolicy: None
diff --git a/roles/openshift_service_catalog/templates/api_server_service.j2 b/roles/openshift_service_catalog/templates/api_server_service.j2
new file mode 100644
index 000000000..bae337201
--- /dev/null
+++ b/roles/openshift_service_catalog/templates/api_server_service.j2
@@ -0,0 +1,13 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: apiserver
+spec:
+ ports:
+ - name: secure
+ port: 443
+ protocol: TCP
+ targetPort: 6443
+ selector:
+ app: apiserver
+ sessionAffinity: None
diff --git a/roles/openshift_service_catalog/templates/controller_manager.j2 b/roles/openshift_service_catalog/templates/controller_manager.j2
new file mode 100644
index 000000000..33932eeb7
--- /dev/null
+++ b/roles/openshift_service_catalog/templates/controller_manager.j2
@@ -0,0 +1,46 @@
+apiVersion: extensions/v1beta1
+kind: DaemonSet
+metadata:
+ labels:
+ app: controller-manager
+ name: controller-manager
+spec:
+ selector:
+ matchLabels:
+ app: controller-manager
+ strategy:
+ rollingUpdate:
+ maxUnavailable: 1
+ type: RollingUpdate
+ template:
+ metadata:
+ labels:
+ app: controller-manager
+ spec:
+ nodeSelector:
+{% for key, value in node_selector.iteritems() %}
+ {{key}}: "{{value}}"
+{% endfor %}
+ containers:
+ - env:
+ - name: K8S_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ args:
+ - -v
+ - "5"
+ - "--leader-election-namespace=$(K8S_NAMESPACE)"
+ image: {{ openshift_service_catalog_image_prefix }}service-catalog:{{ openshift_service_catalog_image_version }}
+ command: ["/usr/bin/controller-manager"]
+ imagePullPolicy: Always
+ name: controller-manager
+ ports:
+ - containerPort: 8080
+ protocol: TCP
+ resources: {}
+ terminationMessagePath: /dev/termination-log
+ dnsPolicy: ClusterFirst
+ restartPolicy: Always
+ securityContext: {}
+ terminationGracePeriodSeconds: 30
diff --git a/roles/openshift_service_catalog/templates/controller_manager_service.j2 b/roles/openshift_service_catalog/templates/controller_manager_service.j2
new file mode 100644
index 000000000..2bac645fc
--- /dev/null
+++ b/roles/openshift_service_catalog/templates/controller_manager_service.j2
@@ -0,0 +1,13 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: controller-manager
+spec:
+ ports:
+ - port: 6443
+ protocol: TCP
+ targetPort: 6443
+ selector:
+ app: controller-manager
+ sessionAffinity: None
+ type: ClusterIP
diff --git a/roles/openshift_service_catalog/vars/default_images.yml b/roles/openshift_service_catalog/vars/default_images.yml
new file mode 100644
index 000000000..6fb9d1b86
--- /dev/null
+++ b/roles/openshift_service_catalog/vars/default_images.yml
@@ -0,0 +1,3 @@
+---
+__openshift_service_catalog_image_prefix: "docker.io/openshift/origin-"
+__openshift_service_catalog_image_version: "latest"
diff --git a/roles/openshift_service_catalog/vars/openshift-enterprise.yml b/roles/openshift_service_catalog/vars/openshift-enterprise.yml
new file mode 100644
index 000000000..8c3f14485
--- /dev/null
+++ b/roles/openshift_service_catalog/vars/openshift-enterprise.yml
@@ -0,0 +1,3 @@
+---
+__openshift_service_catalog_image_prefix: "registry.access.redhat.com/openshift3/"
+__openshift_service_catalog_image_version: "3.6.0"
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
index 4406ef28b..af901103e 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
@@ -26,7 +26,7 @@
- kind: "sa"
name: "heketi-{{ glusterfs_name }}-service-account"
- kind: "secret"
- name: "heketi-{{ glusterfs_name }}-user-secret"
+ name: "heketi-{{ glusterfs_name }}-admin-secret"
failed_when: False
when: glusterfs_heketi_wipe
@@ -66,6 +66,7 @@
- name: Add heketi service account to privileged SCC
oc_adm_policy_user:
+ namespace: "{{ glusterfs_namespace }}"
user: "system:serviceaccount:{{ glusterfs_namespace }}:heketi-{{ glusterfs_name }}-service-account"
resource_kind: scc
resource_name: privileged
@@ -74,6 +75,7 @@
- name: Allow heketi service account to view/edit pods
oc_adm_policy_user:
+ namespace: "{{ glusterfs_namespace }}"
user: "system:serviceaccount:{{ glusterfs_namespace }}:heketi-{{ glusterfs_name }}-service-account"
resource_kind: role
resource_name: edit
@@ -148,7 +150,7 @@
- name: Set heketi-cli command
set_fact:
- glusterfs_heketi_client: "{% if glusterfs_heketi_is_native %}oc rsh {{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} {% endif %}heketi-cli -s http://{% if glusterfs_heketi_is_native %}localhost:8080{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %} --user admin --secret '{{ glusterfs_heketi_admin_key }}'"
+ glusterfs_heketi_client: "{% if glusterfs_heketi_is_native %}{{ openshift.common.client_binary }} rsh --namespace={{ glusterfs_namespace }} {{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} {% endif %}heketi-cli -s http://{% if glusterfs_heketi_is_native %}localhost:8080{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %} --user admin --secret '{{ glusterfs_heketi_admin_key }}'"
- name: Verify heketi service
command: "{{ glusterfs_heketi_client }} cluster list"
@@ -170,7 +172,7 @@
oc_secret:
namespace: "{{ glusterfs_namespace }}"
state: present
- name: "heketi-{{ glusterfs_name }}-secret"
+ name: "heketi-{{ glusterfs_name }}-admin-secret"
type: "kubernetes.io/glusterfs"
force: True
contents:
diff --git a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
index 26343b909..63009c539 100644
--- a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
+++ b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
@@ -4,7 +4,7 @@
register: setup_storage
- name: Copy heketi-storage list
- shell: "{{ openshift.common.client_binary }} rsh {{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} cat /tmp/heketi-storage.json > {{ mktemp.stdout }}/heketi-storage.json"
+ shell: "{{ openshift.common.client_binary }} rsh --namespace={{ glusterfs_namespace }} {{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} cat /tmp/heketi-storage.json > {{ mktemp.stdout }}/heketi-storage.json"
# This is used in the subsequent task
- name: Copy the admin client config
@@ -125,7 +125,7 @@
- name: Set heketi-cli command
set_fact:
- glusterfs_heketi_client: "oc rsh {{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} heketi-cli -s http://localhost:8080 --user admin --secret '{{ glusterfs_heketi_admin_key }}'"
+ glusterfs_heketi_client: "{{ openshift.common.client_binary }} rsh --namespace={{ glusterfs_namespace }} {{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} heketi-cli -s http://localhost:8080 --user admin --secret '{{ glusterfs_heketi_admin_key }}'"
- name: Verify heketi service
command: "{{ glusterfs_heketi_client }} cluster list"
diff --git a/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2
index 5ea801e60..2ec9a9e9a 100644
--- a/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2
+++ b/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-storageclass.yml.j2
@@ -8,4 +8,4 @@ parameters:
resturl: "http://{% if glusterfs_heketi_is_native %}{{ glusterfs_heketi_route }}{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %}"
restuser: "admin"
secretNamespace: "{{ glusterfs_namespace }}"
- secretName: "heketi-{{ glusterfs_name }}-secret"
+ secretName: "heketi-{{ glusterfs_name }}-admin-secret"
diff --git a/roles/openshift_version/tasks/main.yml b/roles/openshift_version/tasks/main.yml
index 16792388f..f4cb8ddb2 100644
--- a/roles/openshift_version/tasks/main.yml
+++ b/roles/openshift_version/tasks/main.yml
@@ -84,115 +84,119 @@
- openshift_version is not defined
- openshift_protect_installed_version | bool
-- name: Set openshift_version for rpm installation
- include: set_version_rpm.yml
- when: not is_containerized | bool
-
-- name: Set openshift_version for containerized installation
- include: set_version_containerized.yml
- when: is_containerized | bool
-
-- block:
- - name: Get available {{ openshift.common.service_type}} version
- repoquery:
- name: "{{ openshift.common.service_type}}"
- ignore_excluders: true
- register: rpm_results
- - fail:
- msg: "Package {{ openshift.common.service_type}} not found"
- when: not rpm_results.results.package_found
- - set_fact:
- openshift_rpm_version: "{{ rpm_results.results.versions.available_versions.0 | default('0.0', True) }}"
- - name: Fail if rpm version and docker image version are different
- fail:
- msg: "OCP rpm version {{ openshift_rpm_version }} is different from OCP image version {{ openshift_version }}"
- # Both versions have the same string representation
+# The rest of these tasks should only execute on
+# masters and nodes as we can verify they have subscriptions
+- when:
+ - inventory_hostname in groups['oo_masters_to_config'] or inventory_hostname in groups['oo_nodes_to_config']
+ block:
+ - name: Set openshift_version for rpm installation
+ include: set_version_rpm.yml
+ when: not is_containerized | bool
+
+ - name: Set openshift_version for containerized installation
+ include: set_version_containerized.yml
+ when: is_containerized | bool
+
+ - block:
+ - name: Get available {{ openshift.common.service_type}} version
+ repoquery:
+ name: "{{ openshift.common.service_type}}"
+ ignore_excluders: true
+ register: rpm_results
+ - fail:
+ msg: "Package {{ openshift.common.service_type}} not found"
+ when: not rpm_results.results.package_found
+ - set_fact:
+ openshift_rpm_version: "{{ rpm_results.results.versions.available_versions.0 | default('0.0', True) }}"
+ - name: Fail if rpm version and docker image version are different
+ fail:
+ msg: "OCP rpm version {{ openshift_rpm_version }} is different from OCP image version {{ openshift_version }}"
+ # Both versions have the same string representation
+ when:
+ - openshift_rpm_version != openshift_version
+ # if openshift_pkg_version or openshift_image_tag is defined, the user permits the rpm and docker image versions to differ
+ - openshift_pkg_version is not defined
+ - openshift_image_tag is not defined
when:
- - openshift_rpm_version != openshift_version
- # if openshift_pkg_version or openshift_image_tag is defined, user gives a permission the rpm and docker image versions can differ
- - openshift_pkg_version is not defined
- - openshift_image_tag is not defined
- when:
- - is_containerized | bool
- - not is_atomic | bool
-
-# Warn if the user has provided an openshift_image_tag but is not doing a containerized install
-# NOTE: This will need to be modified/removed for future container + rpm installations work.
-- name: Warn if openshift_image_tag is defined when not doing a containerized install
- debug:
- msg: >
- openshift_image_tag is used for containerized installs. If you are trying to
- specify an image for a non-container install see oreg_url or oreg_url_master or oreg_url_node.
- when:
- - not is_containerized | bool
- - openshift_image_tag is defined
-
+ - is_containerized | bool
+ - not is_atomic | bool
+
+ # Warn if the user has provided an openshift_image_tag but is not doing a containerized install
+ # NOTE: This will need to be modified/removed for future container + rpm installations to work.
+ - name: Warn if openshift_image_tag is defined when not doing a containerized install
+ debug:
+ msg: >
+ openshift_image_tag is used for containerized installs. If you are trying to
+ specify an image for a non-container install see oreg_url or oreg_url_master or oreg_url_node.
+ when:
+ - not is_containerized | bool
+ - openshift_image_tag is defined
-# At this point we know openshift_version is set appropriately. Now we set
-# openshift_image_tag and openshift_pkg_version, so all roles can always assume
-# each of this variables *will* be set correctly and can use them per their
-# intended purpose.
+ # At this point we know openshift_version is set appropriately. Now we set
+ # openshift_image_tag and openshift_pkg_version, so all roles can always assume
+ # each of these variables *will* be set correctly and can use them per their
+ # intended purpose.
-- block:
- - debug:
- msg: "openshift_image_tag was not defined. Falling back to v{{ openshift_version }}"
+ - block:
+ - debug:
+ msg: "openshift_image_tag was not defined. Falling back to v{{ openshift_version }}"
- - set_fact:
- openshift_image_tag: v{{ openshift_version }}
+ - set_fact:
+ openshift_image_tag: v{{ openshift_version }}
- when: openshift_image_tag is not defined
+ when: openshift_image_tag is not defined
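With the fallback applied, a detected openshift_version of, say, 3.6.173.0.5 yields (value illustrative):

    openshift_image_tag: v3.6.173.0.5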
-- block:
- - debug:
- msg: "openshift_pkg_version was not defined. Falling back to -{{ openshift_version }}"
+ - block:
+ - debug:
+ msg: "openshift_pkg_version was not defined. Falling back to -{{ openshift_version }}"
- - set_fact:
- openshift_pkg_version: -{{ openshift_version }}
+ - set_fact:
+ openshift_pkg_version: -{{ openshift_version }}
- when: openshift_pkg_version is not defined
+ when: openshift_pkg_version is not defined
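The package fallback likewise just prefixes a dash, producing a yum-style version suffix that roles typically append directly to the package name (values illustrative):

    openshift_pkg_version: "-3.6.173.0.5"
    # e.g. "{{ openshift.common.service_type }}{{ openshift_pkg_version }}"
    #      -> atomic-openshift-3.6.173.0.5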
-- fail:
- msg: openshift_version role was unable to set openshift_version
- name: Abort if openshift_version was not set
- when: openshift_version is not defined
+ - fail:
+ msg: openshift_version role was unable to set openshift_version
+ name: Abort if openshift_version was not set
+ when: openshift_version is not defined
-- fail:
- msg: openshift_version role was unable to set openshift_image_tag
- name: Abort if openshift_image_tag was not set
- when: openshift_image_tag is not defined
+ - fail:
+ msg: openshift_version role was unable to set openshift_image_tag
+ name: Abort if openshift_image_tag was not set
+ when: openshift_image_tag is not defined
-- fail:
- msg: openshift_version role was unable to set openshift_pkg_version
- name: Abort if openshift_pkg_version was not set
- when: openshift_pkg_version is not defined
+ - fail:
+ msg: openshift_version role was unable to set openshift_pkg_version
+ name: Abort if openshift_pkg_version was not set
+ when: openshift_pkg_version is not defined
-- fail:
- msg: "No OpenShift version available; please ensure your systems are fully registered and have access to appropriate yum repositories."
- name: Abort if openshift_pkg_version was not set
- when:
- - not is_containerized | bool
- - openshift_version == '0.0'
+ - fail:
+ msg: "No OpenShift version available; please ensure your systems are fully registered and have access to appropriate yum repositories."
+ name: Abort if no OpenShift version is available
+ when:
+ - not is_containerized | bool
+ - openshift_version == '0.0'
-# We can't map an openshift_release to full rpm version like we can with containers; make sure
-# the rpm version we looked up matches the release requested and error out if not.
-- name: For an RPM install, abort when the release requested does not match the available version.
- when:
- - not is_containerized | bool
- - openshift_release is defined
- assert:
- that:
- - openshift_version.startswith(openshift_release) | bool
- msg: |-
- You requested openshift_release {{ openshift_release }}, which is not matched by
- the latest OpenShift RPM we detected as {{ openshift.common.service_type }}-{{ openshift_version }}
- on host {{ inventory_hostname }}.
- We will only install the latest RPMs, so please ensure you are getting the release
- you expect. You may need to adjust your Ansible inventory, modify the repositories
- available on the host, or run the appropriate OpenShift upgrade playbook.
+ # We can't map an openshift_release to full rpm version like we can with containers; make sure
+ # the rpm version we looked up matches the release requested and error out if not.
+ - name: For an RPM install, abort when the release requested does not match the available version.
+ when:
+ - not is_containerized | bool
+ - openshift_release is defined
+ assert:
+ that:
+ - openshift_version.startswith(openshift_release) | bool
+ msg: |-
+ You requested openshift_release {{ openshift_release }}, which is not matched by
+ the latest OpenShift RPM we detected as {{ openshift.common.service_type }}-{{ openshift_version }}
+ on host {{ inventory_hostname }}.
+ We will only install the latest RPMs, so please ensure you are getting the release
+ you expect. You may need to adjust your Ansible inventory, modify the repositories
+ available on the host, or run the appropriate OpenShift upgrade playbook.
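The assert is a plain string-prefix test, so a short release matches any full rpm version it prefixes (values illustrative):

    # "3.6.173.0.5".startswith("3.6") -> True  (assert passes)
    # "3.6.173.0.5".startswith("3.7") -> False (assert fails with the message above)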
-# The end result of these three variables is quite important so make sure they are displayed and logged:
-- debug: var=openshift_release
+ # The end result of these three variables is quite important so make sure they are displayed and logged:
+ - debug: var=openshift_release
-- debug: var=openshift_image_tag
+ - debug: var=openshift_image_tag
-- debug: var=openshift_pkg_version
+ - debug: var=openshift_pkg_version
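On a successful run the three debug tasks log something like the following (hostname and values illustrative):

    ok: [master1.example.com] => { "openshift_release": "3.6" }
    ok: [master1.example.com] => { "openshift_image_tag": "v3.6.173.0.5" }
    ok: [master1.example.com] => { "openshift_pkg_version": "-3.6.173.0.5" }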
diff --git a/test/integration/openshift_health_checker/setup_container.yml b/test/integration/openshift_health_checker/setup_container.yml
index 8793d954e..33e94cf1f 100644
--- a/test/integration/openshift_health_checker/setup_container.yml
+++ b/test/integration/openshift_health_checker/setup_container.yml
@@ -43,3 +43,6 @@
delegate_facts: True
delegate_to: "{{ container_name }}"
with_dict: "{{ l_host_vars | default({}) }}"
+
+- include: ../../../playbooks/byo/openshift-cluster/initialize_groups.yml
+- include: ../../../playbooks/common/openshift-cluster/evaluate_groups.yml
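The two added includes run the standard group setup before the health-checker containers are exercised: initialize_groups.yml normalizes the BYO inventory, and evaluate_groups.yml derives the oo_* evaluation groups (oo_masters_to_config, oo_nodes_to_config, and friends) whose membership tasks such as the version checks above depend on. A minimal BYO-style inventory that would feed them (hostnames illustrative):

    [OSEv3:children]
    masters
    nodes

    [masters]
    master1.example.com

    [nodes]
    node1.example.com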