Diffstat (limited to 'roles')
135 files changed, 1617 insertions, 578 deletions
diff --git a/roles/container_runtime/defaults/main.yml b/roles/container_runtime/defaults/main.yml
index 8203d15f5..7397e2bec 100644
--- a/roles/container_runtime/defaults/main.yml
+++ b/roles/container_runtime/defaults/main.yml
@@ -64,7 +64,7 @@ docker_storage_setup_options:
   root_lv_mount_path: "{{ docker_storage_path }}"
 docker_storage_extra_options:
 - "--storage-opt overlay2.override_kernel_check=true"
-- "--storage-opt overlay2.size={{ docker_storage_size }}"
+- "{{ '--storage-opt overlay2.size=' ~ docker_storage_size if container_runtime_docker_storage_setup_device is defined and container_runtime_docker_storage_setup_device != '' else '' }}"
 - "--graph={{ docker_storage_path}}"
@@ -117,7 +117,7 @@ l_crio_image: "{{ openshift_crio_systemcontainer_image_override | default(l_crio
 # ----------------------- #
 l_crt_docker_image_dict:
   Fedora: "registry.fedoraproject.org/latest/docker"
-  Centos: "registry.centos.org/projectatomic/docker"
+  CentOS: "registry.centos.org/projectatomic/docker"
   RedHat: "registry.access.redhat.com/openshift3/container-engine"
 
 openshift_docker_image_tag_default: "latest"
diff --git a/roles/container_runtime/templates/docker_storage_setup.j2 b/roles/container_runtime/templates/docker_storage_setup.j2
index b056087e0..ec540ea44 100644
--- a/roles/container_runtime/templates/docker_storage_setup.j2
+++ b/roles/container_runtime/templates/docker_storage_setup.j2
@@ -2,6 +2,7 @@
 # /usr/lib/docker-storage-setup/docker-storage-setup.
 #
 # For more details refer to "man docker-storage-setup"
+{% if container_runtime_docker_storage_setup_device is defined and container_runtime_docker_storage_setup_device != '' %}
 DEVS={{ container_runtime_docker_storage_setup_device }}
 VG={{ docker_storage_setup_options.vg }}
 DATA_SIZE={{ docker_storage_setup_options.data_size }}
@@ -9,4 +10,7 @@ STORAGE_DRIVER="{{ docker_storage_setup_options.storage_driver }}"
 CONTAINER_ROOT_LV_NAME="{{ docker_storage_setup_options.root_lv_name }}"
 CONTAINER_ROOT_LV_SIZE="{{ docker_storage_setup_options.root_lv_size }}"
 CONTAINER_ROOT_LV_MOUNT_PATH="{{ docker_storage_setup_options.root_lv_mount_path }}"
+{% else %}
+STORAGE_DRIVER="{{ docker_storage_setup_options.storage_driver }}"
+{% endif %}
 EXTRA_STORAGE_OPTIONS="{{ docker_storage_extra_options | join(' ') }}"
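For reference, the new Jinja ternary collapses the overlay2.size option to an empty list element whenever no storage-setup device is configured. A minimal Python sketch of the resulting EXTRA_STORAGE_OPTIONS join (all values here are illustrative, not the role's defaults):

    # Illustrative values; the real ones come from inventory variables.
    docker_storage_size = '40G'
    setup_device = ''  # container_runtime_docker_storage_setup_device unset/empty

    docker_storage_extra_options = [
        '--storage-opt overlay2.override_kernel_check=true',
        # Mirrors the template ternary: only emit overlay2.size when a
        # storage-setup device is actually configured.
        ('--storage-opt overlay2.size=' + docker_storage_size) if setup_device else '',
        '--graph=/var/lib/docker',
    ]
    print(' '.join(docker_storage_extra_options))
    # Note the empty element leaves a harmless double space in the joined string.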
diff --git a/roles/lib_openshift/library/oc_adm_ca_server_cert.py b/roles/lib_openshift/library/oc_adm_ca_server_cert.py
index 05b2763d5..bfed58011 100644
--- a/roles/lib_openshift/library/oc_adm_ca_server_cert.py
+++ b/roles/lib_openshift/library/oc_adm_ca_server_cert.py
@@ -1138,7 +1138,7 @@ class Utils(object):  # pragma: no cover
         ''' Actually write the file contents to disk.
             This helps with mocking. '''
         with open(filename, 'w') as sfd:
-            sfd.write(contents)
+            sfd.write(str(contents))
 
     @staticmethod
     def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_adm_csr.py b/roles/lib_openshift/library/oc_adm_csr.py
index 324f52689..c78e379d5 100644
--- a/roles/lib_openshift/library/oc_adm_csr.py
+++ b/roles/lib_openshift/library/oc_adm_csr.py
@@ -1116,7 +1116,7 @@ class Utils(object):  # pragma: no cover
         ''' Actually write the file contents to disk.
             This helps with mocking. '''
         with open(filename, 'w') as sfd:
-            sfd.write(contents)
+            sfd.write(str(contents))
 
     @staticmethod
     def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_adm_manage_node.py b/roles/lib_openshift/library/oc_adm_manage_node.py
index 152f270ab..b1b2cb5b5 100644
--- a/roles/lib_openshift/library/oc_adm_manage_node.py
+++ b/roles/lib_openshift/library/oc_adm_manage_node.py
@@ -1124,7 +1124,7 @@ class Utils(object):  # pragma: no cover
         ''' Actually write the file contents to disk.
             This helps with mocking. '''
         with open(filename, 'w') as sfd:
-            sfd.write(contents)
+            sfd.write(str(contents))
 
     @staticmethod
     def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_adm_policy_group.py b/roles/lib_openshift/library/oc_adm_policy_group.py
index 3082f5890..2773201d7 100644
--- a/roles/lib_openshift/library/oc_adm_policy_group.py
+++ b/roles/lib_openshift/library/oc_adm_policy_group.py
@@ -1110,7 +1110,7 @@ class Utils(object):  # pragma: no cover
         ''' Actually write the file contents to disk.
             This helps with mocking. '''
         with open(filename, 'w') as sfd:
-            sfd.write(contents)
+            sfd.write(str(contents))
 
     @staticmethod
     def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_adm_policy_user.py b/roles/lib_openshift/library/oc_adm_policy_user.py
index 92515889b..25cbed8b7 100644
--- a/roles/lib_openshift/library/oc_adm_policy_user.py
+++ b/roles/lib_openshift/library/oc_adm_policy_user.py
@@ -1124,7 +1124,7 @@ class Utils(object):  # pragma: no cover
         ''' Actually write the file contents to disk.
             This helps with mocking. '''
         with open(filename, 'w') as sfd:
-            sfd.write(contents)
+            sfd.write(str(contents))
 
     @staticmethod
     def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_adm_registry.py b/roles/lib_openshift/library/oc_adm_registry.py
index fe565987c..e26214316 100644
--- a/roles/lib_openshift/library/oc_adm_registry.py
+++ b/roles/lib_openshift/library/oc_adm_registry.py
@@ -1228,7 +1228,7 @@ class Utils(object):  # pragma: no cover
         ''' Actually write the file contents to disk.
             This helps with mocking. '''
         with open(filename, 'w') as sfd:
-            sfd.write(contents)
+            sfd.write(str(contents))
 
     @staticmethod
     def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_adm_router.py b/roles/lib_openshift/library/oc_adm_router.py
index 44de29592..62fca19e5 100644
--- a/roles/lib_openshift/library/oc_adm_router.py
+++ b/roles/lib_openshift/library/oc_adm_router.py
@@ -1253,7 +1253,7 @@ class Utils(object):  # pragma: no cover
         ''' Actually write the file contents to disk.
             This helps with mocking. '''
         with open(filename, 'w') as sfd:
-            sfd.write(contents)
+            sfd.write(str(contents))
 
     @staticmethod
     def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_clusterrole.py b/roles/lib_openshift/library/oc_clusterrole.py
index 9761b4b4e..0c4bfa01f 100644
--- a/roles/lib_openshift/library/oc_clusterrole.py
+++ b/roles/lib_openshift/library/oc_clusterrole.py
@@ -1102,7 +1102,7 @@ class Utils(object):  # pragma: no cover
         ''' Actually write the file contents to disk.
             This helps with mocking. '''
         with open(filename, 'w') as sfd:
-            sfd.write(contents)
+            sfd.write(str(contents))
 
     @staticmethod
     def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_configmap.py b/roles/lib_openshift/library/oc_configmap.py
index 047edffbb..36e6111eb 100644
--- a/roles/lib_openshift/library/oc_configmap.py
+++ b/roles/lib_openshift/library/oc_configmap.py
@@ -1108,7 +1108,7 @@ class Utils(object):  # pragma: no cover
         ''' Actually write the file contents to disk.
             This helps with mocking. '''
         with open(filename, 'w') as sfd:
-            sfd.write(contents)
+            sfd.write(str(contents))
 
     @staticmethod
     def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_edit.py b/roles/lib_openshift/library/oc_edit.py
index 0cea07256..ab4f153c7 100644
--- a/roles/lib_openshift/library/oc_edit.py
+++ b/roles/lib_openshift/library/oc_edit.py
@@ -1152,7 +1152,7 @@ class Utils(object):  # pragma: no cover
         ''' Actually write the file contents to disk.
             This helps with mocking. '''
         with open(filename, 'w') as sfd:
-            sfd.write(contents)
+            sfd.write(str(contents))
 
     @staticmethod
     def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_env.py b/roles/lib_openshift/library/oc_env.py
index 1f52fba40..f334ddaa4 100644
--- a/roles/lib_openshift/library/oc_env.py
+++ b/roles/lib_openshift/library/oc_env.py
@@ -1119,7 +1119,7 @@ class Utils(object):  # pragma: no cover
         ''' Actually write the file contents to disk.
             This helps with mocking. '''
         with open(filename, 'w') as sfd:
-            sfd.write(contents)
+            sfd.write(str(contents))
 
     @staticmethod
     def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_group.py b/roles/lib_openshift/library/oc_group.py
index 1b63a6c13..7e9078339 100644
--- a/roles/lib_openshift/library/oc_group.py
+++ b/roles/lib_openshift/library/oc_group.py
@@ -1092,7 +1092,7 @@ class Utils(object):  # pragma: no cover
         ''' Actually write the file contents to disk.
             This helps with mocking. '''
         with open(filename, 'w') as sfd:
-            sfd.write(contents)
+            sfd.write(str(contents))
 
     @staticmethod
     def create_tmp_file_from_contents(rname, data, ftype='yaml'):
@@ -1485,7 +1485,7 @@ class OCGroup(OpenShiftCLI):
 
     def needs_update(self):
         ''' verify an update is needed '''
-        return not Utils.check_def_equal(self.config.data, self.group.yaml_dict, skip_keys=[], debug=True)
+        return not Utils.check_def_equal(self.config.data, self.group.yaml_dict, skip_keys=['users'], debug=True)
 
     # pylint: disable=too-many-return-statements,too-many-branches
     @staticmethod
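The needs_update() change above makes group membership invisible to the diff check, so externally managed users no longer force an update. A simplified stand-in for Utils.check_def_equal (a hypothetical helper, not the module's implementation) shows the effect of the skip list:

    # Compare two resource dicts while ignoring skipped keys, the way
    # needs_update() now ignores 'users'.
    def defs_equal(user_def, result_def, skip_keys):
        keys = (set(user_def) | set(result_def)) - set(skip_keys)
        return all(user_def.get(k) == result_def.get(k) for k in keys)

    config = {'kind': 'Group', 'metadata': {'name': 'devs'}, 'users': []}
    live = {'kind': 'Group', 'metadata': {'name': 'devs'}, 'users': ['alice']}
    assert defs_equal(config, live, skip_keys=['users'])  # no update needed
    assert not defs_equal(config, live, skip_keys=[])     # old behavior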
diff --git a/roles/lib_openshift/library/oc_image.py b/roles/lib_openshift/library/oc_image.py
index 94b08d9ce..e71e2eb5c 100644
--- a/roles/lib_openshift/library/oc_image.py
+++ b/roles/lib_openshift/library/oc_image.py
@@ -1111,7 +1111,7 @@ class Utils(object):  # pragma: no cover
         ''' Actually write the file contents to disk.
             This helps with mocking. '''
         with open(filename, 'w') as sfd:
-            sfd.write(contents)
+            sfd.write(str(contents))
 
     @staticmethod
     def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_label.py b/roles/lib_openshift/library/oc_label.py
index ad837fdb5..ac3279ef8 100644
--- a/roles/lib_openshift/library/oc_label.py
+++ b/roles/lib_openshift/library/oc_label.py
@@ -1128,7 +1128,7 @@ class Utils(object):  # pragma: no cover
         ''' Actually write the file contents to disk.
             This helps with mocking. '''
         with open(filename, 'w') as sfd:
-            sfd.write(contents)
+            sfd.write(str(contents))
 
     @staticmethod
     def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_obj.py b/roles/lib_openshift/library/oc_obj.py
index 892546e56..ca53c4c97 100644
--- a/roles/lib_openshift/library/oc_obj.py
+++ b/roles/lib_openshift/library/oc_obj.py
@@ -1131,7 +1131,7 @@ class Utils(object):  # pragma: no cover
         ''' Actually write the file contents to disk.
             This helps with mocking. '''
         with open(filename, 'w') as sfd:
-            sfd.write(contents)
+            sfd.write(str(contents))
 
     @staticmethod
     def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_objectvalidator.py b/roles/lib_openshift/library/oc_objectvalidator.py
index 38df585f0..877c78d93 100644
--- a/roles/lib_openshift/library/oc_objectvalidator.py
+++ b/roles/lib_openshift/library/oc_objectvalidator.py
@@ -1063,7 +1063,7 @@ class Utils(object):  # pragma: no cover
         ''' Actually write the file contents to disk.
             This helps with mocking. '''
         with open(filename, 'w') as sfd:
-            sfd.write(contents)
+            sfd.write(str(contents))
 
     @staticmethod
     def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_process.py b/roles/lib_openshift/library/oc_process.py
index 70632f86d..507170424 100644
--- a/roles/lib_openshift/library/oc_process.py
+++ b/roles/lib_openshift/library/oc_process.py
@@ -1120,7 +1120,7 @@ class Utils(object):  # pragma: no cover
         ''' Actually write the file contents to disk.
             This helps with mocking. '''
         with open(filename, 'w') as sfd:
-            sfd.write(contents)
+            sfd.write(str(contents))
 
     @staticmethod
     def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_project.py b/roles/lib_openshift/library/oc_project.py
index 4eee748d7..347e879ca 100644
--- a/roles/lib_openshift/library/oc_project.py
+++ b/roles/lib_openshift/library/oc_project.py
@@ -1117,7 +1117,7 @@ class Utils(object):  # pragma: no cover
         ''' Actually write the file contents to disk.
             This helps with mocking. '''
         with open(filename, 'w') as sfd:
-            sfd.write(contents)
+            sfd.write(str(contents))
 
     @staticmethod
     def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_pvc.py b/roles/lib_openshift/library/oc_pvc.py
index 2e73a7645..93c96b817 100644
--- a/roles/lib_openshift/library/oc_pvc.py
+++ b/roles/lib_openshift/library/oc_pvc.py
@@ -1124,7 +1124,7 @@ class Utils(object):  # pragma: no cover
         ''' Actually write the file contents to disk.
             This helps with mocking. '''
         with open(filename, 'w') as sfd:
-            sfd.write(contents)
+            sfd.write(str(contents))
 
     @staticmethod
     def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_route.py b/roles/lib_openshift/library/oc_route.py
index e003770d8..3369cf134 100644
--- a/roles/lib_openshift/library/oc_route.py
+++ b/roles/lib_openshift/library/oc_route.py
@@ -1168,7 +1168,7 @@ class Utils(object):  # pragma: no cover
         ''' Actually write the file contents to disk.
             This helps with mocking. '''
         with open(filename, 'w') as sfd:
-            sfd.write(contents)
+            sfd.write(str(contents))
 
     @staticmethod
     def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_scale.py b/roles/lib_openshift/library/oc_scale.py
index c142f1f43..1b6202a26 100644
--- a/roles/lib_openshift/library/oc_scale.py
+++ b/roles/lib_openshift/library/oc_scale.py
@@ -1106,7 +1106,7 @@ class Utils(object):  # pragma: no cover
         ''' Actually write the file contents to disk.
             This helps with mocking. '''
         with open(filename, 'w') as sfd:
-            sfd.write(contents)
+            sfd.write(str(contents))
 
     @staticmethod
     def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_secret.py b/roles/lib_openshift/library/oc_secret.py
index 62bda33ad..732299e48 100644
--- a/roles/lib_openshift/library/oc_secret.py
+++ b/roles/lib_openshift/library/oc_secret.py
@@ -1164,7 +1164,7 @@ class Utils(object):  # pragma: no cover
         ''' Actually write the file contents to disk.
             This helps with mocking. '''
         with open(filename, 'w') as sfd:
-            sfd.write(contents)
+            sfd.write(str(contents))
 
     @staticmethod
     def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_service.py b/roles/lib_openshift/library/oc_service.py
index c541e1bbd..a6cf764ff 100644
--- a/roles/lib_openshift/library/oc_service.py
+++ b/roles/lib_openshift/library/oc_service.py
@@ -1171,7 +1171,7 @@ class Utils(object):  # pragma: no cover
         ''' Actually write the file contents to disk.
             This helps with mocking. '''
         with open(filename, 'w') as sfd:
-            sfd.write(contents)
+            sfd.write(str(contents))
 
     @staticmethod
     def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_serviceaccount.py b/roles/lib_openshift/library/oc_serviceaccount.py
index 646a39224..90d514292 100644
--- a/roles/lib_openshift/library/oc_serviceaccount.py
+++ b/roles/lib_openshift/library/oc_serviceaccount.py
@@ -1104,7 +1104,7 @@ class Utils(object):  # pragma: no cover
         ''' Actually write the file contents to disk.
             This helps with mocking. '''
         with open(filename, 'w') as sfd:
-            sfd.write(contents)
+            sfd.write(str(contents))
 
     @staticmethod
     def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_serviceaccount_secret.py b/roles/lib_openshift/library/oc_serviceaccount_secret.py
index 99a8e8f3d..0d9acac0e 100644
--- a/roles/lib_openshift/library/oc_serviceaccount_secret.py
+++ b/roles/lib_openshift/library/oc_serviceaccount_secret.py
@@ -1104,7 +1104,7 @@ class Utils(object):  # pragma: no cover
         ''' Actually write the file contents to disk.
             This helps with mocking. '''
         with open(filename, 'w') as sfd:
-            sfd.write(contents)
+            sfd.write(str(contents))
 
     @staticmethod
     def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_storageclass.py b/roles/lib_openshift/library/oc_storageclass.py
index 7e7d0fa60..6fb5a94e9 100644
--- a/roles/lib_openshift/library/oc_storageclass.py
+++ b/roles/lib_openshift/library/oc_storageclass.py
@@ -1122,7 +1122,7 @@ class Utils(object):  # pragma: no cover
         ''' Actually write the file contents to disk.
             This helps with mocking. '''
         with open(filename, 'w') as sfd:
-            sfd.write(contents)
+            sfd.write(str(contents))
 
     @staticmethod
     def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_user.py b/roles/lib_openshift/library/oc_user.py
index 7bbe38819..feb69348b 100644
--- a/roles/lib_openshift/library/oc_user.py
+++ b/roles/lib_openshift/library/oc_user.py
@@ -1164,7 +1164,7 @@ class Utils(object):  # pragma: no cover
         ''' Actually write the file contents to disk.
             This helps with mocking. '''
         with open(filename, 'w') as sfd:
-            sfd.write(contents)
+            sfd.write(str(contents))
 
     @staticmethod
     def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_version.py b/roles/lib_openshift/library/oc_version.py
index 63adbd6ac..0f024c048 100644
--- a/roles/lib_openshift/library/oc_version.py
+++ b/roles/lib_openshift/library/oc_version.py
@@ -1076,7 +1076,7 @@ class Utils(object):  # pragma: no cover
         ''' Actually write the file contents to disk.
             This helps with mocking. '''
         with open(filename, 'w') as sfd:
-            sfd.write(contents)
+            sfd.write(str(contents))
 
     @staticmethod
     def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_volume.py b/roles/lib_openshift/library/oc_volume.py
index 3c07f8d4b..6f409f979 100644
--- a/roles/lib_openshift/library/oc_volume.py
+++ b/roles/lib_openshift/library/oc_volume.py
@@ -1153,7 +1153,7 @@ class Utils(object):  # pragma: no cover
         ''' Actually write the file contents to disk.
             This helps with mocking. '''
         with open(filename, 'w') as sfd:
-            sfd.write(contents)
+            sfd.write(str(contents))
 
     @staticmethod
     def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/src/class/oc_group.py b/roles/lib_openshift/src/class/oc_group.py
index 89fb09ea4..53e6b6766 100644
--- a/roles/lib_openshift/src/class/oc_group.py
+++ b/roles/lib_openshift/src/class/oc_group.py
@@ -59,7 +59,7 @@ class OCGroup(OpenShiftCLI):
 
     def needs_update(self):
         ''' verify an update is needed '''
-        return not Utils.check_def_equal(self.config.data, self.group.yaml_dict, skip_keys=[], debug=True)
+        return not Utils.check_def_equal(self.config.data, self.group.yaml_dict, skip_keys=['users'], debug=True)
 
     # pylint: disable=too-many-return-statements,too-many-branches
     @staticmethod
diff --git a/roles/lib_openshift/src/lib/base.py b/roles/lib_openshift/src/lib/base.py
index 1fb32164e..9a4ce3509 100644
--- a/roles/lib_openshift/src/lib/base.py
+++ b/roles/lib_openshift/src/lib/base.py
@@ -314,7 +314,7 @@ class Utils(object):
         ''' Actually write the file contents to disk.
             This helps with mocking. '''
         with open(filename, 'w') as sfd:
-            sfd.write(contents)
+            sfd.write(str(contents))
 
     @staticmethod
     def create_tmp_file_from_contents(rname, data, ftype='yaml'):
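All of the generated library modules above inherit the same one-line fix from src/lib/base.py: Utils._write() now coerces its argument to text before writing. One plausible reading of the change, sketched standalone (the coercion guards against non-string values reaching the text-mode file handle):

    import yaml

    def _write(filename, contents):
        '''Actually write the file contents to disk. This helps with mocking.'''
        with open(filename, 'w') as sfd:
            # str() coerces a non-string value a caller slips through
            # (ints, booleans, parsed YAML scalars) so the text-mode
            # write cannot raise TypeError.
            sfd.write(str(contents))

    _write('/tmp/example.yml', yaml.safe_dump({'kind': 'ConfigMap'}))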
diff --git a/roles/lib_utils/library/docker_creds.py b/roles/lib_utils/library/docker_creds.py
index b94c0b779..936fb1c38 100644
--- a/roles/lib_utils/library/docker_creds.py
+++ b/roles/lib_utils/library/docker_creds.py
@@ -148,10 +148,12 @@ def update_config(docker_config, registry, username, password):
 
 def write_config(module, docker_config, dest):
     '''Write updated credentials into dest/config.json'''
+    if not isinstance(docker_config, dict):
+        docker_config = docker_config.decode()
     conf_file_path = os.path.join(dest, 'config.json')
     try:
         with open(conf_file_path, 'w') as conf_file:
-            json.dump(docker_config.decode(), conf_file, indent=8)
+            json.dump(docker_config, conf_file, indent=8)
     except IOError as ioerror:
         result = {'failed': True,
                   'changed': False,
diff --git a/roles/lib_utils/library/openshift_container_binary_sync.py b/roles/lib_utils/library/openshift_container_binary_sync.py
index 440b8ec28..efdfcf1c7 100644
--- a/roles/lib_utils/library/openshift_container_binary_sync.py
+++ b/roles/lib_utils/library/openshift_container_binary_sync.py
@@ -107,7 +107,7 @@ class BinarySyncer(object):
         self._sync_binary('oc')
 
         # Ensure correct symlinks created:
-        self._sync_symlink('kubectl', 'openshift')
+        self._sync_symlink('kubectl', 'oc')
 
         # Remove old oadm binary
         if os.path.exists(os.path.join(self.bin_dir, 'oadm')):
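The docker_creds change moves the decode out of the json.dump call: write_config() may receive either an already-parsed dict or raw bytes, and only the latter needs decoding. A short sketch under that assumption (the base64 payload is illustrative):

    import base64
    import json

    # docker_config may arrive as base64-decoded bytes or as a parsed dict;
    # decode only the former before serializing, as the fixed module does.
    docker_config = base64.b64decode(b'eyJhdXRocyI6IHt9fQ==')  # b'{"auths": {}}'
    if not isinstance(docker_config, dict):
        docker_config = docker_config.decode()
    with open('/tmp/config.json', 'w') as conf_file:
        json.dump(docker_config, conf_file, indent=8)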
diff --git a/roles/nuage_master/handlers/main.yaml b/roles/nuage_master/handlers/main.yaml
index 7b55dda56..c0411d641 100644
--- a/roles/nuage_master/handlers/main.yaml
+++ b/roles/nuage_master/handlers/main.yaml
@@ -1,9 +1,7 @@
 ---
 - name: restart master api
   systemd: name={{ openshift_service_type }}-master-api state=restarted
-  when: >
-    (openshift_master_ha | bool) and
-    (not master_api_service_status_changed | default(false))
+  when: (not master_api_service_status_changed | default(false))
 
 # TODO: need to fix up ignore_errors here
 # We retry the controllers because the API may not be 100% initialized yet.
@@ -13,7 +11,5 @@
   delay: 5
   register: result
   until: result.rc == 0
-  when: >
-    (openshift_master_ha | bool) and
-    (not master_controllers_service_status_changed | default(false))
+  when: (not master_controllers_service_status_changed | default(false))
   ignore_errors: yes
diff --git a/roles/nuage_master/tasks/etcd_certificates.yml b/roles/nuage_master/tasks/etcd_certificates.yml
new file mode 100644
index 000000000..99ec27f91
--- /dev/null
+++ b/roles/nuage_master/tasks/etcd_certificates.yml
@@ -0,0 +1,21 @@
+---
+- name: Generate openshift etcd certs
+  become: yes
+  include_role:
+    name: etcd
+    tasks_from: client_certificates
+  vars:
+    etcd_cert_prefix: nuageEtcd-
+    etcd_cert_config_dir: "{{ cert_output_dir }}"
+    embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}"
+    etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
+    etcd_cert_subdir: "openshift-nuage-{{ openshift.common.hostname }}"
+
+
+- name: Error if etcd certs are not copied
+  stat:
+    path: "{{ item }}"
+  with_items:
+  - "{{ cert_output_dir }}/nuageEtcd-ca.crt"
+  - "{{ cert_output_dir }}/nuageEtcd-client.crt"
+  - "{{ cert_output_dir }}/nuageEtcd-client.key"
diff --git a/roles/nuage_master/tasks/main.yaml b/roles/nuage_master/tasks/main.yaml
index 29e16b6f8..a1781dc56 100644
--- a/roles/nuage_master/tasks/main.yaml
+++ b/roles/nuage_master/tasks/main.yaml
@@ -81,6 +81,7 @@
     - nuage.key
     - nuage.kubeconfig
 
+- include_tasks: etcd_certificates.yml
 - include_tasks: certificates.yml
 
 - name: Install Nuage VSD user certificate
@@ -99,7 +100,16 @@
   become: yes
   template: src=nuage-node-config-daemonset.j2 dest=/etc/nuage-node-config-daemonset.yaml owner=root mode=0644
 
-- name: Add the service account to the privileged scc to have root permissions
+- name: Create Nuage Infra Pod daemon set yaml file
+  become: yes
+  template: src=nuage-infra-pod-config-daemonset.j2 dest=/etc/nuage-infra-pod-config-daemonset.yaml owner=root mode=0644
+
+- name: Add the service account to the privileged scc to have root permissions for kube-system
+  shell: oc adm policy add-scc-to-user privileged system:serviceaccount:kube-system:daemon-set-controller
+  ignore_errors: true
+  when: inventory_hostname == groups.oo_first_master.0
+
+- name: Add the service account to the privileged scc to have root permissions for openshift-infra
   shell: oc adm policy add-scc-to-user privileged system:serviceaccount:openshift-infra:daemonset-controller
   ignore_errors: true
   when: inventory_hostname == groups.oo_first_master.0
@@ -114,6 +124,11 @@
   ignore_errors: true
   when: inventory_hostname == groups.oo_first_master.0
 
+- name: Spawn Nuage Infra daemon sets pod
+  shell: oc create -f /etc/nuage-infra-pod-config-daemonset.yaml
+  ignore_errors: true
+  when: inventory_hostname == groups.oo_first_master.0
+
 - name: Restart daemons
   command: /bin/true
   notify:
diff --git a/roles/nuage_master/templates/nuage-infra-pod-config-daemonset.j2 b/roles/nuage_master/templates/nuage-infra-pod-config-daemonset.j2
new file mode 100755
index 000000000..534a1517f
--- /dev/null
+++ b/roles/nuage_master/templates/nuage-infra-pod-config-daemonset.j2
@@ -0,0 +1,39 @@
+# This manifest installs Nuage Infra pod on
+# each worker node in an Openshift cluster.
+kind: DaemonSet
+apiVersion: extensions/v1beta1
+metadata:
+  name: nuage-infra-ds
+  namespace: kube-system
+  labels:
+    k8s-app: nuage-infra-ds
+spec:
+  selector:
+    matchLabels:
+      k8s-app: nuage-infra-ds
+  updateStrategy:
+    type: RollingUpdate
+  template:
+    metadata:
+      labels:
+        k8s-app: nuage-infra-ds
+    spec:
+      tolerations:
+      - key: node-role.kubernetes.io/master
+        effect: NoSchedule
+        operator: Exists
+      containers:
+        # This container spawns a Nuage Infra pod
+        # on each worker node
+        - name: install-nuage-infra
+          image: nuage/infra:{{ nuage_infra_container_image_version }}
+          command: ["/install-nuage-infra-pod.sh"]
+          securityContext:
+            privileged: true
+          volumeMounts:
+            - mountPath: /var/log
+              name: log-dir
+      volumes:
+        - name: log-dir
+          hostPath:
+            path: /var/log
diff --git a/roles/nuage_master/templates/nuage-master-config-daemonset.j2 b/roles/nuage_master/templates/nuage-master-config-daemonset.j2
index 7be5d6743..3543eeb56 100755
--- a/roles/nuage_master/templates/nuage-master-config-daemonset.j2
+++ b/roles/nuage_master/templates/nuage-master-config-daemonset.j2
@@ -37,11 +37,14 @@ data:
     nuageMonServer:
         URL: 0.0.0.0:9443
         certificateDirectory: {{ nuage_master_crt_dir }}
+        clientCA: ""
+        serverCertificate: ""
+        serverKey: ""
 
     # etcd config required for HA
     etcdClientConfig:
-        ca: {{ nuage_master_crt_dir }}/nuageMonCA.crt
-        certFile: {{ nuage_master_crt_dir }}/nuageMonServer.crt
-        keyFile: {{ nuage_master_crt_dir }}/master.etcd-client.key
+        ca: {{ nuage_master_crt_dir }}/nuageEtcd-ca.crt
+        certFile: {{ nuage_master_crt_dir }}/nuageEtcd-client.crt
+        keyFile: {{ nuage_master_crt_dir }}/nuageEtcd-client.key
         urls:
 {% for etcd_url in openshift.master.etcd_urls %}
         - {{ etcd_url }}
diff --git a/roles/nuage_master/templates/nuage-node-config-daemonset.j2 b/roles/nuage_master/templates/nuage-node-config-daemonset.j2
index 6a1267d94..996a2d2b0 100755
--- a/roles/nuage_master/templates/nuage-node-config-daemonset.j2
+++ b/roles/nuage_master/templates/nuage-node-config-daemonset.j2
@@ -61,6 +61,8 @@ spec:
   selector:
     matchLabels:
       k8s-app: nuage-cni-ds
+  updateStrategy:
+    type: RollingUpdate
   template:
     metadata:
       labels:
@@ -104,6 +106,8 @@ spec:
         - mountPath: /var/log
           name: cni-log-dir
         - mountPath: {{ nuage_node_config_dsets_mount_dir }}
+          name: var-usr-share-dir
+        - mountPath: /usr/share/
           name: usr-share-dir
       volumes:
         - name: cni-bin-dir
@@ -121,9 +125,12 @@ spec:
         - name: cni-log-dir
           hostPath:
             path: /var/log
-        - name: usr-share-dir
+        - name: var-usr-share-dir
           hostPath:
             path: {{ nuage_node_config_dsets_mount_dir }}
+        - name: usr-share-dir
+          hostPath:
+            path: /usr/share/
 
 ---
@@ -164,7 +171,7 @@ spec:
         - name: NUAGE_PLATFORM
           value: '"kvm, k8s"'
         - name: NUAGE_K8S_SERVICE_IPV4_SUBNET
-          value: '192.168.0.0\/16'
+          value: '172.30.0.0\/16'
         - name: NUAGE_NETWORK_UPLINK_INTF
           value: "eth0"
         volumeMounts:
diff --git a/roles/nuage_master/vars/main.yaml b/roles/nuage_master/vars/main.yaml
index 114514d7c..5045e1cc5 100644
--- a/roles/nuage_master/vars/main.yaml
+++ b/roles/nuage_master/vars/main.yaml
@@ -26,9 +26,10 @@ nuage_master_config_dsets_mount_dir: /usr/share/
 nuage_node_config_dsets_mount_dir: /usr/share/
 nuage_cni_bin_dsets_mount_dir: /opt/cni/bin
 nuage_cni_netconf_dsets_mount_dir: /etc/cni/net.d
-nuage_monitor_container_image_version: "{{ nuage_monitor_image_version | default('v5.1.1') }}"
-nuage_vrs_container_image_version: "{{ nuage_vrs_image_version | default('v5.1.1') }}"
-nuage_cni_container_image_version: "{{ nuage_cni_image_version | default('v5.1.1') }}"
+nuage_monitor_container_image_version: "{{ nuage_monitor_image_version | default('v5.2.1') }}"
+nuage_vrs_container_image_version: "{{ nuage_vrs_image_version | default('v5.2.1') }}"
+nuage_cni_container_image_version: "{{ nuage_cni_image_version | default('v5.2.1') }}"
+nuage_infra_container_image_version: "{{ nuage_infra_image_version | default('v5.2.1') }}"
 api_server_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}"
 nuage_vport_mtu: "{{ nuage_interface_mtu | default('1460') }}"
 master_host_type: "{{ master_base_host_type | default('is_rhel_server') }}"
diff --git a/roles/openshift_aws/defaults/main.yml b/roles/openshift_aws/defaults/main.yml
index e14d57702..1696c2751 100644
--- a/roles/openshift_aws/defaults/main.yml
+++ b/roles/openshift_aws/defaults/main.yml
@@ -42,60 +42,79 @@ openshift_aws_ami_tags:
 openshift_aws_s3_mode: create
 openshift_aws_s3_bucket_name: "{{ openshift_aws_clusterid }}-docker-registry"
 
-openshift_aws_elb_health_check:
-  ping_protocol: tcp
-  ping_port: 443
-  response_timeout: 5
-  interval: 30
-  unhealthy_threshold: 2
-  healthy_threshold: 2
-
 openshift_aws_elb_basename: "{{ openshift_aws_clusterid }}"
-openshift_aws_elb_name_dict:
-  master:
-    external: "{{ openshift_aws_elb_basename }}-master-external"
-    internal: "{{ openshift_aws_elb_basename }}-master-internal"
-  infra:
-    external: "{{ openshift_aws_elb_basename }}-infra"
-
-openshift_aws_elb_idle_timout: 400
 openshift_aws_elb_cert_arn: ''
 
 openshift_aws_elb_dict:
   master:
     external:
-    - protocol: tcp
-      load_balancer_port: 80
-      instance_protocol: ssl
-      instance_port: 443
-    - protocol: ssl
-      load_balancer_port: 443
-      instance_protocol: ssl
-      instance_port: 443
-      # ssl certificate required for https or ssl
-      ssl_certificate_id: "{{ openshift_aws_elb_cert_arn }}"
+      cross_az_load_balancing: False
+      health_check:
+        ping_protocol: tcp
+        ping_port: "{{ openshift_master_api_port | default(8443) }}"
+        response_timeout: 5
+        interval: 30
+        unhealthy_threshold: 2
+        healthy_threshold: 2
+      idle_timout: 400
+      listeners:
+      - protocol: tcp
+        load_balancer_port: 80
+        instance_protocol: ssl
+        instance_port: "{{ openshift_master_api_port | default(8443) }}"
+      - protocol: ssl
+        load_balancer_port: "{{ openshift_master_api_port | default(8443) }}"
+        instance_protocol: ssl
+        instance_port: "{{ openshift_master_api_port | default(8443) }}"
+        ssl_certificate_id: "{{ openshift_aws_elb_cert_arn }}"
+      name: "{{ openshift_aws_elb_basename }}-master-external"
+      tags: "{{ openshift_aws_kube_tags }}"
     internal:
-    - protocol: tcp
-      load_balancer_port: 80
-      instance_protocol: tcp
-      instance_port: 80
-    - protocol: tcp
-      load_balancer_port: 443
-      instance_protocol: tcp
-      instance_port: 443
+      cross_az_load_balancing: False
+      health_check:
+        ping_protocol: tcp
+        ping_port: "{{ openshift_master_api_port | default(8443) }}"
+        response_timeout: 5
+        interval: 30
+        unhealthy_threshold: 2
+        healthy_threshold: 2
+      idle_timout: 400
+      listeners:
+      - protocol: tcp
+        load_balancer_port: 80
+        instance_protocol: tcp
+        instance_port: 80
+      - protocol: tcp
+        load_balancer_port: "{{ openshift_master_api_port | default(8443) }}"
+        instance_protocol: tcp
+        instance_port: "{{ openshift_master_api_port | default(8443) }}"
+      name: "{{ openshift_aws_elb_basename }}-master-internal"
+      tags: "{{ openshift_aws_kube_tags }}"
   infra:
     external:
-    - protocol: tcp
-      load_balancer_port: 80
-      instance_protocol: tcp
-      instance_port: 443
-      proxy_protocol: True
-    - protocol: tcp
-      load_balancer_port: 443
-      instance_protocol: tcp
-      instance_port: 443
-      proxy_protocol: True
+      cross_az_load_balancing: False
+      health_check:
+        ping_protocol: tcp
+        ping_port: 443
+        response_timeout: 5
+        interval: 30
+        unhealthy_threshold: 2
+        healthy_threshold: 2
+      idle_timout: 400
+      listeners:
+      - protocol: tcp
+        load_balancer_port: 80
+        instance_protocol: tcp
+        instance_port: 443
+        proxy_protocol: True
+      - protocol: tcp
+        load_balancer_port: 443
+        instance_protocol: tcp
+        instance_port: 443
+        proxy_protocol: True
+      name: "{{ openshift_aws_elb_basename }}-infra"
+      tags: "{{ openshift_aws_kube_tags }}"
 
 openshift_aws_node_group_config_master_volumes:
 - device_name: /dev/sda1
@@ -172,7 +191,7 @@ openshift_aws_master_group_config:
       iam_role: "{{ openshift_aws_iam_role_name }}"
       policy_name: "{{ openshift_aws_iam_role_policy_name }}"
       policy_json: "{{ openshift_aws_iam_role_policy_json }}"
-      elbs: "{{ openshift_aws_elb_name_dict['master'].keys()| map('extract', openshift_aws_elb_name_dict['master']) | list }}"
+      elbs: "{{ openshift_aws_elb_dict | json_query('master.[*][0][*].name') }}"
 
 openshift_aws_node_group_config:
   # The 'compute' key is always required here.
@@ -205,10 +224,7 @@ openshift_aws_node_group_config:
       iam_role: "{{ openshift_aws_iam_role_name }}"
       policy_name: "{{ openshift_aws_iam_role_policy_name }}"
       policy_json: "{{ openshift_aws_iam_role_policy_json }}"
-      elbs: "{{ openshift_aws_elb_name_dict['infra'].keys()| map('extract', openshift_aws_elb_name_dict['infra']) | list }}"
-
-openshift_aws_elb_tags: "{{ openshift_aws_kube_tags }}"
-openshift_aws_elb_az_load_balancing: False
+      elbs: "{{ openshift_aws_elb_dict | json_query('infra.[*][0][*].name') }}"
 
 # build_instance_tags is a custom filter in role lib_utils
 openshift_aws_kube_tags: "{{ openshift_aws_clusterid | build_instance_tags }}"
@@ -253,8 +269,8 @@ openshift_aws_node_security_groups:
     to_port: 80
     cidr_ip: 0.0.0.0/0
   - proto: tcp
-    from_port: 443
-    to_port: 443
+    from_port: "{{ openshift_master_api_port | default(8443) }}"
+    to_port: "{{ openshift_master_api_port | default(8443) }}"
     cidr_ip: 0.0.0.0/0
   compute:
     name: "{{ openshift_aws_clusterid }}_compute"
@@ -268,8 +284,8 @@ openshift_aws_node_security_groups:
     to_port: 80
     cidr_ip: 0.0.0.0/0
   - proto: tcp
-    from_port: 443
-    to_port: 443
+    from_port: "{{ openshift_master_api_port | default(8443) }}"
+    to_port: "{{ openshift_master_api_port | default(8443) }}"
     cidr_ip: 0.0.0.0/0
   - proto: tcp
     from_port: 30000
@@ -306,3 +322,8 @@ openshift_aws_masters_groups: masters,etcd,nodes
 # By default, don't delete things like the shared IAM instance
 # profile and uploaded ssh keys
 openshift_aws_enable_uninstall_shared_objects: False
+# S3 bucket names are global by default and can take minutes/hours for the
+# name to become available for re-use (assuming someone doesn't take the
+# name in the meantime). Default to just emptying the contents of the S3
+# bucket if we've been asked to create the bucket during provisioning.
+openshift_aws_really_delete_s3_bucket: False
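The `elbs` entries above replace a keys()/map('extract') pipeline with json_query. A quick way to check the expression against the new dict shape, assuming the jmespath package that backs Ansible's json_query filter (names below are illustrative):

    import jmespath  # backend of Ansible's json_query filter

    elb_dict = {
        'master': {
            'external': {'name': 'mycluster-master-external'},
            'internal': {'name': 'mycluster-master-internal'},
        },
    }
    # Same expression the defaults now use for the master group's elbs list.
    print(jmespath.search('master.[*][0][*].name', elb_dict))
    # expected: ['mycluster-master-external', 'mycluster-master-internal']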
diff --git a/roles/openshift_aws/tasks/elb.yml b/roles/openshift_aws/tasks/elb.yml
index 6f0028a3d..d8257cf31 100644
--- a/roles/openshift_aws/tasks/elb.yml
+++ b/roles/openshift_aws/tasks/elb.yml
@@ -5,18 +5,18 @@
 
 - name: "Create ELB {{ l_elb_dict_item.key }}"
   ec2_elb_lb:
-    name: "{{ l_openshift_aws_elb_name_dict[l_elb_dict_item.key][item.key] }}"
+    name: "{{ item.value.name }}"
     state: present
-    cross_az_load_balancing: "{{ openshift_aws_elb_az_load_balancing }}"
+    cross_az_load_balancing: "{{ item.value.cross_az_load_balancing }}"
    security_group_names: "{{ l_elb_security_groups[l_elb_dict_item.key] }}"
-    idle_timeout: "{{ openshift_aws_elb_idle_timout }}"
+    idle_timeout: "{{ item.value.idle_timout }}"
     region: "{{ openshift_aws_region }}"
     subnets:
     - "{{ subnetout.subnets[0].id }}"
-    health_check: "{{ openshift_aws_elb_health_check }}"
-    listeners: "{{ item.value }}"
+    health_check: "{{ item.value.health_check }}"
+    listeners: "{{ item.value.listeners }}"
     scheme: "{{ (item.key == 'internal') | ternary('internal','internet-facing') }}"
-    tags: "{{ openshift_aws_elb_tags }}"
+    tags: "{{ item.value.tags }}"
     wait: True
   register: new_elb
   with_dict: "{{ l_elb_dict_item.value }}"
diff --git a/roles/openshift_aws/tasks/master_facts.yml b/roles/openshift_aws/tasks/master_facts.yml
index 530b0134d..c2e362acd 100644
--- a/roles/openshift_aws/tasks/master_facts.yml
+++ b/roles/openshift_aws/tasks/master_facts.yml
@@ -3,7 +3,7 @@
   ec2_elb_facts:
     region: "{{ openshift_aws_region }}"
     names:
-    - "{{ openshift_aws_elb_name_dict['master']['internal'] }}"
+    - "{{ openshift_aws_elb_dict['master']['internal']['name'] }}"
   delegate_to: localhost
   register: elbs
diff --git a/roles/openshift_aws/tasks/provision_elb.yml b/roles/openshift_aws/tasks/provision_elb.yml
index a52f63bd5..fcc49c3ea 100644
--- a/roles/openshift_aws/tasks/provision_elb.yml
+++ b/roles/openshift_aws/tasks/provision_elb.yml
@@ -10,6 +10,5 @@
   with_dict: "{{ openshift_aws_elb_dict }}"
   vars:
     l_elb_security_groups: "{{ openshift_aws_elb_security_groups }}"
-    l_openshift_aws_elb_name_dict: "{{ openshift_aws_elb_name_dict }}"
   loop_control:
     loop_var: l_elb_dict_item
diff --git a/roles/openshift_aws/tasks/uninstall_s3.yml b/roles/openshift_aws/tasks/uninstall_s3.yml
new file mode 100644
index 000000000..0b08cbeed
--- /dev/null
+++ b/roles/openshift_aws/tasks/uninstall_s3.yml
@@ -0,0 +1,26 @@
+---
+- name: empty S3 bucket
+  block:
+  - name: get S3 object list
+    aws_s3:
+      bucket: "{{ openshift_aws_s3_bucket_name }}"
+      mode: list
+      region: "{{ openshift_aws_region }}"
+    register: s3_out
+
+  - name: delete S3 objects
+    aws_s3:
+      bucket: "{{ openshift_aws_s3_bucket_name }}"
+      mode: delobj
+      object: "{{ item }}"
+    with_items: "{{ s3_out.s3_keys }}"
+  when: openshift_aws_create_s3 | bool
+
+- name: delete S3 bucket
+  aws_s3:
+    bucket: "{{ openshift_aws_s3_bucket_name }}"
+    mode: delete
+    region: "{{ openshift_aws_region }}"
+  when:
+  - openshift_aws_create_s3 | bool
+  - openshift_aws_really_delete_s3_bucket | bool
diff --git a/roles/openshift_cloud_provider/defaults/main.yml b/roles/openshift_cloud_provider/defaults/main.yml
new file mode 100644
index 000000000..37cbf5603
--- /dev/null
+++ b/roles/openshift_cloud_provider/defaults/main.yml
@@ -0,0 +1,4 @@
+---
+openshift_gcp_project: ''
+openshift_gcp_prefix: ''
+openshift_gcp_network_name: "{{ openshift_gcp_prefix }}network"
diff --git a/roles/openshift_cloud_provider/tasks/gce.yml b/roles/openshift_cloud_provider/tasks/gce.yml
index 395bd304c..9e1c31b1d 100644
--- a/roles/openshift_cloud_provider/tasks/gce.yml
+++ b/roles/openshift_cloud_provider/tasks/gce.yml
@@ -1,4 +1,12 @@
 ---
+- name: check variables are passed
+  fail:
+    msg: "Ensure correct variables are defined for gcp. {{ item }}"
+  when: item == ''
+  with_items:
+  - "{{ openshift_gcp_project }}"
+  - "{{ openshift_gcp_prefix }}"
+
 # Work around ini_file create option in 2.2 which defaults to no
 - name: Create cloud config file
   file:
@@ -16,8 +24,8 @@
     option: "{{ item.key }}"
     value: "{{ item.value }}"
   with_items:
-  - { key: 'project-id', value: '{{ openshift_gcp_project }}' }
-  - { key: 'network-name', value: '{{ openshift_gcp_network_name }}' }
-  - { key: 'node-tags', value: '{{ openshift_gcp_prefix }}ocp' }
-  - { key: 'node-instance-prefix', value: '{{ openshift_gcp_prefix }}' }
-  - { key: 'multizone', value: 'false' }
+  - { key: 'project-id', value: '{{ openshift_gcp_project }}' }
+  - { key: 'network-name', value: '{{ openshift_gcp_network_name }}' }
+  - { key: 'node-tags', value: '{{ openshift_gcp_prefix }}ocp' }
+  - { key: 'node-instance-prefix', value: '{{ openshift_gcp_prefix }}' }
+  - { key: 'multizone', value: 'false' }
diff --git a/roles/openshift_default_storage_class/defaults/main.yml b/roles/openshift_default_storage_class/defaults/main.yml
index 014c06641..687d60171 100644
--- a/roles/openshift_default_storage_class/defaults/main.yml
+++ b/roles/openshift_default_storage_class/defaults/main.yml
@@ -1,4 +1,7 @@
 ---
+# Must not be blank if you're using vsphere
+openshift_cloudprovider_vsphere_datacenter: ''
+
 openshift_storageclass_defaults:
   aws:
     provisioner: aws-ebs
@@ -19,6 +22,12 @@ openshift_storageclass_defaults:
     parameters:
       fstype: xfs
 
+  vsphere:
+    provisioner: vsphere-volume
+    name: standard
+    parameters:
+      datastore: "{{ openshift_cloudprovider_vsphere_datacenter }}"
+
 openshift_storageclass_default: "true"
 openshift_storageclass_name: "{{ openshift_storageclass_defaults[openshift_cloudprovider_kind]['name'] }}"
 openshift_storageclass_provisioner: "{{ openshift_storageclass_defaults[openshift_cloudprovider_kind]['provisioner'] }}"
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index d6d31effd..452cc4ef6 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -15,8 +15,10 @@ import os
 import yaml
 import struct
 import socket
+import ipaddress
 from distutils.util import strtobool
 from distutils.version import LooseVersion
+from ansible.module_utils.six import u
 from ansible.module_utils.six import string_types
 from ansible.module_utils.six.moves import configparser
@@ -1146,6 +1148,8 @@ def set_proxy_facts(facts):
         if 'no_proxy_internal_hostnames' in common:
             common['no_proxy'].extend(common['no_proxy_internal_hostnames'].split(','))
     # We always add local dns domain and ourselves no matter what
+    kube_svc_ip = str(ipaddress.ip_network(u(common['portal_net']))[1])
+    common['no_proxy'].append(kube_svc_ip)
     common['no_proxy'].append('.' + common['dns_domain'])
     common['no_proxy'].append('.svc')
     common['no_proxy'].append(common['hostname'])
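The openshift_facts change adds the kubernetes service VIP to no_proxy: index [1] of the portal_net network is its first usable host address, which kube assigns to the 'kubernetes' service. The `u()` import keeps the argument a text type on Python 2, since ipaddress.ip_network rejects byte strings there. A self-contained illustration with an example subnet:

    import ipaddress

    # portal_net is the cluster services network; [1] is its first usable
    # host address, i.e. the kubernetes service VIP.
    portal_net = u'172.30.0.0/16'  # example value
    kube_svc_ip = str(ipaddress.ip_network(portal_net)[1])
    assert kube_svc_ip == '172.30.0.1'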
diff --git a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
index d298fbab2..145b82491 100644
--- a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
+++ b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
@@ -171,16 +171,21 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
             required.add(self._registry_console_image(image_tag, image_info))
 
         # images for containerized components
-        if self.get_var("openshift_is_containerized"):
-            components = set()
+        def add_var_or_default_img(var_name, comp_name):
+            """Returns: default image from comp_name, overridden by var_name in task_vars"""
+            default = "{}/{}:{}".format(image_info["namespace"], comp_name, image_tag)
+            required.add(self.template_var(self.get_var(var_name, default=default)))
+
+        if self.get_var("openshift_is_containerized", convert=bool):
             if 'oo_nodes_to_config' in host_groups:
-                components.update(["node", "openvswitch"])
+                add_var_or_default_img("osn_image", "node")
+                add_var_or_default_img("osn_ovs_image", "openvswitch")
             if 'oo_masters_to_config' in host_groups:  # name is "origin" or "ose"
-                components.add(image_info["name"])
-            for component in components:
-                required.add("{}/{}:{}".format(image_info["namespace"], component, image_tag))
-            if 'oo_etcd_to_config' in host_groups:  # special case, note it is the same for origin/enterprise
-                required.add("registry.access.redhat.com/rhel7/etcd")  # and no image tag
+                add_var_or_default_img("osm_image", image_info["name"])
+            if 'oo_etcd_to_config' in host_groups:
+                # special case, note default is the same for origin/enterprise and has no image tag
+                etcd_img = self.get_var("osm_etcd_image", default="registry.access.redhat.com/rhel7/etcd")
+                required.add(self.template_var(etcd_img))
 
         return required
diff --git a/roles/openshift_health_checker/test/docker_image_availability_test.py b/roles/openshift_health_checker/test/docker_image_availability_test.py
index 9fd6e049d..d31f263dd 100644
--- a/roles/openshift_health_checker/test/docker_image_availability_test.py
+++ b/roles/openshift_health_checker/test/docker_image_availability_test.py
@@ -276,11 +276,40 @@ def test_registry_console_image(task_vars, expected):
     assert expected == DockerImageAvailability(task_vars=task_vars)._registry_console_image(tag, info)
 
 
-def test_containerized_etcd():
-    task_vars = dict(
+@pytest.mark.parametrize("task_vars, expected", [
+    (
+        dict(
+            group_names=['oo_nodes_to_config'],
+            osn_ovs_image='spam/ovs',
+            openshift_image_tag="veggs",
+        ),
+        set([
+            'spam/ovs', 'openshift/node:veggs', 'cockpit/kubernetes:latest',
+            'openshift/origin-haproxy-router:veggs', 'openshift/origin-deployer:veggs',
+            'openshift/origin-docker-registry:veggs', 'openshift/origin-pod:veggs',
+        ]),
+    ), (
+        dict(
+            group_names=['oo_masters_to_config'],
+        ),
+        set(['openshift/origin:latest']),
+    ), (
+        dict(
+            group_names=['oo_etcd_to_config'],
+        ),
+        set(['registry.access.redhat.com/rhel7/etcd']),
+    ), (
+        dict(
+            group_names=['oo_etcd_to_config'],
+            osm_etcd_image='spam/etcd',
+        ),
+        set(['spam/etcd']),
+    ),
+])
+def test_containerized(task_vars, expected):
+    task_vars.update(dict(
         openshift_is_containerized=True,
         openshift_deployment_type="origin",
-        group_names=['oo_etcd_to_config'],
-    )
-    expected = set(['registry.access.redhat.com/rhel7/etcd'])
+    ))
 
     assert expected == DockerImageAvailability(task_vars=task_vars).required_images()
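The contract of add_var_or_default_img above is simple: the default image is namespace/component:tag, but an inventory override (osn_image, osn_ovs_image, osm_image, osm_etcd_image) wins when set. A standalone sketch of that lookup, not the check itself:

    # image_for() is a hypothetical stand-in for add_var_or_default_img.
    def image_for(task_vars, var_name, namespace, comp_name, image_tag):
        default = "{}/{}:{}".format(namespace, comp_name, image_tag)
        return task_vars.get(var_name, default)

    assert image_for({}, 'osn_image', 'openshift', 'node', 'v3.9') == 'openshift/node:v3.9'
    assert image_for({'osn_image': 'spam/node'}, 'osn_image', 'openshift', 'node', 'v3.9') == 'spam/node'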
diff --git a/roles/openshift_hosted/tasks/storage/glusterfs.yml b/roles/openshift_hosted/tasks/storage/glusterfs.yml
index b39c44b01..7223a5afe 100644
--- a/roles/openshift_hosted/tasks/storage/glusterfs.yml
+++ b/roles/openshift_hosted/tasks/storage/glusterfs.yml
@@ -35,7 +35,7 @@
   mount:
     state: mounted
     fstype: glusterfs
-    src: "{% if 'glusterfs_registry' in groups %}{% set node = groups.glusterfs_registry[0] %}{% elif 'glusterfs' in groups %}{% set node = groups.glusterfs[0] %}{% endif %}{% if openshift_hosted_registry_storage_glusterfs_ips is defined and openshift_hosted_registry_storage_glusterfs_ips|length > 0 %}{{ openshift_hosted_registry_storage_glusterfs_ips[0] }}{% elif 'glusterfs_hostname' in hostvars[node] %}{{ hostvars[node].glusterfs_hostname }}{% elif 'openshift' in hostvars[node] %}{{ hostvars[node].openshift.node.nodename }}{% else %}{{ node }}{% endif %}:/{{ openshift_hosted_registry_storage_glusterfs_path }}"
+    src: "{% if 'glusterfs_registry' in groups and groups['glusterfs_registry'] | length > 0 %}{% set node = groups.glusterfs_registry[0] %}{% elif 'glusterfs' in groups and groups['glusterfs'] | length > 0 %}{% set node = groups.glusterfs[0] %}{% endif %}{% if openshift_hosted_registry_storage_glusterfs_ips is defined and openshift_hosted_registry_storage_glusterfs_ips|length > 0 %}{{ openshift_hosted_registry_storage_glusterfs_ips[0] }}{% elif 'glusterfs_hostname' in hostvars[node] %}{{ hostvars[node].glusterfs_hostname }}{% elif 'openshift' in hostvars[node] %}{{ hostvars[node].openshift.node.nodename }}{% else %}{{ node }}{% endif %}:/{{ openshift.hosted.registry.storage.glusterfs.path }}"
     name: "{{ mktemp.stdout }}"
 
 - name: Set registry volume permissions
diff --git a/roles/openshift_logging/README.md b/roles/openshift_logging/README.md
index a192bd67e..c438236a4 100644
--- a/roles/openshift_logging/README.md
+++ b/roles/openshift_logging/README.md
@@ -58,6 +58,7 @@ When `openshift_logging_install_logging` is set to `False` the `openshift_loggin
 - `openshift_logging_kibana_replica_count`: The number of replicas Kibana should be scaled up to. Defaults to 1.
 - `openshift_logging_kibana_nodeselector`: A map of labels (e.g. {"node":"infra","region":"west"} to select the nodes where the pod will land.
 - `openshift_logging_kibana_edge_term_policy`: Insecure Edge Termination Policy. Defaults to Redirect.
+- `openshift_logging_kibana_env_vars`: A map of environment variables to add to the kibana deployment config (e.g. {"ELASTICSEARCH_REQUESTTIMEOUT":"30000"})
 
 - `openshift_logging_fluentd_nodeselector`: The node selector that the Fluentd daemonset uses to determine where to deploy to. Defaults to '"logging-infra-fluentd": "true"'.
 - `openshift_logging_fluentd_cpu_request`: The minimum amount of CPU to allocate for Fluentd collector pods. Defaults to '100m'.
diff --git a/roles/openshift_logging/tasks/delete_logging.yaml b/roles/openshift_logging/tasks/delete_logging.yaml
index ced7397b5..6be47b1f8 100644
--- a/roles/openshift_logging/tasks/delete_logging.yaml
+++ b/roles/openshift_logging/tasks/delete_logging.yaml
@@ -140,4 +140,6 @@
     console_config_edits:
     - key: clusterInfo#loggingPublicURL
       value: ""
-  when: openshift_web_console_install | default(true) | bool
+  when:
+  - openshift_web_console_install | default(true) | bool
+  - openshift.common.version_gte_3_9
diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml
index 3afd8680f..c905502ac 100644
--- a/roles/openshift_logging/tasks/install_logging.yaml
+++ b/roles/openshift_logging/tasks/install_logging.yaml
@@ -71,10 +71,17 @@
 - set_fact: openshift_logging_es_pvc_prefix="logging-es"
   when: openshift_logging_es_pvc_prefix == ""
 
+# Using this module for setting this fact because otherwise we were getting a value of "" trying to
+# use default() in the set_fact after this which caused us to not correctly evaluate
+# openshift_logging_elasticsearch_storage_type
+- conditional_set_fact:
+    facts: "{{ hostvars[inventory_hostname] }}"
+    vars:
+      elasticsearch_storage_type: openshift_logging_elasticsearch_storage_type
+
 - set_fact:
-    elasticsearch_storage_type: "{{ openshift_logging_elasticsearch_storage_type | default('pvc' if ( openshift_logging_es_pvc_dynamic | bool or openshift_hosted_logging_storage_kind | default('') == 'nfs' or openshift_logging_es_pvc_size | length > 0) else 'emptydir') }}"
+    default_elasticsearch_storage_type: "{{ 'pvc' if ( openshift_logging_es_pvc_dynamic | bool or openshift_logging_storage_kind | default('') == 'nfs' or openshift_logging_es_pvc_size | length > 0) else 'emptydir' }}"
 
-# We don't allow scaling down of ES nodes currently
 - include_role:
     name: openshift_logging_elasticsearch
   vars:
@@ -85,7 +92,8 @@
     openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_pvc_size }}"
     openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_cluster_size | int }}"
 
-    openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type }}"
+    openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type | default('pvc' if outer_item.0.volumes['elasticsearch-storage'].persistentVolumeClaim is defined else 'hostmount' if outer_item.0.volumes['elasticsearch-storage'].hostPath is defined else 'emptydir' if outer_item.0.volumes['elasticsearch-storage'].emptyDir is defined else default_elasticsearch_storage_type) }}"
+    openshift_logging_elasticsearch_hostmount_path: "{{ outer_item.0.volumes['elasticsearch-storage'].hostPath.path if outer_item.0.volumes['elasticsearch-storage'].hostPath is defined else '' }}"
     openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_pv_selector }}"
     openshift_logging_elasticsearch_pvc_storage_class_name: "{{ openshift_logging_es_pvc_storage_class_name | default() }}"
     openshift_logging_elasticsearch_nodeselector: "{{ openshift_logging_es_nodeselector if outer_item.0.nodeSelector | default(None) is none else outer_item.0.nodeSelector }}"
@@ -112,7 +120,7 @@
     openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_pvc_size }}"
     openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_cluster_size | int }}"
 
-    openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type }}"
+    openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type | default(default_elasticsearch_storage_type) }}"
     openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_pv_selector }}"
     openshift_logging_elasticsearch_pvc_storage_class_name: "{{ openshift_logging_es_pvc_storage_class_name | default() }}"
@@ -133,7 +141,7 @@
   when: openshift_logging_es_ops_pvc_prefix == ""
 
 - set_fact:
-    elasticsearch_storage_type: "{{ openshift_logging_elasticsearch_storage_type | default('pvc' if ( openshift_logging_es_ops_pvc_dynamic | bool or openshift_hosted_logging_storage_kind | default('') == 'nfs' or openshift_logging_es_ops_pvc_size | length > 0) else 'emptydir') }}"
+    default_elasticsearch_storage_type: "{{ 'pvc' if ( openshift_logging_es_ops_pvc_dynamic | bool or openshift_logging_storage_kind | default('') == 'nfs' or openshift_logging_es_ops_pvc_size | length > 0) else 'emptydir' }}"
   when:
   - openshift_logging_use_ops | bool
@@ -147,7 +155,8 @@
     openshift_logging_elasticsearch_ops_deployment: true
     openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_ops_cluster_size | int }}"
 
-    openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type }}"
+    openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type | default('pvc' if outer_item.0.volumes['elasticsearch-storage'].persistentVolumeClaim is defined else 'hostmount' if outer_item.0.volumes['elasticsearch-storage'].hostPath is defined else 'emptydir' if outer_item.0.volumes['elasticsearch-storage'].emptyDir is defined else default_elasticsearch_storage_type) }}"
+    openshift_logging_elasticsearch_hostmount_path: "{{ outer_item.0.volumes['elasticsearch-storage'].hostPath.path if outer_item.0.volumes['elasticsearch-storage'].hostPath is defined else '' }}"
     openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_ops_pvc_size }}"
     openshift_logging_elasticsearch_pvc_dynamic: "{{ openshift_logging_es_ops_pvc_dynamic }}"
     openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}"
@@ -189,7 +198,7 @@
     openshift_logging_elasticsearch_ops_deployment: true
     openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_ops_cluster_size | int }}"
 
-    openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type }}"
+    openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type | default(default_elasticsearch_storage_type) }}"
     openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_ops_pvc_size }}"
     openshift_logging_elasticsearch_pvc_dynamic: "{{ openshift_logging_es_ops_pvc_dynamic }}"
     openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}"
@@ -314,8 +323,8 @@
     openshift_logging_install_eventrouter | default(false) | bool
 
-# TODO: Remove when asset config is removed from master-config.yaml
 - include_tasks: update_master_config.yaml
+  when: not openshift.common.version_gte_3_9
 
 # Update asset config in openshift-web-console namespace
 - name: Add Kibana route information to web console asset config
@@ -326,4 +335,6 @@
     console_config_edits:
     - key: clusterInfo#loggingPublicURL
       value: "https://{{ openshift_logging_kibana_hostname }}"
-  when: openshift_web_console_install | default(true) | bool
+  when:
+  - openshift_web_console_install | default(true) | bool
+  - openshift.common.version_gte_3_9
diff --git a/roles/openshift_logging_elasticsearch/tasks/main.yaml b/roles/openshift_logging_elasticsearch/tasks/main.yaml
index ff5ad1045..b731d93a0 100644
--- a/roles/openshift_logging_elasticsearch/tasks/main.yaml
+++ b/roles/openshift_logging_elasticsearch/tasks/main.yaml
@@ -137,6 +137,16 @@
   - "prometheus_out.stderr | length > 0"
   - "'already exists' not in prometheus_out.stderr"
 
+- set_fact:
+    _logging_metrics_proxy_passwd: "{{ 16 | lib_utils_oo_random_word | b64encode }}"
+
+- template:
+    src: passwd.j2
+    dest: "{{mktemp.stdout}}/passwd.yml"
+  vars:
+    logging_user_name: "{{ openshift_logging_elasticsearch_prometheus_sa }}"
+    logging_user_passwd: "{{ _logging_metrics_proxy_passwd }}"
+
 # View role and binding
 - name: Generate logging-elasticsearch-view-role
   template:
@@ -255,6 +265,8 @@
       path: "{{ generated_certs_dir }}/ca.crt"
     - name: admin.jks
       path: "{{ generated_certs_dir }}/system.admin.jks"
+    - name: passwd.yml
+      path: "{{mktemp.stdout}}/passwd.yml"
 
 # services
 - name: Set logging-{{ es_component }}-cluster service
@@ -391,6 +403,7 @@
     es_container_security_context: "{{ _es_containers.elasticsearch.securityContext if _es_containers is defined and 'elasticsearch' in _es_containers and 'securityContext' in _es_containers.elasticsearch else None }}"
     deploy_type: "{{ openshift_logging_elasticsearch_deployment_type }}"
     es_replicas: 1
+    basic_auth_passwd: "{{ _logging_metrics_proxy_passwd | b64decode }}"
 
 - name: Set ES dc
   oc_obj:
diff --git a/roles/openshift_logging_elasticsearch/templates/es.j2 b/roles/openshift_logging_elasticsearch/templates/es.j2
index 4b189f255..b1d6a4489 100644
--- a/roles/openshift_logging_elasticsearch/templates/es.j2
+++ b/roles/openshift_logging_elasticsearch/templates/es.j2
@@ -51,6 +51,7 @@ spec:
           - -client-id={{openshift_logging_elasticsearch_prometheus_sa}}
           - -client-secret-file=/var/run/secrets/kubernetes.io/serviceaccount/token
           - -cookie-secret={{ 16 | lib_utils_oo_random_word | b64encode }}
+          - -basic-auth-password={{ basic_auth_passwd }}
          - -upstream=https://localhost:9200
           - '-openshift-sar={"namespace": "{{ openshift_logging_elasticsearch_namespace}}", "verb": "view", "resource": "prometheus", "group": "metrics.openshift.io"}'
           - '-openshift-delegate-urls={"/": {"resource": "prometheus", "verb": "view", "group": "metrics.openshift.io", "namespace": "{{ openshift_logging_elasticsearch_namespace}}"}}'
diff --git a/roles/openshift_logging_elasticsearch/templates/passwd.j2 b/roles/openshift_logging_elasticsearch/templates/passwd.j2
new file mode 100644
index 000000000..a22151eef
--- /dev/null
+++ b/roles/openshift_logging_elasticsearch/templates/passwd.j2
@@ -0,0 +1,2 @@
+"{{logging_user_name}}":
+  passwd: "{{logging_user_passwd}}"
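The generated proxy password above is kept base64-encoded in the fact and in passwd.yml, then b64decoded when templated into the oauth proxy's -basic-auth-password flag. A quick round-trip check of that convention (os.urandom stands in for the custom lib_utils_oo_random_word filter):

    import base64
    import os

    # Encoded form lives in the secret/passwd.yml; decoded form feeds the flag.
    _logging_metrics_proxy_passwd = base64.b64encode(os.urandom(16))
    basic_auth_passwd = base64.b64decode(_logging_metrics_proxy_passwd)
    assert base64.b64encode(basic_auth_passwd) == _logging_metrics_proxy_passwd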
vars to add to the kibana deploymentconfig +openshift_logging_kibana_env_vars: {} + # this is used to determine if this is an operations deployment or a non-ops deployment # simply used for naming purposes openshift_logging_kibana_ops_deployment: false diff --git a/roles/openshift_logging_kibana/tasks/main.yaml b/roles/openshift_logging_kibana/tasks/main.yaml index 3c3bd902e..c67235c62 100644 --- a/roles/openshift_logging_kibana/tasks/main.yaml +++ b/roles/openshift_logging_kibana/tasks/main.yaml @@ -251,6 +251,7 @@ kibana_proxy_memory_limit: "{{ openshift_logging_kibana_proxy_memory_limit }}" kibana_replicas: "{{ openshift_logging_kibana_replicas | default (1) }}" kibana_node_selector: "{{ openshift_logging_kibana_nodeselector | default({}) }}" + kibana_env_vars: "{{ openshift_logging_kibana_env_vars | default({}) }}" - name: Set Kibana DC oc_obj: diff --git a/roles/openshift_logging_kibana/templates/kibana.j2 b/roles/openshift_logging_kibana/templates/kibana.j2 index 57d216373..ed05b8458 100644 --- a/roles/openshift_logging_kibana/templates/kibana.j2 +++ b/roles/openshift_logging_kibana/templates/kibana.j2 @@ -70,6 +70,10 @@ spec: resourceFieldRef: containerName: kibana resource: limits.memory +{% for key, value in kibana_env_vars.items() %} + - name: "{{ key }}" + value: "{{ value }}" +{% endfor %} volumeMounts: - name: kibana mountPath: /etc/kibana/keys diff --git a/roles/openshift_manage_node/defaults/main.yml b/roles/openshift_manage_node/defaults/main.yml index 00e04b9f2..b7a89a723 100644 --- a/roles/openshift_manage_node/defaults/main.yml +++ b/roles/openshift_manage_node/defaults/main.yml @@ -1,9 +1,5 @@ --- # openshift_manage_node_is_master is set at the play level. openshift_manage_node_is_master: False - -# Default is to be schedulable except for master nodes.
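A note on the openshift_logging_kibana_env_vars map introduced above: the kibana.j2 loop renders every key/value pair of the map as a plain name/value environment entry on the Kibana container. A minimal group_vars sketch, where both variable names are illustrative rather than defined by the role:

# hypothetical inventory snippet; each entry surfaces as an env var on the
# kibana container via the kibana.j2 for-loop shown above
openshift_logging_kibana_env_vars:
  ELASTICSEARCH_REQUESTTIMEOUT: "600000"
  KIBANA_VERBOSE_LOGGING: "true"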
-l_openshift_manage_schedulable: "{{ openshift_schedulable | default(not openshift_manage_node_is_master) }}" - openshift_master_node_labels: node-role.kubernetes.io/master: 'true' diff --git a/roles/openshift_manage_node/tasks/config.yml b/roles/openshift_manage_node/tasks/config.yml index 4f00351b5..e5753d185 100644 --- a/roles/openshift_manage_node/tasks/config.yml +++ b/roles/openshift_manage_node/tasks/config.yml @@ -2,7 +2,7 @@ - name: Set node schedulability oc_adm_manage_node: node: "{{ openshift.node.nodename | lower }}" - schedulable: "{{ 'true' if l_openshift_manage_schedulable | bool else 'false' }}" + schedulable: "{{ 'true' if openshift_schedulable | default(true) | bool else 'false' }}" retries: 10 delay: 5 register: node_schedulable @@ -23,5 +23,5 @@ delegate_to: "{{ openshift_master_host }}" vars: l_node_labels: "{{ openshift_node_labels | default({}) }}" - l_master_labels: "{{ ('oo_masters_to_config' in group_names) | ternary(openshift_master_node_labels, {}) }}" + l_master_labels: "{{ openshift_manage_node_is_master | ternary(openshift_master_node_labels, {}) }}" l_all_labels: "{{ l_node_labels | combine(l_master_labels) }}" diff --git a/roles/openshift_management/defaults/main.yml b/roles/openshift_management/defaults/main.yml index b5e234b7f..57bc97e3e 100644 --- a/roles/openshift_management/defaults/main.yml +++ b/roles/openshift_management/defaults/main.yml @@ -15,6 +15,8 @@ openshift_management_pod_rollout_retries: 30 # # Choose 'miq-template' for a podified database install # Choose 'miq-template-ext-db' for an external database install +# TODO: Swap this var declaration once CFME is fully supported +#openshift_management_app_template: "{{ 'cfme-template' if openshift_deployment_type == 'openshift-enterprise' else 'miq-template' }}" openshift_management_app_template: miq-template # If you are using the miq-template-ext-db template then you must add # the required database parameters to the diff --git a/roles/openshift_management/tasks/accounts.yml b/roles/openshift_management/tasks/accounts.yml index e45ea8d43..80318fec0 100644 --- a/roles/openshift_management/tasks/accounts.yml +++ b/roles/openshift_management/tasks/accounts.yml @@ -5,14 +5,14 @@ oc_serviceaccount: namespace: "{{ openshift_management_project }}" state: present - name: "{{ openshift_management_flavor_short }}{{ item.name }}" + name: "{{ __openshift_management_flavor_short }}{{ item.name }}" with_items: - "{{ __openshift_system_account_sccs }}" - name: Ensure the CFME system accounts have all the required SCCs oc_adm_policy_user: namespace: "{{ openshift_management_project }}" - user: "system:serviceaccount:{{ openshift_management_project }}:{{ openshift_management_flavor_short }}{{ item.name }}" + user: "system:serviceaccount:{{ openshift_management_project }}:{{ __openshift_management_flavor_short }}{{ item.name }}" resource_kind: scc resource_name: "{{ item.resource_name }}" with_items: @@ -21,7 +21,7 @@ - name: Ensure the CFME system accounts have the required roles oc_adm_policy_user: namespace: "{{ openshift_management_project }}" - user: "system:serviceaccount:{{ openshift_management_project }}:{{ openshift_management_flavor_short }}{{ item.name }}" + user: "system:serviceaccount:{{ openshift_management_project }}:{{ __openshift_management_flavor_short }}{{ item.name }}" resource_kind: role resource_name: "{{ item.resource_name }}" with_items: diff --git a/roles/openshift_management/tasks/main.yml b/roles/openshift_management/tasks/main.yml index c4b204b98..5209eba56 100644 --- 
a/roles/openshift_management/tasks/main.yml +++ b/roles/openshift_management/tasks/main.yml @@ -71,15 +71,15 @@ # CREATE APP - name: Note the correct ext-db template name set_fact: - openshift_management_template_name: "{{ openshift_management_flavor }}-ext-db" + openshift_management_template_name: "{{ __openshift_management_flavor }}-ext-db" when: - - openshift_management_app_template in ['miq-template-ext-db', 'cfme-template-ext-db'] + - __openshift_management_use_ext_db - name: Note the correct podified db template name set_fact: - openshift_management_template_name: "{{ openshift_management_flavor }}" + openshift_management_template_name: "{{ __openshift_management_flavor }}" when: - - openshift_management_app_template in ['miq-template', 'cfme-template'] + - not __openshift_management_use_ext_db - name: Ensure the Management App is created oc_process: @@ -89,7 +89,7 @@ params: "{{ openshift_management_template_parameters }}" - name: Wait for the app to come up. May take several minutes, 30s check intervals, {{ openshift_management_pod_rollout_retries }} retries - command: "oc logs {{ openshift_management_flavor }}-0 -n {{ openshift_management_project }}" + command: "oc logs {{ __openshift_management_flavor }}-0 -n {{ openshift_management_project }}" register: app_seeding_logs until: app_seeding_logs.stdout.find('Server starting complete') != -1 delay: 30 diff --git a/roles/openshift_management/tasks/storage/create_nfs_pvs.yml b/roles/openshift_management/tasks/storage/create_nfs_pvs.yml index d1b9a8d5c..1f8cac6c6 100644 --- a/roles/openshift_management/tasks/storage/create_nfs_pvs.yml +++ b/roles/openshift_management/tasks/storage/create_nfs_pvs.yml @@ -12,7 +12,7 @@ when: - openshift_management_template_parameters.APPLICATION_VOLUME_CAPACITY is not defined -- when: openshift_management_app_template in ['miq-template', 'cfme-template'] +- when: not __openshift_management_use_ext_db block: - name: Note the DB PV Size from Template Parameters set_fact: @@ -31,7 +31,7 @@ namespace: "{{ openshift_management_project }}" state: list kind: pv - name: "{{ openshift_management_flavor_short }}-app" + name: "{{ __openshift_management_flavor_short }}-app" register: miq_app_pv_check - name: Check if the Management DB PV has been created @@ -39,15 +39,15 @@ namespace: "{{ openshift_management_project }}" state: list kind: pv - name: "{{ openshift_management_flavor_short }}-db" + name: "{{ __openshift_management_flavor_short }}-db" register: miq_db_pv_check when: - - openshift_management_app_template in ['miq-template', 'cfme-template'] + - not __openshift_management_use_ext_db - name: Ensure the Management App PV is created oc_process: namespace: "{{ openshift_management_project }}" - template_name: "{{ openshift_management_flavor }}-app-pv" + template_name: "{{ __openshift_management_flavor }}-app-pv" create: True params: PV_SIZE: "{{ openshift_management_app_pv_size }}" @@ -58,12 +58,12 @@ - name: Ensure the Management DB PV is created oc_process: namespace: "{{ openshift_management_project }}" - template_name: "{{ openshift_management_flavor }}-db-pv" + template_name: "{{ __openshift_management_flavor }}-db-pv" create: True params: PV_SIZE: "{{ openshift_management_db_pv_size }}" BASE_PATH: "{{ openshift_management_storage_nfs_base_dir }}" NFS_HOST: "{{ openshift_management_nfs_server }}" when: - - openshift_management_app_template in ['miq-template', 'cfme-template'] + - not __openshift_management_use_ext_db - miq_db_pv_check.results.results == [{}] diff --git 
a/roles/openshift_management/tasks/storage/nfs.yml b/roles/openshift_management/tasks/storage/nfs.yml index 9e3a4d43a..4a00efb1d 100644 --- a/roles/openshift_management/tasks/storage/nfs.yml +++ b/roles/openshift_management/tasks/storage/nfs.yml @@ -17,8 +17,8 @@ tasks_from: create_export vars: l_nfs_base_dir: "{{ openshift_management_storage_nfs_base_dir }}" - l_nfs_export_config: "{{ openshift_management_flavor_short }}" - l_nfs_export_name: "{{ openshift_management_flavor_short }}-app" + l_nfs_export_config: "{{ __openshift_management_flavor_short }}" + l_nfs_export_name: "{{ __openshift_management_flavor_short }}-app" l_nfs_options: "*(rw,no_root_squash,no_wdelay)" - name: Create the DB export @@ -27,10 +27,10 @@ tasks_from: create_export vars: l_nfs_base_dir: "{{ openshift_management_storage_nfs_base_dir }}" - l_nfs_export_config: "{{ openshift_management_flavor_short }}" - l_nfs_export_name: "{{ openshift_management_flavor_short }}-db" + l_nfs_export_config: "{{ __openshift_management_flavor_short }}" + l_nfs_export_name: "{{ __openshift_management_flavor_short }}-db" l_nfs_options: "*(rw,no_root_squash,no_wdelay)" when: - - openshift_management_app_template in ['miq-template', 'cfme-template'] + - not __openshift_management_use_ext_db delegate_to: "{{ openshift_management_nfs_server }}" diff --git a/roles/openshift_management/tasks/template.yml b/roles/openshift_management/tasks/template.yml index 9f97cdcb9..f40af7349 100644 --- a/roles/openshift_management/tasks/template.yml +++ b/roles/openshift_management/tasks/template.yml @@ -13,59 +13,59 @@ ###################################################################### # STANDARD PODIFIED DATABASE TEMPLATE -- when: openshift_management_app_template in ['miq-template', 'cfme-template'] +- when: not __openshift_management_use_ext_db block: - name: Check if the Management Server template has been created already oc_obj: namespace: "{{ openshift_management_project }}" state: list kind: template - name: "{{ openshift_management_flavor }}" + name: "{{ __openshift_management_flavor }}" register: miq_server_check - when: miq_server_check.results.results == [{}] block: - name: Copy over Management Server template copy: - src: "templates/{{ openshift_management_flavor }}/{{ openshift_management_flavor_short }}-template.yaml" + src: "templates/{{ __openshift_management_flavor }}/{{ __openshift_management_flavor_short }}-template.yaml" dest: "{{ template_dir }}/" - name: Ensure Management Server Template is created oc_obj: namespace: "{{ openshift_management_project }}" - name: "{{ openshift_management_flavor }}" + name: "{{ __openshift_management_flavor }}" state: present kind: template files: - - "{{ template_dir }}/{{ openshift_management_flavor_short }}-template.yaml" + - "{{ template_dir }}/{{ __openshift_management_flavor_short }}-template.yaml" ###################################################################### # EXTERNAL DATABASE TEMPLATE -- when: openshift_management_app_template in ['miq-template-ext-db', 'cfme-template-ext-db'] +- when: __openshift_management_use_ext_db block: - name: Check if the Management Ext-DB Server template has been created already oc_obj: namespace: "{{ openshift_management_project }}" state: list kind: template - name: "{{ openshift_management_flavor }}-ext-db" + name: "{{ __openshift_management_flavor }}-ext-db" register: miq_ext_db_server_check - when: miq_ext_db_server_check.results.results == [{}] block: - name: Copy over Management Ext-DB Server template copy: - src: "templates/{{ 
openshift_management_flavor }}/{{openshift_management_flavor_short}}-template-ext-db.yaml" + src: "templates/{{ __openshift_management_flavor }}/{{__openshift_management_flavor_short}}-template-ext-db.yaml" dest: "{{ template_dir }}/" - name: Ensure Management Ext-DB Server Template is created oc_obj: namespace: "{{ openshift_management_project }}" - name: "{{ openshift_management_flavor }}-ext-db" + name: "{{ __openshift_management_flavor }}-ext-db" state: present kind: template files: - - "{{ template_dir }}/{{ openshift_management_flavor_short }}-template-ext-db.yaml" + - "{{ template_dir }}/{{ __openshift_management_flavor_short }}-template-ext-db.yaml" # End app template creation. ###################################################################### @@ -79,50 +79,50 @@ namespace: "{{ openshift_management_project }}" state: list kind: template - name: "{{ openshift_management_flavor }}-app-pv" + name: "{{ __openshift_management_flavor }}-app-pv" register: miq_app_pv_check - when: miq_app_pv_check.results.results == [{}] block: - name: Copy over Management App PV template copy: - src: "templates/{{ openshift_management_flavor }}/{{ openshift_management_flavor_short }}-pv-server-example.yaml" + src: "templates/{{ __openshift_management_flavor }}/{{ __openshift_management_flavor_short }}-pv-server-example.yaml" dest: "{{ template_dir }}/" - name: Ensure Management App PV Template is created oc_obj: namespace: "{{ openshift_management_project }}" - name: "{{ openshift_management_flavor }}-app-pv" + name: "{{ __openshift_management_flavor }}-app-pv" state: present kind: template files: - - "{{ template_dir }}/{{ openshift_management_flavor_short }}-pv-server-example.yaml" + - "{{ template_dir }}/{{ __openshift_management_flavor_short }}-pv-server-example.yaml" #--------------------------------------------------------------------- # Required for database if the installation is fully podified -- when: openshift_management_app_template in ['miq-template', 'cfme-template'] +- when: not __openshift_management_use_ext_db block: - name: Check if the Management DB PV template has been created already oc_obj: namespace: "{{ openshift_management_project }}" state: list kind: template - name: "{{ openshift_management_flavor }}-db-pv" + name: "{{ __openshift_management_flavor }}-db-pv" register: miq_db_pv_check - when: miq_db_pv_check.results.results == [{}] block: - name: Copy over Management DB PV template copy: - src: "templates/{{ openshift_management_flavor }}/{{ openshift_management_flavor_short }}-pv-db-example.yaml" + src: "templates/{{ __openshift_management_flavor }}/{{ __openshift_management_flavor_short }}-pv-db-example.yaml" dest: "{{ template_dir }}/" - name: Ensure Management DB PV Template is created oc_obj: namespace: "{{ openshift_management_project }}" - name: "{{ openshift_management_flavor }}-db-pv" + name: "{{ __openshift_management_flavor }}-db-pv" state: present kind: template files: - - "{{ template_dir }}/{{ openshift_management_flavor_short }}-pv-db-example.yaml" + - "{{ template_dir }}/{{ __openshift_management_flavor_short }}-pv-db-example.yaml" diff --git a/roles/openshift_management/tasks/validate.yml b/roles/openshift_management/tasks/validate.yml index b22f36a4f..2dc895190 100644 --- a/roles/openshift_management/tasks/validate.yml +++ b/roles/openshift_management/tasks/validate.yml @@ -100,4 +100,4 @@ 'openshift_management_template_parameters'" with_items: "{{ __openshift_management_required_db_conn_params }}" when: - - openshift_management_app_template in 
['miq-template-ext-db', 'cfme-template-ext-db'] + - __openshift_management_use_ext_db diff --git a/roles/openshift_management/vars/main.yml b/roles/openshift_management/vars/main.yml index da3ad0af7..d7b18df3a 100644 --- a/roles/openshift_management/vars/main.yml +++ b/roles/openshift_management/vars/main.yml @@ -30,14 +30,18 @@ __openshift_management_db_parameters: - DATABASE_PORT - DATABASE_NAME -# # Commented out until we can support both CFME and MIQ -# # openshift_management_flavor: "{{ 'cloudforms' if openshift_deployment_type == 'openshift-enterprise' else 'manageiq' }}" -#openshift_management_flavor: cloudforms -openshift_management_flavor: manageiq -# TODO: Make this conditional as well based on the prior variable -# # openshift_management_flavor_short: "{{ 'cfme' if openshift_deployment_type == 'openshift-enterprise' else 'miq' }}" -# openshift_management_flavor_short: cfme -openshift_management_flavor_short: miq +__openshift_management_flavors: + miq: + short: miq + long: manageiq + cfme: + short: cfme + long: cloudforms + +__openshift_management_flavor: "{{ __openshift_management_flavors[openshift_management_app_template.split('-')[0]]['long'] }}" +__openshift_management_flavor_short: "{{ __openshift_management_flavors[openshift_management_app_template.split('-')[0]]['short'] }}" + +__openshift_management_use_ext_db: "{{ true if 'ext-db' in openshift_management_app_template else false }}" ###################################################################### # ACCOUNTING diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml index b12a6b346..680e4a4ff 100644 --- a/roles/openshift_master/tasks/main.yml +++ b/roles/openshift_master/tasks/main.yml @@ -137,17 +137,8 @@ - item.clientCA | default('') != '' with_items: "{{ openshift.master.identity_providers }}" -# This is an ugly hack to verify settings are in a file without modifying them with lineinfile. -# The template file will stomp any other settings made. -- block: - - name: check whether our docker-registry setting exists in the env file - command: "awk '/^OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000/' /etc/sysconfig/{{ openshift_service_type }}-master" - failed_when: false - changed_when: false - register: l_already_set - - - set_fact: - openshift_push_via_dns: "{{ openshift.common.version_gte_3_6 or (l_already_set.stdout is defined and l_already_set.stdout is match('OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000')) }}" +- name: Include push_via_dns.yml + include_tasks: push_via_dns.yml - name: Set fact of all etcd host IPs openshift_facts: @@ -227,7 +218,7 @@ - pause: seconds: 15 when: - - openshift.master.ha | bool + - openshift_master_ha | bool - name: Start and enable master api all masters systemd: diff --git a/roles/openshift_master/tasks/push_via_dns.yml b/roles/openshift_master/tasks/push_via_dns.yml new file mode 100644 index 000000000..c5876130a --- /dev/null +++ b/roles/openshift_master/tasks/push_via_dns.yml @@ -0,0 +1,13 @@ +--- +# This is an ugly hack to verify settings are in a file without modifying them with lineinfile. +# The template file will stomp any other settings made. 
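+# Operators can also pin the behaviour by defining openshift_push_via_dns in the
+# inventory; the block below only runs when the variable is undefined.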
+- when: openshift_push_via_dns is not defined + block: + - name: check whether our docker-registry setting exists in the env file + shell: "awk '/^OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000/' /etc/sysconfig/{{ openshift_service_type }}-master*" + failed_when: false + changed_when: false + register: l_already_set + + - set_fact: + openshift_push_via_dns: "{{ openshift.common.version_gte_3_6 or (l_already_set.stdout is defined and l_already_set.stdout is match('OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000')) }}" diff --git a/roles/openshift_master/tasks/restart.yml b/roles/openshift_master/tasks/restart.yml index 715347101..f7697067a 100644 --- a/roles/openshift_master/tasks/restart.yml +++ b/roles/openshift_master/tasks/restart.yml @@ -3,7 +3,6 @@ service: name: "{{ openshift_service_type }}-master-api" state: restarted - when: openshift_master_ha | bool - name: Wait for master API to come back online wait_for: host: "{{ openshift.common.hostname }}" @@ -11,12 +10,10 @@ delay: 10 port: "{{ openshift.master.api_port }}" timeout: 600 - when: openshift_master_ha | bool -- name: Restart master controllers - service: - name: "{{ openshift_service_type }}-master-controllers" - state: restarted - # Ignore errrors since it is possible that type != simple for - # pre-3.1.1 installations. - ignore_errors: true - when: openshift_master_ha | bool +# We retry the controllers because the API may not be 100% initialized yet. +- name: restart master controllers + command: "systemctl restart {{ openshift_service_type }}-master-controllers" + retries: 3 + delay: 5 + register: result + until: result.rc == 0 diff --git a/roles/openshift_master/tasks/systemd_units.yml b/roles/openshift_master/tasks/systemd_units.yml index 870ab7c57..aeff64983 100644 --- a/roles/openshift_master/tasks/systemd_units.yml +++ b/roles/openshift_master/tasks/systemd_units.yml @@ -1,6 +1,8 @@ --- # systemd_units.yml is included both in the openshift_master role and in the upgrade # playbooks. 
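One detail worth calling out in push_via_dns.yml above: the lookup moved from the command module to shell because the sysconfig path now ends in a glob (-master*, so both the api and controllers env files are checked), and command passes its arguments verbatim without shell expansion. Assuming openshift_service_type=origin:

# with shell:, the glob expands before awk runs:
#   awk '/^OPENSHIFT_DEFAULT_REGISTRY=.../' /etc/sysconfig/origin-master-api /etc/sysconfig/origin-master-controllers
# with command:, awk would have received the literal path /etc/sysconfig/origin-master*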
+- name: include push_via_dns.yml tasks + include_tasks: push_via_dns.yml - name: Set HA Service Info for containerized installs set_fact: @@ -9,7 +11,8 @@ when: - openshift_is_containerized | bool -- include_tasks: registry_auth.yml +- name: include registry_auth tasks + include_tasks: registry_auth.yml - name: Disable the legacy master service if it exists systemd: diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2 index 14023ea73..4c9ab1864 100644 --- a/roles/openshift_master/templates/master.yaml.v1.j2 +++ b/roles/openshift_master/templates/master.yaml.v1.j2 @@ -5,6 +5,7 @@ admissionConfig: apiLevels: - v1 apiVersion: v1 +{% if not openshift.common.version_gte_3_9 %} assetConfig: logoutURL: "{{ openshift.master.logout_url | default('') }}" masterPublicURL: {{ openshift.master.public_api_url }} @@ -41,6 +42,8 @@ assetConfig: - {{ cipher_suite }} {% endfor %} {% endif %} +# assetconfig end +{% endif %} {% if openshift.master.audit_config | default(none) is not none %} auditConfig:{{ openshift.master.audit_config | lib_utils_to_padded_yaml(level=1) }} {% endif %} diff --git a/roles/openshift_metrics/defaults/main.yaml b/roles/openshift_metrics/defaults/main.yaml index 8da74430f..293d8f451 100644 --- a/roles/openshift_metrics/defaults/main.yaml +++ b/roles/openshift_metrics/defaults/main.yaml @@ -54,7 +54,7 @@ openshift_metrics_master_url: https://kubernetes.default.svc openshift_metrics_node_id: nodename openshift_metrics_project: openshift-infra -openshift_metrics_cassandra_pvc_prefix: "{{ openshift_metrics_storage_volume_name | default('metrics-cassandra') }}" +openshift_metrics_cassandra_pvc_prefix: metrics-cassandra openshift_metrics_cassandra_pvc_access: "{{ openshift_metrics_storage_access_modes | default(['ReadWriteOnce']) }}" openshift_metrics_hawkular_user_write_access: False diff --git a/roles/openshift_metrics/tasks/generate_cassandra_pvcs.yaml b/roles/openshift_metrics/tasks/generate_cassandra_pvcs.yaml new file mode 100644 index 000000000..6aa48f9c3 --- /dev/null +++ b/roles/openshift_metrics/tasks/generate_cassandra_pvcs.yaml @@ -0,0 +1,46 @@ +--- +- name: Check to see if PVC already exists + oc_obj: + state: list + kind: pvc + name: "{{ openshift_metrics_cassandra_pvc_prefix }}-{{ metrics_pvc_index }}" + namespace: "{{ openshift_metrics_project }}" + register: _metrics_pvc + +# _metrics_pvc.results.results | length > 0 returns a false positive +# so we check for the presence of 'stderr' to determine if the obj exists or not +# the RC for existing and not existing is both 0 +- when: + - _metrics_pvc.results.stderr is defined + block: + - name: generate hawkular-cassandra persistent volume claims + template: + src: pvc.j2 + dest: "{{ mktemp.stdout }}/templates/hawkular-cassandra-pvc{{ metrics_pvc_index }}.yaml" + vars: + obj_name: "{{ openshift_metrics_cassandra_pvc_prefix }}-{{ metrics_pvc_index }}" + labels: + metrics-infra: hawkular-cassandra + access_modes: "{{ openshift_metrics_cassandra_pvc_access | list }}" + size: "{{ openshift_metrics_cassandra_pvc_size }}" + pv_selector: "{{ openshift_metrics_cassandra_pv_selector }}" + storage_class_name: "{{ openshift_metrics_cassanda_pvc_storage_class_name | default('', true) }}" + when: + - openshift_metrics_cassandra_storage_type != 'emptydir' + - openshift_metrics_cassandra_storage_type != 'dynamic' + changed_when: false + + - name: generate hawkular-cassandra persistent volume claims (dynamic) + template: + src: pvc.j2 + dest: "{{ mktemp.stdout 
}}/templates/hawkular-cassandra-pvc{{ metrics_pvc_index }}.yaml" + vars: + obj_name: "{{ openshift_metrics_cassandra_pvc_prefix }}-{{ metrics_pvc_index }}" + labels: + metrics-infra: hawkular-cassandra + access_modes: "{{ openshift_metrics_cassandra_pvc_access | list }}" + size: "{{ openshift_metrics_cassandra_pvc_size }}" + pv_selector: "{{ openshift_metrics_cassandra_pv_selector }}" + storage_class_name: "{{ openshift_metrics_cassanda_pvc_storage_class_name | default('', true) }}" + when: openshift_metrics_cassandra_storage_type == 'dynamic' + changed_when: false diff --git a/roles/openshift_metrics/tasks/install_cassandra.yaml b/roles/openshift_metrics/tasks/install_cassandra.yaml index 9026cc897..158e596ec 100644 --- a/roles/openshift_metrics/tasks/install_cassandra.yaml +++ b/roles/openshift_metrics/tasks/install_cassandra.yaml @@ -25,36 +25,7 @@ - set_fact: openshift_metrics_cassandra_pvc_prefix="hawkular-metrics" when: "not openshift_metrics_cassandra_pvc_prefix or openshift_metrics_cassandra_pvc_prefix == ''" -- name: generate hawkular-cassandra persistent volume claims - template: - src: pvc.j2 - dest: "{{ mktemp.stdout }}/templates/hawkular-cassandra-pvc{{ item }}.yaml" - vars: - obj_name: "{{ openshift_metrics_cassandra_pvc_prefix }}-{{ item }}" - labels: - metrics-infra: hawkular-cassandra - access_modes: "{{ openshift_metrics_cassandra_pvc_access | list }}" - size: "{{ openshift_metrics_cassandra_pvc_size }}" - pv_selector: "{{ openshift_metrics_cassandra_pv_selector }}" - storage_class_name: "{{ openshift_metrics_cassanda_pvc_storage_class_name | default('', true) }}" - with_sequence: count={{ openshift_metrics_cassandra_replicas }} - when: - - openshift_metrics_cassandra_storage_type != 'emptydir' - - openshift_metrics_cassandra_storage_type != 'dynamic' - changed_when: false - -- name: generate hawkular-cassandra persistent volume claims (dynamic) - template: - src: pvc.j2 - dest: "{{ mktemp.stdout }}/templates/hawkular-cassandra-pvc{{ item }}.yaml" - vars: - obj_name: "{{ openshift_metrics_cassandra_pvc_prefix }}-{{ item }}" - labels: - metrics-infra: hawkular-cassandra - access_modes: "{{ openshift_metrics_cassandra_pvc_access | list }}" - size: "{{ openshift_metrics_cassandra_pvc_size }}" - pv_selector: "{{ openshift_metrics_cassandra_pv_selector }}" - storage_class_name: "{{ openshift_metrics_cassanda_pvc_storage_class_name | default('', true) }}" +- include_tasks: generate_cassandra_pvcs.yaml with_sequence: count={{ openshift_metrics_cassandra_replicas }} - when: openshift_metrics_cassandra_storage_type == 'dynamic' - changed_when: false + loop_control: + loop_var: metrics_pvc_index diff --git a/roles/openshift_metrics/tasks/install_metrics.yaml b/roles/openshift_metrics/tasks/install_metrics.yaml index 0dd5d1621..f05c8968d 100644 --- a/roles/openshift_metrics/tasks/install_metrics.yaml +++ b/roles/openshift_metrics/tasks/install_metrics.yaml @@ -67,8 +67,8 @@ with_items: "{{ hawkular_agent_object_defs.results }}" when: openshift_metrics_install_hawkular_agent | bool -# TODO: Remove when asset config is removed from master-config.yaml - include_tasks: update_master_config.yaml + when: not openshift.common.version_gte_3_9 # Update asset config in openshift-web-console namespace - name: Add metrics route information to web console asset config @@ -79,7 +79,9 @@ console_config_edits: - key: clusterInfo#metricsPublicURL value: "https://{{ openshift_metrics_hawkular_hostname}}/hawkular/metrics" - when: openshift_web_console_install | default(true) | bool + when: + - 
openshift_web_console_install | default(true) | bool + - openshift.common.version_gte_3_9 - command: > {{openshift_client_binary}} diff --git a/roles/openshift_metrics/tasks/uninstall_metrics.yaml b/roles/openshift_metrics/tasks/uninstall_metrics.yaml index 1664e9975..ed849916d 100644 --- a/roles/openshift_metrics/tasks/uninstall_metrics.yaml +++ b/roles/openshift_metrics/tasks/uninstall_metrics.yaml @@ -28,4 +28,6 @@ console_config_edits: - key: clusterInfo#metricsPublicURL value: "" - when: openshift_web_console_install | default(true) | bool + when: + - openshift_web_console_install | default(true) | bool + - openshift.common.version_gte_3_9 diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml index 0fe4c2035..9f887891b 100644 --- a/roles/openshift_node/defaults/main.yml +++ b/roles/openshift_node/defaults/main.yml @@ -137,6 +137,7 @@ default_r_openshift_node_image_prep_packages: - yum-utils # gluster - glusterfs-fuse +- device-mapper-multipath # nfs - nfs-utils - flannel diff --git a/roles/openshift_node/tasks/storage_plugins/iscsi.yml b/roles/openshift_node/tasks/storage_plugins/iscsi.yml index a8048c42f..e31433dbc 100644 --- a/roles/openshift_node/tasks/storage_plugins/iscsi.yml +++ b/roles/openshift_node/tasks/storage_plugins/iscsi.yml @@ -1,6 +1,33 @@ --- - name: Install iSCSI storage plugin dependencies - package: name=iscsi-initiator-utils state=present + package: + name: "{{ item }}" + state: present when: not openshift_is_atomic | bool register: result until: result is succeeded + with_items: + - iscsi-initiator-utils + - device-mapper-multipath + +- name: Start and enable multipathd and rpcbind services + systemd: + name: "{{ item }}" + state: started + enabled: True + when: not openshift_is_atomic | bool + with_items: + - multipathd + - rpcbind + +- name: Template multipath configuration + template: + dest: "/etc/multipath.conf" + src: multipath.conf.j2 + backup: true + when: not openshift_is_atomic | bool + +# Enable multipath +- name: Enable multipath + command: "mpathconf --enable" + when: not openshift_is_atomic | bool diff --git a/roles/openshift_node/templates/multipath.conf.j2 b/roles/openshift_node/templates/multipath.conf.j2 new file mode 100644 index 000000000..8a0abc2c1 --- /dev/null +++ b/roles/openshift_node/templates/multipath.conf.j2 @@ -0,0 +1,15 @@ +# LIO iSCSI +# TODO: Add env variables for tweaking +devices { + device { + vendor "LIO-ORG" + user_friendly_names "yes" + path_grouping_policy "failover" + path_selector "round-robin 0" + failback immediate + path_checker "tur" + prio "const" + no_path_retry 120 + rr_weight "uniform" + } +} diff --git a/roles/openshift_openstack/defaults/main.yml b/roles/openshift_openstack/defaults/main.yml index 77be1f2b1..2bdb81632 100644 --- a/roles/openshift_openstack/defaults/main.yml +++ b/roles/openshift_openstack/defaults/main.yml @@ -93,3 +93,8 @@ openshift_openstack_node_volume_size: "{{ openshift_openstack_docker_volume_size openshift_openstack_etcd_volume_size: 2 openshift_openstack_lb_volume_size: 5 openshift_openstack_ephemeral_volumes: false + + +# cloud-config +openshift_openstack_disable_root: true +openshift_openstack_user: openshift diff --git a/roles/openshift_openstack/templates/docker-storage-setup-dm.j2 b/roles/openshift_openstack/templates/docker-storage-setup-dm.j2 index 32c6b5838..9015c561f 100644 --- a/roles/openshift_openstack/templates/docker-storage-setup-dm.j2 +++ b/roles/openshift_openstack/templates/docker-storage-setup-dm.j2 @@ -1,4 +1,8 @@ +{% if docker_storage_mountpoints is defined %}
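+{# docker_storage_mountpoints is presumably supplied by the provisioning plays,
+   e.g. discovered from the Cinder volumes tagged purpose=openshift_docker_storage
+   in heat_stack_server.yaml.j2 below; when it is absent we fall back to the
+   statically configured docker_dev device. #}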
+DEVS="{{ docker_storage_mountpoints }}" +{% else %} DEVS="{{ openshift_openstack_container_storage_setup.docker_dev }}" +{% endif %} VG="{{ openshift_openstack_container_storage_setup.docker_vg }}" DATA_SIZE="{{ openshift_openstack_container_storage_setup.docker_data_size }}" EXTRA_DOCKER_STORAGE_OPTIONS="--storage-opt dm.basesize={{ openshift_openstack_container_storage_setup.docker_dm_basesize }}" diff --git a/roles/openshift_openstack/templates/docker-storage-setup-overlayfs.j2 b/roles/openshift_openstack/templates/docker-storage-setup-overlayfs.j2 index 1bf366bdc..917347073 100644 --- a/roles/openshift_openstack/templates/docker-storage-setup-overlayfs.j2 +++ b/roles/openshift_openstack/templates/docker-storage-setup-overlayfs.j2 @@ -1,4 +1,8 @@ +{% if docker_storage_mountpoints is defined %} +DEVS="{{ docker_storage_mountpoints }}" +{% else %} DEVS="{{ openshift_openstack_container_storage_setup.docker_dev }}" +{% endif %} VG="{{ openshift_openstack_container_storage_setup.docker_vg }}" DATA_SIZE="{{ openshift_openstack_container_storage_setup.docker_data_size }}" STORAGE_DRIVER=overlay2 diff --git a/roles/openshift_openstack/templates/heat_stack.yaml.j2 b/roles/openshift_openstack/templates/heat_stack.yaml.j2 index 8e7c6288a..1d3173022 100644 --- a/roles/openshift_openstack/templates/heat_stack.yaml.j2 +++ b/roles/openshift_openstack/templates/heat_stack.yaml.j2 @@ -418,6 +418,10 @@ resources: protocol: tcp port_range_min: 443 port_range_max: 443 + - direction: ingress + protocol: tcp + port_range_min: 1936 + port_range_max: 1936 cns-secgrp: type: OS::Neutron::SecurityGroup diff --git a/roles/openshift_openstack/templates/heat_stack_server.yaml.j2 b/roles/openshift_openstack/templates/heat_stack_server.yaml.j2 index 29b09f3c9..9aeecfa74 100644 --- a/roles/openshift_openstack/templates/heat_stack_server.yaml.j2 +++ b/roles/openshift_openstack/templates/heat_stack_server.yaml.j2 @@ -261,11 +261,12 @@ resources: properties: size: { get_param: volume_size } availability_zone: { get_param: availability_zone } + metadata: + purpose: openshift_docker_storage volume_attachment: type: OS::Cinder::VolumeAttachment properties: volume_id: { get_resource: cinder_volume } instance_uuid: { get_resource: server } - mountpoint: /dev/sdb {% endif %} diff --git a/roles/openshift_openstack/templates/user_data.j2 b/roles/openshift_openstack/templates/user_data.j2 index eb65f7cec..ccaa5d464 100644 --- a/roles/openshift_openstack/templates/user_data.j2 +++ b/roles/openshift_openstack/templates/user_data.j2 @@ -1,9 +1,9 @@ #cloud-config -disable_root: true +disable_root: {{ openshift_openstack_disable_root }} system_info: default_user: - name: openshift + name: {{ openshift_openstack_user }} sudo: ["ALL=(ALL) NOPASSWD: ALL"] write_files: diff --git a/roles/openshift_prometheus/defaults/main.yaml b/roles/openshift_prometheus/defaults/main.yaml index 1b21c4739..37a05f3f0 100644 --- a/roles/openshift_prometheus/defaults/main.yaml +++ b/roles/openshift_prometheus/defaults/main.yaml @@ -7,9 +7,24 @@ openshift_prometheus_namespace: openshift-metrics # defaults hosts for routes openshift_prometheus_hostname: prometheus-{{openshift_prometheus_namespace}}.{{openshift_master_default_subdomain}} openshift_prometheus_alerts_hostname: alerts-{{openshift_prometheus_namespace}}.{{openshift_master_default_subdomain}} +openshift_prometheus_alertmanager_hostname: alertmanager-{{openshift_prometheus_namespace}}.{{openshift_master_default_subdomain}} + openshift_prometheus_node_selector: {"region":"infra"} 
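Looking back at the user_data.j2 hunk above: the two new cloud-config knobs keep the previous behaviour by default (root login disabled, default user 'openshift'). A sketch of overriding them from an inventory, values illustrative:

openshift_openstack_disable_root: false
openshift_openstack_user: cloud-user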
+openshift_prometheus_service_port: 443 +openshift_prometheus_service_targetport: 8443 +openshift_prometheus_service_name: prometheus +openshift_prometheus_alerts_service_targetport: 9443 +openshift_prometheus_alerts_service_name: alerts +openshift_prometheus_alertmanager_service_targetport: 10443 +openshift_prometheus_alertmanager_service_name: alertmanager +openshift_prometheus_serviceaccount_annotations: [] +l_openshift_prometheus_serviceaccount_annotations: + - serviceaccounts.openshift.io/oauth-redirectreference.prom='{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"prometheus"}}' + - serviceaccounts.openshift.io/oauth-redirectreference.alerts='{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"alerts"}}' + - serviceaccounts.openshift.io/oauth-redirectreference.alertmanager='{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"alertmanager"}}' + # additional prometheus rules file openshift_prometheus_additional_rules_file: null diff --git a/roles/openshift_prometheus/tasks/facts.yaml b/roles/openshift_prometheus/tasks/facts.yaml new file mode 100644 index 000000000..214089732 --- /dev/null +++ b/roles/openshift_prometheus/tasks/facts.yaml @@ -0,0 +1,10 @@ +--- +# The kubernetes version impacts the prometheus scraping endpoint +# so gathering it before constructing the configmap +- name: get oc version + oc_version: + register: oc_version + +- set_fact: + kubernetes_version: "{{ oc_version.results.kubernetes_short | float }}" + openshift_prometheus_serviceaccount_annotations: "{{ l_openshift_prometheus_serviceaccount_annotations + openshift_prometheus_serviceaccount_annotations|list }}" diff --git a/roles/openshift_prometheus/tasks/install_prometheus.yaml b/roles/openshift_prometheus/tasks/install_prometheus.yaml index 749df5152..0b565502f 100644 --- a/roles/openshift_prometheus/tasks/install_prometheus.yaml +++ b/roles/openshift_prometheus/tasks/install_prometheus.yaml @@ -1,4 +1,6 @@ --- +# set facts +- include_tasks: facts.yaml # namespace - name: Add prometheus project @@ -9,7 +11,7 @@ description: Prometheus # secrets -- name: Set alert and prometheus secrets +- name: Set alert, alertmanager and prometheus secrets oc_secret: state: present name: "{{ item }}-proxy" @@ -20,30 +22,24 @@ with_items: - prometheus - alerts + - alertmanager # serviceaccount - name: create prometheus serviceaccount oc_serviceaccount: state: present - name: prometheus + name: "{{ openshift_prometheus_service_name }}" namespace: "{{ openshift_prometheus_namespace }}" - # TODO add annotations when supproted - # annotations: - # serviceaccounts.openshift.io/oauth-redirectreference.prom: '{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"prometheus"}}' - # serviceaccounts.openshift.io/oauth-redirectreference.alerts: '{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"alerts"}}' - - secrets: - - prometheus-secrets changed_when: no + # TODO remove this when annotations are supported by oc_serviceaccount - name: annotate serviceaccount command: > {{ openshift_client_binary }} annotate --overwrite -n {{ openshift_prometheus_namespace }} - serviceaccount prometheus - serviceaccounts.openshift.io/oauth-redirectreference.prom='{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"prometheus"}}' - 
serviceaccounts.openshift.io/oauth-redirectreference.alerts='{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"alerts"}}' - + serviceaccount {{ openshift_prometheus_service_name }} {{ item }} + with_items: + "{{ openshift_prometheus_serviceaccount_annotations }}" # create clusterrolebinding for prometheus serviceaccount - name: Set cluster-reader permissions for prometheus @@ -52,63 +48,61 @@ namespace: "{{ openshift_prometheus_namespace }}" resource_kind: cluster-role resource_name: cluster-reader - user: "system:serviceaccount:{{ openshift_prometheus_namespace }}:prometheus" + user: "system:serviceaccount:{{ openshift_prometheus_namespace }}:{{ openshift_prometheus_service_name }}" + -# create prometheus and alerts services -# TODO join into 1 task with loop -- name: Create prometheus service +- name: create services for prometheus oc_service: - state: present - name: "{{ item.name }}" + name: "{{ openshift_prometheus_service_name }}" namespace: "{{ openshift_prometheus_namespace }}" - selector: - app: prometheus labels: - name: "{{ item.name }}" - # TODO add annotations when supported - # annotations: - # service.alpha.openshift.io/serving-cert-secret-name: "{{item.name}}-tls" + name: prometheus + annotations: + prometheus.io/scrape: 'true' + prometheus.io/scheme: https + service.alpha.openshift.io/serving-cert-secret-name: prometheus-tls ports: - - port: 443 - targetPort: 8443 - with_items: - - name: prometheus + - name: prometheus + port: "{{ openshift_prometheus_service_port }}" + targetPort: "{{ openshift_prometheus_service_targetport }}" + protocol: TCP + selector: + app: prometheus -- name: Create alerts service +- name: create services for alert buffer oc_service: - state: present - name: "{{ item.name }}" + name: "{{ openshift_prometheus_alerts_service_name }}" namespace: "{{ openshift_prometheus_namespace }}" + labels: + name: prometheus + annotations: + service.alpha.openshift.io/serving-cert-secret-name: alerts-tls + ports: + - name: prometheus + port: "{{ openshift_prometheus_service_port }}" + targetPort: "{{ openshift_prometheus_alerts_service_targetport }}" + protocol: TCP selector: app: prometheus + +- name: create services for alertmanager + oc_service: + name: "{{ openshift_prometheus_alertmanager_service_name }}" + namespace: "{{ openshift_prometheus_namespace }}" labels: - name: "{{ item.name }}" - # TODO add annotations when supported - # annotations: - # service.alpha.openshift.io/serving-cert-secret-name: "{{item.name}}-tls" + name: prometheus + annotations: + service.alpha.openshift.io/serving-cert-secret-name: alertmanager-tls ports: - - port: 443 - targetPort: 9443 - with_items: - - name: alerts - - -# Annotate services with secret name -# TODO remove this when annotations are supported by oc_service -- name: annotate prometheus service - command: > - {{ openshift_client_binary }} annotate --overwrite -n {{ openshift_prometheus_namespace }} - service prometheus - prometheus.io/scrape='true' - prometheus.io/scheme=https - service.alpha.openshift.io/serving-cert-secret-name=prometheus-tls - -- name: annotate alerts service - command: > - {{ openshift_client_binary }} annotate --overwrite -n {{ openshift_prometheus_namespace }} - service alerts 'service.alpha.openshift.io/serving-cert-secret-name=prometheus-alerts-tls' + - name: prometheus + port: "{{ openshift_prometheus_service_port }}" + targetPort: "{{ openshift_prometheus_alertmanager_service_targetport }}" + protocol: TCP + selector: + app: prometheus # create
prometheus and alerts routes +# TODO: oc_route module should support insecureEdgeTerminationPolicy: Redirect - name: create prometheus and alerts routes oc_route: state: present @@ -122,6 +116,8 @@ host: "{{ openshift_prometheus_hostname }}" - name: alerts host: "{{ openshift_prometheus_alerts_hostname }}" + - name: alertmanager + host: "{{ openshift_prometheus_alertmanager_hostname }}" # Storage - name: create prometheus pvc @@ -169,15 +165,6 @@ path: "{{ tempdir }}/prometheus.additional.rules" register: additional_rules_stat -# The kubernetes version impacts the prometheus scraping endpoint -# so gathering it before constructing the configmap -- name: get oc version - oc_version: - register: oc_version - -- set_fact: - kubernetes_version: "{{ oc_version.results.kubernetes_short | float }}" - - template: src: prometheus.yml.j2 dest: "{{ tempdir }}/prometheus.yml" @@ -219,7 +206,7 @@ - name: Set alertmanager configmap oc_configmap: state: present - name: "prometheus-alerts" + name: "alertmanager" namespace: "{{ openshift_prometheus_namespace }}" from_file: alertmanager.yml: "{{ tempdir }}/alertmanager.yml" diff --git a/roles/openshift_prometheus/tasks/main.yaml b/roles/openshift_prometheus/tasks/main.yaml index b859eb111..66d65a3f2 100644 --- a/roles/openshift_prometheus/tasks/main.yaml +++ b/roles/openshift_prometheus/tasks/main.yaml @@ -16,9 +16,11 @@ - name: Create templates subdirectory file: state: directory - path: "{{ tempdir }}/templates" + path: "{{ tempdir }}/{{ item }}" mode: 0755 changed_when: False + with_items: + - templates - include_tasks: install_prometheus.yaml when: openshift_prometheus_state == 'present' diff --git a/roles/openshift_prometheus/tasks/uninstall_prometheus.yaml b/roles/openshift_prometheus/tasks/uninstall.yaml index d746402db..d746402db 100644 --- a/roles/openshift_prometheus/tasks/uninstall_prometheus.yaml +++ b/roles/openshift_prometheus/tasks/uninstall.yaml diff --git a/roles/openshift_prometheus/templates/prometheus.j2 b/roles/openshift_prometheus/templates/prometheus.j2 index d780550b8..c0abd483b 100644 --- a/roles/openshift_prometheus/templates/prometheus.j2 +++ b/roles/openshift_prometheus/templates/prometheus.j2 @@ -19,7 +19,7 @@ spec: labels: app: prometheus spec: - serviceAccountName: prometheus + serviceAccountName: "{{ openshift_prometheus_service_name }}" {% if openshift_prometheus_node_selector is iterable and openshift_prometheus_node_selector | length > 0 %} nodeSelector: {% for key, value in openshift_prometheus_node_selector.items() %} @@ -47,15 +47,15 @@ spec: cpu: "{{ openshift_prometheus_oauth_proxy_cpu_limit }}" {% endif %} ports: - - containerPort: 8443 + - containerPort: {{ openshift_prometheus_service_targetport }} name: web args: - -provider=openshift - - -https-address=:8443 + - -https-address=:{{ openshift_prometheus_service_targetport }} - -http-address= - -email-domain=* - -upstream=http://localhost:9090 - - -client-id=system:serviceaccount:{{ namespace }}:prometheus + - -client-id=system:serviceaccount:{{ namespace }}:{{ openshift_prometheus_service_name }} - '-openshift-sar={"resource": "namespaces", "verb": "get", "resourceName": "{{ namespace }}", "namespace": "{{ namespace }}"}' - '-openshift-delegate-urls={"/": {"resource": "namespaces", "verb": "get", "resourceName": "{{ namespace }}", "namespace": "{{ namespace }}"}}' - -tls-cert=/etc/tls/private/tls.crt @@ -67,9 +67,9 @@ spec: - -skip-auth-regex=^/metrics volumeMounts: - mountPath: /etc/tls/private - name: prometheus-tls + name: prometheus-tls-secret - mountPath: 
/etc/proxy/secrets - name: prometheus-secrets + name: prometheus-proxy-secret - mountPath: /prometheus name: prometheus-data @@ -104,7 +104,7 @@ spec: - mountPath: /prometheus name: prometheus-data - # Deploy alertmanager behind prometheus-alert-buffer behind an oauth proxy + # Deploy alert-buffer behind oauth alerts-proxy - name: alerts-proxy image: "{{ l_openshift_prometheus_proxy_image_prefix }}oauth-proxy:{{ l_openshift_prometheus_proxy_image_version }}" imagePullPolicy: IfNotPresent @@ -124,15 +124,15 @@ spec: cpu: "{{ openshift_prometheus_oauth_proxy_cpu_limit }}" {% endif %} ports: - - containerPort: 9443 + - containerPort: {{ openshift_prometheus_alerts_service_targetport }} name: web args: - -provider=openshift - - -https-address=:9443 + - -https-address=:{{ openshift_prometheus_alerts_service_targetport }} - -http-address= - -email-domain=* - -upstream=http://localhost:9099 - - -client-id=system:serviceaccount:{{ namespace }}:prometheus + - -client-id=system:serviceaccount:{{ namespace }}:{{ openshift_prometheus_service_name }} - '-openshift-sar={"resource": "namespaces", "verb": "get", "resourceName": "{{ namespace }}", "namespace": "{{ namespace }}"}' - '-openshift-delegate-urls={"/": {"resource": "namespaces", "verb": "get", "resourceName": "{{ namespace }}", "namespace": "{{ namespace }}"}}' - -tls-cert=/etc/tls/private/tls.crt @@ -143,9 +143,9 @@ spec: - -openshift-ca=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt volumeMounts: - mountPath: /etc/tls/private - name: alerts-tls + name: alerts-tls-secret - mountPath: /etc/proxy/secrets - name: alerts-secrets + name: alerts-proxy-secret - name: alert-buffer args: @@ -169,11 +169,54 @@ spec: {% endif %} volumeMounts: - mountPath: /alert-buffer - name: alert-buffer-data + name: alerts-data ports: - containerPort: 9099 name: alert-buf + # Deploy alertmanager behind oauth alertmanager-proxy + - name: alertmanager-proxy + image: "{{ l_openshift_prometheus_proxy_image_prefix }}oauth-proxy:{{ l_openshift_prometheus_proxy_image_version }}" + imagePullPolicy: IfNotPresent + requests: +{% if openshift_prometheus_oauth_proxy_memory_requests is defined and openshift_prometheus_oauth_proxy_memory_requests is not none %} + memory: "{{ openshift_prometheus_oauth_proxy_memory_requests }}" +{% endif %} +{% if openshift_prometheus_oauth_proxy_cpu_requests is defined and openshift_prometheus_oauth_proxy_cpu_requests is not none %} + cpu: "{{ openshift_prometheus_oauth_proxy_cpu_requests }}" +{% endif %} + limits: +{% if openshift_prometheus_oauth_proxy_memory_limit is defined and openshift_prometheus_oauth_proxy_memory_limit is not none %} + memory: "{{ openshift_prometheus_oauth_proxy_memory_limit }}" +{% endif %} +{% if openshift_prometheus_oauth_proxy_cpu_limit is defined and openshift_prometheus_oauth_proxy_cpu_limit is not none %} + cpu: "{{ openshift_prometheus_oauth_proxy_cpu_limit }}" +{% endif %} + ports: + - containerPort: {{ openshift_prometheus_alertmanager_service_targetport }} + name: web + args: + - -provider=openshift + - -https-address=:{{ openshift_prometheus_alertmanager_service_targetport }} + - -http-address= + - -email-domain=* + - -upstream=http://localhost:9093 + - -client-id=system:serviceaccount:{{ namespace }}:{{ openshift_prometheus_service_name }} + - -openshift-ca=/etc/pki/tls/cert.pem + - -openshift-ca=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt + - '-openshift-sar={"resource": "namespaces", "verb": "get", "resourceName": "{{ namespace }}", "namespace": "{{ namespace }}"}' + - 
'-openshift-delegate-urls={"/": {"resource": "namespaces", "verb": "get", "resourceName": "{{ namespace }}", "namespace": "{{ namespace }}"}}' + - -tls-cert=/etc/tls/private/tls.crt + - -tls-key=/etc/tls/private/tls.key + - -client-secret-file=/var/run/secrets/kubernetes.io/serviceaccount/token + - -cookie-secret-file=/etc/proxy/secrets/session_secret + - -skip-auth-regex=^/metrics + volumeMounts: + - mountPath: /etc/tls/private + name: alertmanager-tls-secret + - mountPath: /etc/proxy/secrets + name: alertmanager-proxy-secret + - name: alertmanager args: - -config.file=/etc/alertmanager/alertmanager.yml @@ -205,14 +248,15 @@ spec: restartPolicy: Always volumes: + - name: prometheus-config configMap: defaultMode: 420 name: prometheus - - name: prometheus-secrets + - name: prometheus-proxy-secret secret: secretName: prometheus-proxy - - name: prometheus-tls + - name: prometheus-tls-secret secret: secretName: prometheus-tls - name: prometheus-data @@ -225,13 +269,19 @@ spec: - name: alertmanager-config configMap: defaultMode: 420 - name: prometheus-alerts - - name: alerts-secrets + name: alertmanager + - name: alertmanager-proxy-secret secret: - secretName: alerts-proxy - - name: alerts-tls + secretName: alertmanager-proxy + - name: alertmanager-tls-secret + secret: + secretName: alertmanager-tls + - name: alerts-tls-secret secret: - secretName: prometheus-alerts-tls + secretName: alerts-tls + - name: alerts-proxy-secret + secret: + secretName: alerts-proxy - name: alertmanager-data {% if openshift_prometheus_alertmanager_storage_type == 'pvc' %} persistentVolumeClaim: @@ -239,7 +289,7 @@ spec: {% else %} emptydir: {} {% endif %} - - name: alert-buffer-data + - name: alerts-data {% if openshift_prometheus_alertbuffer_storage_type == 'pvc' %} persistentVolumeClaim: claimName: {{ openshift_prometheus_alertbuffer_pvc_name }} diff --git a/roles/openshift_prometheus/templates/prometheus.yml.j2 b/roles/openshift_prometheus/templates/prometheus.yml.j2 index 63430f834..005c2c564 100644 --- a/roles/openshift_prometheus/templates/prometheus.yml.j2 +++ b/roles/openshift_prometheus/templates/prometheus.yml.j2 @@ -1,10 +1,5 @@ rule_files: - - 'prometheus.rules' -{% if openshift_prometheus_additional_rules_file is defined and openshift_prometheus_additional_rules_file is not none %} - - 'prometheus.additional.rules' -{% endif %} - - + - '*.rules' # A scrape configuration for running Prometheus on a Kubernetes cluster. # This uses separate scrape configs for cluster components (i.e. API server, node) @@ -39,31 +34,11 @@ scrape_configs: action: keep regex: default;kubernetes;https -# Scrape config for nodes. -# -# Each node exposes a /metrics endpoint that contains operational metrics for -# the Kubelet and other components. -- job_name: 'kubernetes-nodes' - - scheme: https - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - # Scrape config for controllers. # # Each master node exposes a /metrics endpoint on :8444 that contains operational metrics for # the controllers. # -# TODO: move this to a pure endpoints based metrics gatherer when controllers are exposed via -# endpoints. - job_name: 'kubernetes-controllers' scheme: https @@ -87,6 +62,27 @@ scrape_configs: regex: (.+)(?::\d+) replacement: $1:8444 +# Scrape config for nodes. 
+# +# Each node exposes a /metrics endpoint that contains operational metrics for +# the Kubelet and other components. +- job_name: 'kubernetes-nodes' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + # Drop a very high cardinality metric that is incorrect in 3.7. It will be + # fixed in 3.9. + metric_relabel_configs: + - source_labels: [__name__] + action: drop + regex: 'openshift_sdn_pod_(setup|teardown)_latency(.*)' + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + # Scrape config for cAdvisor. # # Beginning in Kube 1.7, each node exposes a /metrics/cadvisor endpoint that @@ -107,6 +103,14 @@ scrape_configs: kubernetes_sd_configs: - role: node + # Exclude a set of high cardinality metrics that can contribute to significant + # memory use in large clusters. These can be selectively enabled as necessary + # for medium or small clusters. + metric_relabel_configs: + - source_labels: [__name__] + action: drop + regex: 'container_(cpu_user_seconds_total|cpu_cfs_periods_total|memory_usage_bytes|memory_swap|memory_working_set_bytes|memory_cache|last_seen|fs_(read_seconds_total|write_seconds_total|sector_(.*)|io_(.*)|reads_merged_total|writes_merged_total)|tasks_state|memory_failcnt|memory_failures_total|spec_memory_swap_limit_bytes|fs_(.*)_bytes_total|spec_(.*))' + relabel_configs: - action: labelmap regex: __meta_kubernetes_node_label_(.+) @@ -133,38 +137,101 @@ scrape_configs: - role: endpoints relabel_configs: - - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape] - action: keep - regex: true - - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme] - action: replace - target_label: __scheme__ - regex: (https?) - - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path] + # only scrape infrastructure components + - source_labels: [__meta_kubernetes_namespace] + action: keep + regex: 'default|logging|metrics|kube-.+|openshift|openshift-.+' + # drop infrastructure components managed by other scrape targets + - source_labels: [__meta_kubernetes_service_name] + action: drop + regex: 'prometheus-node-exporter' + # only those that have requested scraping + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape] + action: keep + regex: true + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme] + action: replace + target_label: __scheme__ + regex: (https?) + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path] + action: replace + target_label: __metrics_path__ + regex: (.+) + - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port] + action: replace + target_label: __address__ + regex: (.+)(?::\d+);(\d+) + replacement: $1:$2 + - action: labelmap + regex: __meta_kubernetes_service_label_(.+) + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: kubernetes_namespace + - source_labels: [__meta_kubernetes_service_name] + action: replace + target_label: kubernetes_name + +# Scrape config for node-exporter, which is expected to be running on port 9100. 
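+# (The relabel_configs below reuse kubelet node discovery and rewrite each
+# discovered address from the kubelet port 10250 to the exporter port 9100.)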
+- job_name: 'kubernetes-nodes-exporter' + + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + + kubernetes_sd_configs: + - role: node + + metric_relabel_configs: + - source_labels: [__name__] + action: drop + regex: 'node_cpu|node_(disk|scrape_collector)_.+' + # preserve a subset of the network, netstat, vmstat, and filesystem series + - source_labels: [__name__] action: replace - target_label: __metrics_path__ - regex: (.+) - - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port] + regex: '(node_(netstat_Ip_.+|vmstat_(nr|thp)_.+|filesystem_(free|size|device_error)|network_(transmit|receive)_(drop|errs)))' + target_label: __name__ + replacement: renamed_$1 + - source_labels: [__name__] + action: drop + regex: 'node_(netstat|vmstat|filesystem|network)_.+' + - source_labels: [__name__] action: replace + regex: 'renamed_(.+)' + target_label: __name__ + replacement: $1 + # drop any partial expensive series + - source_labels: [__name__, device] + action: drop + regex: 'node_network_.+;veth.+' + - source_labels: [__name__, mountpoint] + action: drop + regex: 'node_filesystem_(free|size|device_error);([^/].*|/.+)' + + relabel_configs: + - source_labels: [__address__] + regex: '(.*):10250' + replacement: '${1}:9100' target_label: __address__ - regex: (.+)(?::\d+);(\d+) - replacement: $1:$2 - - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_username] - action: replace - target_label: __basic_auth_username__ - regex: (.+) - - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_password] - action: replace - target_label: __basic_auth_password__ - regex: (.+) + - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname] + target_label: __instance__ - action: labelmap - regex: __meta_kubernetes_service_label_(.+) - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: kubernetes_namespace - - source_labels: [__meta_kubernetes_service_name] - action: replace - target_label: kubernetes_name + regex: __meta_kubernetes_node_label_(.+) + +# Scrape config for the template service broker +- job_name: 'openshift-template-service-broker' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt + server_name: apiserver.openshift-template-service-broker.svc + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: endpoints + + relabel_configs: + - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name] + action: keep + regex: openshift-template-service-broker;apiserver;https + alerting: alertmanagers: diff --git a/roles/openshift_provisioners/defaults/main.yaml b/roles/openshift_provisioners/defaults/main.yaml index a6f040831..34ba78404 100644 --- a/roles/openshift_provisioners/defaults/main.yaml +++ b/roles/openshift_provisioners/defaults/main.yaml @@ -1,7 +1,5 @@ --- openshift_provisioners_install_provisioners: True -openshift_provisioners_image_prefix: docker.io/openshift/origin- -openshift_provisioners_image_version: latest openshift_provisioners_efs: False openshift_provisioners_efs_path: /persistentvolumes @@ -10,3 +8,11 @@ openshift_provisioners_efs_nodeselector: "" openshift_provisioners_efs_supplementalgroup: '65534' openshift_provisioners_project: openshift-infra + +openshift_provisioners_image_prefix_dict: + origin: "docker.io/openshift/origin-" + openshift-enterprise: "registry.access.redhat.com/openshift3/ose-" + 
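+# Either fact can still be set explicitly in the inventory; the set_fact in
+# tasks/main.yaml below only applies these dicts when the corresponding
+# variable is undefined.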
+openshift_provisioners_image_version_dict: + origin: "latest" + openshift-enterprise: "{{ openshift_image_tag }}" diff --git a/roles/openshift_provisioners/tasks/main.yaml b/roles/openshift_provisioners/tasks/main.yaml index 4ba26b2b8..d00573b07 100644 --- a/roles/openshift_provisioners/tasks/main.yaml +++ b/roles/openshift_provisioners/tasks/main.yaml @@ -12,6 +12,11 @@ check_mode: no tags: provisioners_init +- name: Set provisioners image facts + set_fact: + openshift_provisioners_image_prefix: "{{ openshift_provisioners_image_prefix | default(openshift_provisioners_image_prefix_dict[openshift_deployment_type]) }}" + openshift_provisioners_image_version: "{{ openshift_provisioners_image_version | default(openshift_provisioners_image_version_dict[openshift_deployment_type]) }}" + - include_tasks: install_provisioners.yaml when: openshift_provisioners_install_provisioners | default(false) | bool diff --git a/roles/openshift_sanitize_inventory/tasks/deprecations.yml b/roles/openshift_sanitize_inventory/tasks/deprecations.yml index 795b8ee60..b1ddbc07a 100644 --- a/roles/openshift_sanitize_inventory/tasks/deprecations.yml +++ b/roles/openshift_sanitize_inventory/tasks/deprecations.yml @@ -2,15 +2,18 @@ - name: Check for usage of deprecated variables set_fact: - __deprecation_message: "{{ __deprecation_message | default([]) }} + ['{{ __deprecation_header }} {{ item }} is a deprecated variable and will be no longer be used in the next minor release. Please update your inventory accordingly.']" + __deprecation_message: "{{ __deprecation_message | default( __deprecation_header ) }} \n\t{{ item }}" when: - hostvars[inventory_hostname][item] is defined with_items: "{{ __warn_deprecated_vars }}" - block: - debug: msg="{{__deprecation_message}}" - - pause: - seconds: "{{ 10 }}" + - run_once: true + set_stats: + data: + installer_phase_initialize: + message: "{{ __deprecation_message }}" when: - __deprecation_message | default ('') | length > 0 diff --git a/roles/openshift_sanitize_inventory/tasks/main.yml b/roles/openshift_sanitize_inventory/tasks/main.yml index 62d460272..08dfd8284 100644 --- a/roles/openshift_sanitize_inventory/tasks/main.yml +++ b/roles/openshift_sanitize_inventory/tasks/main.yml @@ -61,3 +61,17 @@ when: - template_service_broker_remove | default(false) | bool - template_service_broker_install | default(true) | bool + +- name: Ensure that all required vSphere configuration variables are set + fail: + msg: > + When the vSphere cloud provider is configured you must define all of these variables: + openshift_cloudprovider_vsphere_username, openshift_cloudprovider_vsphere_password, + openshift_cloudprovider_vsphere_host, openshift_cloudprovider_vsphere_datacenter, + openshift_cloudprovider_vsphere_datastore + when: + - openshift_cloudprovider_kind is defined + - openshift_cloudprovider_kind == 'vsphere' + - ( openshift_cloudprovider_vsphere_username is undefined or openshift_cloudprovider_vsphere_password is undefined or + openshift_cloudprovider_vsphere_host is undefined or openshift_cloudprovider_vsphere_datacenter is undefined or + openshift_cloudprovider_vsphere_datastore is undefined )
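The `set_fact` tasks added above implement a recurring openshift-ansible pattern: an explicit inventory value wins via `default()`, otherwise the per-deployment-type dict entry is used. A minimal standalone sketch of the pattern (an illustrative playbook, not a file from this repo):

- hosts: localhost
  gather_facts: false
  vars:
    openshift_deployment_type: openshift-enterprise
    openshift_provisioners_image_prefix_dict:
      origin: "docker.io/openshift/origin-"
      openshift-enterprise: "registry.access.redhat.com/openshift3/ose-"
  tasks:
    # An inventory-defined openshift_provisioners_image_prefix would
    # short-circuit the default(); otherwise the dict entry applies.
    - set_fact:
        openshift_provisioners_image_prefix: "{{ openshift_provisioners_image_prefix | default(openshift_provisioners_image_prefix_dict[openshift_deployment_type]) }}"
    - debug:
        msg: "image prefix resolves to {{ openshift_provisioners_image_prefix }}"

Running it prints the enterprise prefix; switching openshift_deployment_type to origin flips the result without touching the role.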
diff --git a/roles/openshift_sanitize_inventory/vars/main.yml b/roles/openshift_sanitize_inventory/vars/main.yml index df15948d2..51c6e0a64 100644 --- a/roles/openshift_sanitize_inventory/vars/main.yml +++ b/roles/openshift_sanitize_inventory/vars/main.yml @@ -1,6 +1,6 @@ --- -__deprecation_header: "[DEPRECATION WARNING]:" +__deprecation_header: "[DEPRECATION WARNING]: The following are deprecated variables and will no longer be used in the next minor release. Please update your inventory accordingly." # this is a list of variables that we will be deprecating within the next minor release, this list should be expected to change from release to release __warn_deprecated_vars: diff --git a/roles/openshift_service_catalog/files/openshift_catalog_clusterroles.yml b/roles/openshift_service_catalog/files/openshift_catalog_clusterroles.yml new file mode 100644 index 000000000..28abcbcfc --- /dev/null +++ b/roles/openshift_service_catalog/files/openshift_catalog_clusterroles.yml @@ -0,0 +1,86 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + labels: + rbac.authorization.k8s.io/aggregate-to-admin: "true" + name: system:service-catalog:aggregate-to-admin +rules: +- apiGroups: + - "servicecatalog.k8s.io" + attributeRestrictions: null + resources: + - serviceinstances + - servicebindings + verbs: + - create + - update + - delete + - get + - list + - watch + - patch +- apiGroups: + - "settings.k8s.io" + attributeRestrictions: null + resources: + - podpresets + verbs: + - create + - update + - delete + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + labels: + rbac.authorization.k8s.io/aggregate-to-edit: "true" + name: system:service-catalog:aggregate-to-edit +rules: +- apiGroups: + - "servicecatalog.k8s.io" + attributeRestrictions: null + resources: + - serviceinstances + - servicebindings + verbs: + - create + - update + - delete + - get + - list + - watch + - patch +- apiGroups: + - "settings.k8s.io" + attributeRestrictions: null + resources: + - podpresets + verbs: + - create + - update + - delete + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + labels: + rbac.authorization.k8s.io/aggregate-to-view: "true" + name: system:service-catalog:aggregate-to-view +rules: +- apiGroups: + - "servicecatalog.k8s.io" + attributeRestrictions: null + resources: + - serviceinstances + - servicebindings + verbs: + - get + - list + - watch
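The new file above replaces in-place role patching with RBAC aggregation: the Kubernetes controller merges any ClusterRole labeled `rbac.authorization.k8s.io/aggregate-to-admin` (or `-edit`, `-view`) into the matching default role, so the default roles never need to be edited directly. A minimal sketch of the mechanism, with a hypothetical role name and API group that are not part of this repo:

apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  # hypothetical name; any ClusterRole carrying the label is aggregated
  name: example:aggregate-widgets-to-view
  labels:
    rbac.authorization.k8s.io/aggregate-to-view: "true"
rules:
- apiGroups: ["example.io"]   # hypothetical API group
  resources: ["widgets"]
  verbs: ["get", "list", "watch"]

Once created, `oc get clusterrole view -o yaml` shows the widgets rule folded in, with no `oc replace` of the default role required. This is why the patch-and-replace tasks are deleted in the hunk that follows.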
diff --git a/roles/openshift_service_catalog/tasks/install.yml b/roles/openshift_service_catalog/tasks/install.yml index 9b38a85c4..4d06c1872 100644 --- a/roles/openshift_service_catalog/tasks/install.yml +++ b/roles/openshift_service_catalog/tasks/install.yml @@ -74,74 +74,17 @@ template_name: kube-system-service-catalog-role-bindings namespace: kube-system -- oc_obj: - name: edit - kind: clusterrole - state: list - register: edit_yaml - -# only do this if we don't already have the updated role info -- name: Generate apply template for clusterrole/edit - template: - src: sc_admin_edit_role_patching.j2 - dest: "{{ mktemp.stdout }}/edit_sc_patch.yml" - vars: - original_content: "{{ edit_yaml.results.results[0] | to_yaml }}" - when: - - not edit_yaml.results.results[0] | lib_utils_oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch', 'patch']) or not edit_yaml.results.results[0] | lib_utils_oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch']) - -# only do this if we don't already have the updated role info -- name: update edit role for service catalog and pod preset access - command: > - {{ openshift_client_binary }} --config=/etc/origin/master/admin.kubeconfig replace -f {{ mktemp.stdout }}/edit_sc_patch.yml - when: - - not edit_yaml.results.results[0] | lib_utils_oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch', 'patch']) or not edit_yaml.results.results[0] | lib_utils_oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch']) - -- oc_obj: - name: admin - kind: clusterrole - state: list - register: admin_yaml - -# only do this if we don't already have the updated role info -- name: Generate apply template for clusterrole/admin - template: - src: sc_admin_edit_role_patching.j2 - dest: "{{ mktemp.stdout }}/admin_sc_patch.yml" - vars: - original_content: "{{ admin_yaml.results.results[0] | to_yaml }}" - when: - - not admin_yaml.results.results[0] | lib_utils_oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch', 'patch']) or not admin_yaml.results.results[0] | lib_utils_oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch']) - -# only do this if we don't already have the updated role info -- name: update admin role for service catalog and pod preset access - command: > - {{ openshift_client_binary }} --config=/etc/origin/master/admin.kubeconfig replace -f {{ mktemp.stdout }}/admin_sc_patch.yml - when: - - not admin_yaml.results.results[0] | lib_utils_oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch', 'patch']) or not admin_yaml.results.results[0] | lib_utils_oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch']) - -- oc_obj: - name: view - kind: clusterrole - state: list - register: view_yaml - -# only do this if we don't already have the updated role info -- name: Generate apply template for clusterrole/view - template: - src: sc_view_role_patching.j2 - dest: "{{ mktemp.stdout }}/view_sc_patch.yml" - vars: - original_content: "{{ view_yaml.results.results[0] | to_yaml }}" - when: - - not view_yaml.results.results[0] | lib_utils_oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['get', 'list', 'watch']) - -# only do this if we don't already have the updated role info -- name: update view role for service catalog access - command: > - {{ openshift_client_binary }} --config=/etc/origin/master/admin.kubeconfig replace -f {{ mktemp.stdout }}/view_sc_patch.yml - when: - - not view_yaml.results.results[0] | lib_utils_oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['get', 'list', 'watch']) +- copy: + src: openshift_catalog_clusterroles.yml + dest: "{{ mktemp.stdout }}/openshift_catalog_clusterroles.yml" + +- name: Apply Service Catalog cluster roles + retries: 5 + delay: 2 + register: task_result + until: task_result.rc == 0 + shell: > + {{ openshift_client_binary }} auth reconcile --config={{ openshift.common.config_base }}/master/admin.kubeconfig -f {{ mktemp.stdout }}/openshift_catalog_clusterroles.yml - oc_adm_policy_user: namespace: kube-service-catalog diff --git a/roles/openshift_service_catalog/templates/sc_admin_edit_role_patching.j2 b/roles/openshift_service_catalog/templates/sc_admin_edit_role_patching.j2 deleted file mode 100644 index 59cceafcf..000000000 --- a/roles/openshift_service_catalog/templates/sc_admin_edit_role_patching.j2 +++ /dev/null @@ -1,27 +0,0 @@ -{{ original_content }} -- apiGroups: - - "servicecatalog.k8s.io" -
attributeRestrictions: null - resources: - - serviceinstances - - servicebindings - verbs: - - create - - update - - delete - - get - - list - - watch - - patch -- apiGroups: - - "settings.k8s.io" - attributeRestrictions: null - resources: - - podpresets - verbs: - - create - - update - - delete - - get - - list - - watch diff --git a/roles/openshift_service_catalog/templates/sc_view_role_patching.j2 b/roles/openshift_service_catalog/templates/sc_view_role_patching.j2 deleted file mode 100644 index 838993854..000000000 --- a/roles/openshift_service_catalog/templates/sc_view_role_patching.j2 +++ /dev/null @@ -1,11 +0,0 @@ -{{ original_content }} -- apiGroups: - - "servicecatalog.k8s.io" - attributeRestrictions: null - resources: - - serviceinstances - - servicebindings - verbs: - - get - - list - - watch diff --git a/roles/openshift_storage_glusterfs/README.md b/roles/openshift_storage_glusterfs/README.md index f7bd58db3..70a89b0ba 100644 --- a/roles/openshift_storage_glusterfs/README.md +++ b/roles/openshift_storage_glusterfs/README.md @@ -73,49 +73,51 @@ Role Variables This role has the following variables that control the integration of a GlusterFS cluster into a new or existing OpenShift cluster: -| Name | Default value | Description | -|--------------------------------------------------|-------------------------|-----------------------------------------| -| openshift_storage_glusterfs_timeout | 300 | Seconds to wait for pods to become ready -| openshift_storage_glusterfs_namespace | 'glusterfs' | Namespace/project in which to create GlusterFS resources -| openshift_storage_glusterfs_is_native | True | GlusterFS should be containerized -| openshift_storage_glusterfs_name | 'storage' | A name to identify the GlusterFS cluster, which will be used in resource names -| openshift_storage_glusterfs_nodeselector | 'glusterfs=storage-host'| Selector to determine which nodes will host GlusterFS pods in native mode. **NOTE:** The label value is taken from the cluster name -| openshift_storage_glusterfs_use_default_selector | False | Whether to use a default node selector for the GlusterFS namespace/project. If False, the namespace/project will have no restricting node selector. If True, uses pre-existing or default (e.g. osm_default_node_selector) node selectors. **NOTE:** If True, nodes which will host GlusterFS pods must already have the additional labels. -| openshift_storage_glusterfs_storageclass | True | Automatically create a StorageClass for each GlusterFS cluster -| openshift_storage_glusterfs_storageclass_default | False | Sets the StorageClass for each GlusterFS cluster as default -| openshift_storage_glusterfs_image | 'gluster/gluster-centos'| Container image to use for GlusterFS pods, enterprise default is 'rhgs3/rhgs-server-rhel7' -| openshift_storage_glusterfs_version | 'latest' | Container image version to use for GlusterFS pods -| openshift_storage_glusterfs_block_deploy | True | Deploy glusterblock provisioner service -| openshift_storage_glusterfs_block_image | 'gluster/glusterblock-provisioner'| Container image to use for glusterblock-provisioner pod, enterprise default is 'rhgs3/rhgs-gluster-block-prov-rhel7' -| openshift_storage_glusterfs_block_version | 'latest' | Container image version to use for glusterblock-provisioner pod -| openshift_storage_glusterfs_block_host_vol_create| True | Automatically create GlusterFS volumes to host glusterblock volumes. 
**NOTE:** If this is False, block-hosting volumes will need to be manually created before glusterblock volumes can be provisioned -| openshift_storage_glusterfs_block_host_vol_size | 100 | Size, in GB, of GlusterFS volumes that will be automatically create to host glusterblock volumes if not enough space is available for a glusterblock volume create request. **NOTE:** This value is effectively an upper limit on the size of glusterblock volumes unless you manually create larger GlusterFS block-hosting volumes -| openshift_storage_glusterfs_block_host_vol_max | 15 | Max number of GlusterFS volumes to host glusterblock volumes -| openshift_storage_glusterfs_s3_deploy | True | Deploy gluster-s3 service -| openshift_storage_glusterfs_s3_image | 'gluster/gluster-object'| Container image to use for gluster-s3 pod, enterprise default is 'rhgs3/rhgs-gluster-s3-server-rhel7' -| openshift_storage_glusterfs_s3_version | 'latest' | Container image version to use for gluster=s3 pod -| openshift_storage_glusterfs_s3_account | Undefined | S3 account name for the S3 service, required for S3 service deployment -| openshift_storage_glusterfs_s3_user | Undefined | S3 user name for the S3 service, required for S3 service deployment -| openshift_storage_glusterfs_s3_password | Undefined | S3 user password for the S3 service, required for S3 service deployment -| openshift_storage_glusterfs_s3_pvc | Dynamic | Name of the GlusterFS-backed PVC which will be used for S3 object data storage, generated from the cluster name and S3 account by default -| openshift_storage_glusterfs_s3_pvc_size | "2Gi" | Size, in Gi, of the GlusterFS-backed PVC which will be used for S3 object data storage -| openshift_storage_glusterfs_s3_meta_pvc | Dynamic | Name of the GlusterFS-backed PVC which will be used for S3 object metadata storage, generated from the cluster name and S3 account by default -| openshift_storage_glusterfs_s3_meta_pvc_size | "1Gi" | Size, in Gi, of the GlusterFS-backed PVC which will be used for S3 object metadata storage -| openshift_storage_glusterfs_wipe | False | Destroy any existing GlusterFS resources and wipe storage devices. **WARNING: THIS WILL DESTROY ANY DATA ON THOSE DEVICES.** -| openshift_storage_glusterfs_heketi_is_native | True | heketi should be containerized -| openshift_storage_glusterfs_heketi_cli | 'heketi-cli' | Command/Path to invoke the heketi-cli tool **NOTE:** Change this only for **non-native heketi** if heketi-cli is not in the global `$PATH` of the machine running openshift-ansible -| openshift_storage_glusterfs_heketi_image | 'heketi/heketi' | Container image to use for heketi pods, enterprise default is 'rhgs3/rhgs-volmanager-rhel7' -| openshift_storage_glusterfs_heketi_version | 'latest' | Container image version to use for heketi pods -| openshift_storage_glusterfs_heketi_admin_key | auto-generated | String to use as secret key for performing heketi commands as admin -| openshift_storage_glusterfs_heketi_user_key | auto-generated | String to use as secret key for performing heketi commands as user that can only view or modify volumes -| openshift_storage_glusterfs_heketi_topology_load | True | Load the GlusterFS topology information into heketi -| openshift_storage_glusterfs_heketi_url | Undefined | When heketi is native, this sets the hostname portion of the final heketi route URL. When heketi is external, this is the FQDN or IP address to the heketi service. 
-| openshift_storage_glusterfs_heketi_port | 8080 | TCP port for external heketi service **NOTE:** This has no effect in native mode -| openshift_storage_glusterfs_heketi_executor | 'kubernetes' | Selects how a native heketi service will manage GlusterFS nodes: 'kubernetes' for native nodes, 'ssh' for external nodes -| openshift_storage_glusterfs_heketi_ssh_port | 22 | SSH port for external GlusterFS nodes via native heketi -| openshift_storage_glusterfs_heketi_ssh_user | 'root' | SSH user for external GlusterFS nodes via native heketi -| openshift_storage_glusterfs_heketi_ssh_sudo | False | Whether to sudo (if non-root user) for SSH to external GlusterFS nodes via native heketi -| openshift_storage_glusterfs_heketi_ssh_keyfile | Undefined | Path to a private key file for use with SSH connections to external GlusterFS nodes via native heketi **NOTE:** This must be an absolute path +| Name | Default value | Description | +|--------------------------------------------------------|-------------------------|-----------------------------------------| +| openshift_storage_glusterfs_timeout | 300 | Seconds to wait for pods to become ready +| openshift_storage_glusterfs_namespace | 'glusterfs' | Namespace/project in which to create GlusterFS resources +| openshift_storage_glusterfs_is_native | True | GlusterFS should be containerized +| openshift_storage_glusterfs_name | 'storage' | A name to identify the GlusterFS cluster, which will be used in resource names +| openshift_storage_glusterfs_nodeselector | 'glusterfs=storage-host'| Selector to determine which nodes will host GlusterFS pods in native mode. **NOTE:** The label value is taken from the cluster name +| openshift_storage_glusterfs_use_default_selector | False | Whether to use a default node selector for the GlusterFS namespace/project. If False, the namespace/project will have no restricting node selector. If True, uses pre-existing or default (e.g. osm_default_node_selector) node selectors. **NOTE:** If True, nodes which will host GlusterFS pods must already have the additional labels. +| openshift_storage_glusterfs_storageclass | True | Automatically create a StorageClass for each GlusterFS cluster +| openshift_storage_glusterfs_storageclass_default | False | Sets the StorageClass for each GlusterFS cluster as default +| openshift_storage_glusterfs_image | 'gluster/gluster-centos'| Container image to use for GlusterFS pods, enterprise default is 'rhgs3/rhgs-server-rhel7' +| openshift_storage_glusterfs_version | 'latest' | Container image version to use for GlusterFS pods +| openshift_storage_glusterfs_block_deploy | True | Deploy glusterblock provisioner service +| openshift_storage_glusterfs_block_image | 'gluster/glusterblock-provisioner'| Container image to use for glusterblock-provisioner pod, enterprise default is 'rhgs3/rhgs-gluster-block-prov-rhel7' +| openshift_storage_glusterfs_block_version | 'latest' | Container image version to use for glusterblock-provisioner pod +| openshift_storage_glusterfs_block_host_vol_create | True | Automatically create GlusterFS volumes to host glusterblock volumes. **NOTE:** If this is False, block-hosting volumes will need to be manually created before glusterblock volumes can be provisioned +| openshift_storage_glusterfs_block_host_vol_size | 100 | Size, in GB, of GlusterFS volumes that will be automatically created to host glusterblock volumes if not enough space is available for a glusterblock volume create request.
**NOTE:** This value is effectively an upper limit on the size of glusterblock volumes unless you manually create larger GlusterFS block-hosting volumes +| openshift_storage_glusterfs_block_host_vol_max | 15 | Max number of GlusterFS volumes to host glusterblock volumes +| openshift_storage_glusterfs_block_storageclass | False | Automatically create a StorageClass for each Gluster Block cluster +| openshift_storage_glusterfs_block_storageclass_default | False | Sets the StorageClass for each Gluster Block cluster as default +| openshift_storage_glusterfs_s3_deploy | True | Deploy gluster-s3 service +| openshift_storage_glusterfs_s3_image | 'gluster/gluster-object'| Container image to use for gluster-s3 pod, enterprise default is 'rhgs3/rhgs-gluster-s3-server-rhel7' +| openshift_storage_glusterfs_s3_version | 'latest' | Container image version to use for gluster-s3 pod +| openshift_storage_glusterfs_s3_account | Undefined | S3 account name for the S3 service, required for S3 service deployment +| openshift_storage_glusterfs_s3_user | Undefined | S3 user name for the S3 service, required for S3 service deployment +| openshift_storage_glusterfs_s3_password | Undefined | S3 user password for the S3 service, required for S3 service deployment +| openshift_storage_glusterfs_s3_pvc | Dynamic | Name of the GlusterFS-backed PVC which will be used for S3 object data storage, generated from the cluster name and S3 account by default +| openshift_storage_glusterfs_s3_pvc_size | "2Gi" | Size, in Gi, of the GlusterFS-backed PVC which will be used for S3 object data storage +| openshift_storage_glusterfs_s3_meta_pvc | Dynamic | Name of the GlusterFS-backed PVC which will be used for S3 object metadata storage, generated from the cluster name and S3 account by default +| openshift_storage_glusterfs_s3_meta_pvc_size | "1Gi" | Size, in Gi, of the GlusterFS-backed PVC which will be used for S3 object metadata storage +| openshift_storage_glusterfs_wipe | False | Destroy any existing GlusterFS resources and wipe storage devices. **WARNING: THIS WILL DESTROY ANY DATA ON THOSE DEVICES.** +| openshift_storage_glusterfs_heketi_is_native | True | heketi should be containerized +| openshift_storage_glusterfs_heketi_cli | 'heketi-cli' | Command/Path to invoke the heketi-cli tool **NOTE:** Change this only for **non-native heketi** if heketi-cli is not in the global `$PATH` of the machine running openshift-ansible +| openshift_storage_glusterfs_heketi_image | 'heketi/heketi' | Container image to use for heketi pods, enterprise default is 'rhgs3/rhgs-volmanager-rhel7' +| openshift_storage_glusterfs_heketi_version | 'latest' | Container image version to use for heketi pods +| openshift_storage_glusterfs_heketi_admin_key | auto-generated | String to use as secret key for performing heketi commands as admin +| openshift_storage_glusterfs_heketi_user_key | auto-generated | String to use as secret key for performing heketi commands as user that can only view or modify volumes +| openshift_storage_glusterfs_heketi_topology_load | True | Load the GlusterFS topology information into heketi +| openshift_storage_glusterfs_heketi_url | Undefined | When heketi is native, this sets the hostname portion of the final heketi route URL. When heketi is external, this is the FQDN or IP address to the heketi service.
+| openshift_storage_glusterfs_heketi_port | 8080 | TCP port for external heketi service **NOTE:** This has no effect in native mode +| openshift_storage_glusterfs_heketi_executor | 'kubernetes' | Selects how a native heketi service will manage GlusterFS nodes: 'kubernetes' for native nodes, 'ssh' for external nodes +| openshift_storage_glusterfs_heketi_ssh_port | 22 | SSH port for external GlusterFS nodes via native heketi +| openshift_storage_glusterfs_heketi_ssh_user | 'root' | SSH user for external GlusterFS nodes via native heketi +| openshift_storage_glusterfs_heketi_ssh_sudo | False | Whether to sudo (if non-root user) for SSH to external GlusterFS nodes via native heketi +| openshift_storage_glusterfs_heketi_ssh_keyfile | Undefined | Path to a private key file for use with SSH connections to external GlusterFS nodes via native heketi **NOTE:** This must be an absolute path | openshift_storage_glusterfs_heketi_fstab | '/var/lib/heketi/fstab' | When heketi is native, sets the path to the fstab file on the GlusterFS nodes to update on LVM volume mounts, changes to '/etc/fstab/' when the heketi executor is 'ssh' **NOTE:** This should not need to be changed | openshift_storage_glusterfs_heketi_wipe | False | Destroy any existing heketi resources, defaults to the value of `openshift_storage_glusterfs_wipe` @@ -126,14 +128,16 @@ registry. These variables start with the prefix values in their corresponding non-registry variables. The following variables are an exception: -| Name | Default value | Description | -|-----------------------------------------------------------|-----------------------|-----------------------------------------| -| openshift_storage_glusterfs_registry_namespace | registry namespace | Default is to use the hosted registry's namespace, otherwise 'glusterfs' -| openshift_storage_glusterfs_registry_name | 'registry' | This allows for the logical separation of the registry GlusterFS cluster from other GlusterFS clusters -| openshift_storage_glusterfs_registry_storageclass | False | It is recommended to not create a StorageClass for GlusterFS clusters serving registry storage, so as to avoid performance penalties -| openshift_storage_glusterfs_registry_storageclass_default | False | Sets the StorageClass for each GlusterFS cluster as default -| openshift_storage_glusterfs_registry_heketi_admin_key | auto-generated | Separate from the above -| openshift_storage_glusterfs_registry_heketi_user_key | auto-generated | Separate from the above +| Name | Default value | Description | +|-----------------------------------------------------------------|-----------------------|-----------------------------------------| +| openshift_storage_glusterfs_registry_namespace | registry namespace | Default is to use the hosted registry's namespace, otherwise 'glusterfs' +| openshift_storage_glusterfs_registry_name | 'registry' | This allows for the logical separation of the registry GlusterFS cluster from other GlusterFS clusters +| openshift_storage_glusterfs_registry_storageclass | False | It is recommended to not create a StorageClass for GlusterFS clusters serving registry storage, so as to avoid performance penalties +| openshift_storage_glusterfs_registry_storageclass_default | False | Sets the StorageClass for each GlusterFS cluster as default +| openshift_storage_glusterfs_registry_block_storageclass | False | It is recommended to not create a StorageClass for Gluster Block clusters serving registry storage, so as to avoid performance penalties +| 
openshift_storage_glusterfs_registry_block_storageclass_default | False | Sets the StorageClass for each Gluster Block cluster as default +| openshift_storage_glusterfs_registry_heketi_admin_key | auto-generated | Separate from the above +| openshift_storage_glusterfs_registry_heketi_user_key | auto-generated | Separate from the above Additionally, this role's behavior responds to several registry-specific variables in the [openshift_hosted role](../openshift_hosted/README.md): diff --git a/roles/openshift_storage_glusterfs/defaults/main.yml b/roles/openshift_storage_glusterfs/defaults/main.yml index 4cbe262d2..7e751cc7a 100644 --- a/roles/openshift_storage_glusterfs/defaults/main.yml +++ b/roles/openshift_storage_glusterfs/defaults/main.yml @@ -14,6 +14,8 @@ openshift_storage_glusterfs_block_version: 'latest' openshift_storage_glusterfs_block_host_vol_create: True openshift_storage_glusterfs_block_host_vol_size: 100 openshift_storage_glusterfs_block_host_vol_max: 15 +openshift_storage_glusterfs_block_storageclass: False +openshift_storage_glusterfs_block_storageclass_default: False openshift_storage_glusterfs_s3_deploy: True openshift_storage_glusterfs_s3_image: "{{ 'rhgs3/rhgs-gluster-s3-server-rhel7' | quote if openshift_deployment_type == 'openshift-enterprise' else 'gluster/gluster-object' | quote }}" openshift_storage_glusterfs_s3_version: 'latest' @@ -61,6 +63,8 @@ openshift_storage_glusterfs_registry_block_version: "{{ openshift_storage_gluste openshift_storage_glusterfs_registry_block_host_vol_create: "{{ openshift_storage_glusterfs_block_host_vol_create }}" openshift_storage_glusterfs_registry_block_host_vol_size: "{{ openshift_storage_glusterfs_block_host_vol_size }}" openshift_storage_glusterfs_registry_block_host_vol_max: "{{ openshift_storage_glusterfs_block_host_vol_max }}" +openshift_storage_glusterfs_registry_block_storageclass: False +openshift_storage_glusterfs_registry_block_storageclass_default: False openshift_storage_glusterfs_registry_s3_deploy: "{{ openshift_storage_glusterfs_s3_deploy }}" openshift_storage_glusterfs_registry_s3_image: "{{ openshift_storage_glusterfs_s3_image }}" openshift_storage_glusterfs_registry_s3_version: "{{ openshift_storage_glusterfs_s3_version }}" @@ -103,3 +107,9 @@ r_openshift_storage_glusterfs_os_firewall_allow: port: "24008/tcp" - service: glusterfs_bricks port: "49152-49251/tcp" +- service: glusterblockd + port: "24010/tcp" +- service: iscsi-targets + port: "3260/tcp" +- service: rpcbind + port: "111/tcp" diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml index 001578406..e6e261b52 100644 --- a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml +++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml @@ -313,7 +313,36 @@ - glusterfs_storageclass or glusterfs_s3_deploy - include_tasks: glusterblock_deploy.yml - when: glusterfs_block_deploy + when: + - glusterfs_block_deploy + # TODO: Remove this when multipathd is available on Atomic hosts + - not openshift_is_atomic | bool + +- block: + - name: Create heketi block secret + oc_secret: + namespace: "{{ glusterfs_namespace }}" + state: present + name: "heketi-{{ glusterfs_name }}-admin-secret-block" + type: "gluster.org/glusterblock" + force: True + contents: + - path: key + data: "{{ glusterfs_heketi_admin_key }}" + when: glusterfs_heketi_admin_key is defined + - name: Generate Gluster Block StorageClass file + template: + src: "{{ openshift.common.examples_content_version }}/gluster-block-storageclass.yml.j2" + dest: "{{ mktemp.stdout }}/gluster-block-storageclass.yml" + + - name: Create Gluster Block StorageClass + oc_obj: + state: present + kind: storageclass + name: "glusterfs-{{ glusterfs_name }}-block" + files: + - "{{ mktemp.stdout }}/gluster-block-storageclass.yml" + when: glusterfs_block_storageclass - include_tasks: gluster_s3_deploy.yml when: glusterfs_s3_deploy
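With the flags added in the defaults and task files here, gluster-block provisioning becomes purely an inventory decision. An illustrative inventory fragment (YAML inventory format; the values are examples, not recommendations):

all:
  children:
    OSEv3:
      vars:
        openshift_storage_glusterfs_block_deploy: true
        # render and create the StorageClass from the new
        # gluster-block-storageclass.yml.j2 templates further below
        openshift_storage_glusterfs_block_storageclass: true
        openshift_storage_glusterfs_block_storageclass_default: false
        openshift_storage_glusterfs_block_host_vol_size: 100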
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml index a374df0ce..92de1b64d 100644 --- a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml +++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml @@ -17,6 +17,8 @@ glusterfs_block_host_vol_create: "{{ openshift_storage_glusterfs_block_host_vol_create }}" glusterfs_block_host_vol_size: "{{ openshift_storage_glusterfs_block_host_vol_size }}" glusterfs_block_host_vol_max: "{{ openshift_storage_glusterfs_block_host_vol_max }}" + glusterfs_block_storageclass: "{{ openshift_storage_glusterfs_block_storageclass | bool }}" + glusterfs_block_storageclass_default: "{{ openshift_storage_glusterfs_block_storageclass_default | bool }}" glusterfs_s3_deploy: "{{ openshift_storage_glusterfs_s3_deploy | bool }}" glusterfs_s3_image: "{{ openshift_storage_glusterfs_s3_image }}" glusterfs_s3_version: "{{ openshift_storage_glusterfs_s3_version }}" diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml index 544a6f491..10c29fd37 100644 --- a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml +++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml @@ -17,6 +17,8 @@ glusterfs_block_host_vol_create: "{{ openshift_storage_glusterfs_registry_block_host_vol_create }}" glusterfs_block_host_vol_size: "{{ openshift_storage_glusterfs_registry_block_host_vol_size }}" glusterfs_block_host_vol_max: "{{ openshift_storage_glusterfs_registry_block_host_vol_max }}" + glusterfs_block_storageclass: "{{ openshift_storage_glusterfs_registry_block_storageclass | bool }}" + glusterfs_block_storageclass_default: "{{ openshift_storage_glusterfs_registry_block_storageclass_default | bool }}" glusterfs_s3_deploy: "{{ openshift_storage_glusterfs_registry_s3_deploy | bool }}" glusterfs_s3_image: "{{ openshift_storage_glusterfs_registry_s3_image }}" glusterfs_s3_version: "{{ openshift_storage_glusterfs_registry_s3_version }}" @@ -46,7 +48,7 @@ glusterfs_heketi_ssh_sudo: "{{ openshift_storage_glusterfs_registry_heketi_ssh_sudo | bool }}" glusterfs_heketi_ssh_keyfile: "{{ openshift_storage_glusterfs_registry_heketi_ssh_keyfile }}" glusterfs_heketi_fstab: "{{ openshift_storage_glusterfs_registry_heketi_fstab }}" - glusterfs_nodes: "{% if groups.glusterfs_registry is defined %}{% set nodes = groups.glusterfs_registry %}{% elif 'groups.glusterfs' is defined %}{% set nodes = groups.glusterfs %}{% else %}{% set nodes = '[]' %}{% endif %}{{ nodes }}" + glusterfs_nodes: "{% if groups.glusterfs_registry is defined and groups['glusterfs_registry'] | length > 0 %}{% set nodes = groups.glusterfs_registry %}{% elif groups.glusterfs is defined and groups['glusterfs'] | length > 0 %}{% set nodes = groups.glusterfs %}{% else %}{% set nodes = '[]' %}{% endif %}{{ nodes }}" - include_tasks: glusterfs_common.yml when:
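The corrected `glusterfs_nodes` expression above encodes a two-level fallback: use the `[glusterfs_registry]` inventory group when it exists and is non-empty, else `[glusterfs]`, else an empty list. The same logic reads more directly as a Jinja conditional; a hypothetical equivalent, not taken from the repo:

- set_fact:
    glusterfs_nodes: >-
      {{ groups.glusterfs_registry
         if (groups.glusterfs_registry is defined and groups.glusterfs_registry | length > 0)
         else (groups.glusterfs | default([])) }}
  # Note: the original also length-checks groups.glusterfs; a strict
  # equivalent would repeat that guard before falling back to it.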
diff --git a/roles/openshift_storage_glusterfs/templates/glusterfs.conf b/roles/openshift_storage_glusterfs/templates/glusterfs.conf index dd4d6e6f7..bcc02e217 100644 --- a/roles/openshift_storage_glusterfs/templates/glusterfs.conf +++ b/roles/openshift_storage_glusterfs/templates/glusterfs.conf @@ -1,4 +1,7 @@ #{{ ansible_managed }} dm_thin_pool dm_snapshot -dm_mirror
\ No newline at end of file +dm_mirror +#glusterblock +dm_multipath +target_core_user diff --git a/roles/openshift_storage_glusterfs/templates/v3.7/gluster-block-storageclass.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.7/gluster-block-storageclass.yml.j2 new file mode 100644 index 000000000..02ed8fa8d --- /dev/null +++ b/roles/openshift_storage_glusterfs/templates/v3.7/gluster-block-storageclass.yml.j2 @@ -0,0 +1,19 @@ +--- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: glusterfs-{{ glusterfs_name }}-block +{% if glusterfs_block_storageclass_default is defined and glusterfs_block_storageclass_default %} + annotations: + storageclass.kubernetes.io/is-default-class: "true" +{% endif %} +provisioner: gluster.org/glusterblock +parameters: + resturl: "http://{% if glusterfs_heketi_is_native %}{{ glusterfs_heketi_route }}{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %}" + restuser: "admin" + chapauthenabled: "true" + hacount: "3" +{% if glusterfs_heketi_admin_key is defined %} + restsecretnamespace: "{{ glusterfs_namespace }}" + restsecretname: "heketi-{{ glusterfs_name }}-admin-secret-block" +{%- endif -%} diff --git a/roles/openshift_storage_glusterfs/templates/v3.8/gluster-block-storageclass.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.8/gluster-block-storageclass.yml.j2 new file mode 100644 index 000000000..02ed8fa8d --- /dev/null +++ b/roles/openshift_storage_glusterfs/templates/v3.8/gluster-block-storageclass.yml.j2 @@ -0,0 +1,19 @@ +--- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: glusterfs-{{ glusterfs_name }}-block +{% if glusterfs_block_storageclass_default is defined and glusterfs_block_storageclass_default %} + annotations: + storageclass.kubernetes.io/is-default-class: "true" +{% endif %} +provisioner: gluster.org/glusterblock +parameters: + resturl: "http://{% if glusterfs_heketi_is_native %}{{ glusterfs_heketi_route }}{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %}" + restuser: "admin" + chapauthenabled: "true" + hacount: "3" +{% if glusterfs_heketi_admin_key is defined %} + restsecretnamespace: "{{ glusterfs_namespace }}" + restsecretname: "heketi-{{ glusterfs_name }}-admin-secret-block" +{%- endif -%} diff --git a/roles/openshift_storage_glusterfs/templates/v3.9/gluster-block-storageclass.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.9/gluster-block-storageclass.yml.j2 new file mode 100644 index 000000000..02ed8fa8d --- /dev/null +++ b/roles/openshift_storage_glusterfs/templates/v3.9/gluster-block-storageclass.yml.j2 @@ -0,0 +1,19 @@ +--- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: glusterfs-{{ glusterfs_name }}-block +{% if glusterfs_block_storageclass_default is defined and glusterfs_block_storageclass_default %} + annotations: + storageclass.kubernetes.io/is-default-class: "true" +{% endif %} +provisioner: gluster.org/glusterblock +parameters: + resturl: "http://{% if glusterfs_heketi_is_native %}{{ glusterfs_heketi_route }}{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %}" + restuser: "admin" + chapauthenabled: "true" + hacount: "3" +{% if glusterfs_heketi_admin_key is defined %} + restsecretnamespace: "{{ glusterfs_namespace }}" + restsecretname: "heketi-{{ glusterfs_name }}-admin-secret-block" +{%- endif -%} diff --git a/roles/openshift_version/defaults/main.yml b/roles/openshift_version/defaults/main.yml index e2e6538c9..513dff045 100644 --- a/roles/openshift_version/defaults/main.yml 
+++ b/roles/openshift_version/defaults/main.yml @@ -10,3 +10,4 @@ openshift_service_type: "{{ openshift_service_type_dict[openshift_deployment_typ openshift_use_crio_only: False l_first_master_version_task_file: "{{ openshift_is_containerized | ternary('first_master_containerized_version.yml', 'first_master_rpm_version.yml') }}" +l_force_image_tag_to_version: False diff --git a/roles/openshift_version/tasks/first_master.yml b/roles/openshift_version/tasks/first_master.yml index 374725086..b0d155c2c 100644 --- a/roles/openshift_version/tasks/first_master.yml +++ b/roles/openshift_version/tasks/first_master.yml @@ -19,12 +19,14 @@ - set_fact: openshift_pkg_version: -{{ openshift_version }} when: - - openshift_pkg_version is not defined + - openshift_pkg_version is not defined or openshift_pkg_version == "" - openshift_upgrade_target is not defined - block: - debug: - msg: "openshift_image_tag was not defined. Falling back to v{{ openshift_version }}" + msg: "openshift_image_tag set to v{{ openshift_version }}" - set_fact: openshift_image_tag: v{{ openshift_version }} - when: openshift_image_tag is not defined + when: > + openshift_image_tag is not defined or openshift_image_tag == "" + or l_force_image_tag_to_version | bool diff --git a/roles/openshift_version/tasks/first_master_containerized_version.yml b/roles/openshift_version/tasks/first_master_containerized_version.yml index 3ed1d2cfe..9eb38cb2b 100644 --- a/roles/openshift_version/tasks/first_master_containerized_version.yml +++ b/roles/openshift_version/tasks/first_master_containerized_version.yml @@ -6,6 +6,7 @@ openshift_version: "{{ openshift_image_tag[1:].split('-')[0] if openshift_image_tag != 'latest' else openshift_image_tag }}" when: - openshift_image_tag is defined + - openshift_image_tag != "" - openshift_version is not defined - not (openshift_version_reinit | default(false)) diff --git a/roles/openshift_version/tasks/first_master_rpm_version.yml b/roles/openshift_version/tasks/first_master_rpm_version.yml index 5d92f90c6..85e440513 100644 --- a/roles/openshift_version/tasks/first_master_rpm_version.yml +++ b/roles/openshift_version/tasks/first_master_rpm_version.yml @@ -5,6 +5,7 @@ openshift_version: "{{ openshift_pkg_version[1:].split('-')[0] }}" when: - openshift_pkg_version is defined + - openshift_pkg_version != "" - openshift_version is not defined - not (openshift_version_reinit | default(false)) diff --git a/roles/openshift_web_console/defaults/main.yml b/roles/openshift_web_console/defaults/main.yml index c747f73a8..627db393a 100644 --- a/roles/openshift_web_console/defaults/main.yml +++ b/roles/openshift_web_console/defaults/main.yml @@ -1,2 +1,2 @@ --- -openshift_web_console_nodeselector: "{{ openshift_hosted_infra_selector | default('region=infra') | map_from_pairs }}" +openshift_web_console_nodeselector: {"node-role.kubernetes.io/master":"true"} diff --git a/roles/openshift_web_console/files/console-config.yaml b/roles/openshift_web_console/files/console-config.yaml new file mode 100644 index 000000000..55c650fbe --- /dev/null +++ b/roles/openshift_web_console/files/console-config.yaml @@ -0,0 +1,24 @@ +apiVersion: webconsole.config.openshift.io/v1 +kind: WebConsoleConfiguration +clusterInfo: + consolePublicURL: https://127.0.0.1:8443/console/ + loggingPublicURL: "" + logoutPublicURL: "" + masterPublicURL: https://127.0.0.1:8443 + metricsPublicURL: "" +extensions: + scriptURLs: [] + stylesheetURLs: [] + properties: null +features: + inactivityTimeoutMinutes: 0 + clusterResourceOverridesEnabled: false 
+servingInfo: + bindAddress: 0.0.0.0:8443 + bindNetwork: tcp4 + certFile: /var/serving-cert/tls.crt + clientCA: "" + keyFile: /var/serving-cert/tls.key + maxRequestsInFlight: 0 + namedCertificates: null + requestTimeoutSeconds: 0 diff --git a/roles/openshift_web_console/files/console-rbac-template.yaml b/roles/openshift_web_console/files/console-rbac-template.yaml new file mode 100644 index 000000000..9ee117199 --- /dev/null +++ b/roles/openshift_web_console/files/console-rbac-template.yaml @@ -0,0 +1,38 @@ +apiVersion: template.openshift.io/v1 +kind: Template +metadata: + name: web-console-server-rbac +parameters: +- name: NAMESPACE + # This namespace cannot be changed. Only `openshift-web-console` is supported. + value: openshift-web-console +objects: + + +# grant powers to the web console server for cluster inspection +- apiVersion: rbac.authorization.k8s.io/v1beta1 + kind: ClusterRole + metadata: + name: system:openshift:web-console-server + rules: + - apiGroups: + - "servicecatalog.k8s.io" + resources: + - clusterservicebrokers + verbs: + - get + - list + - watch + +# grant the cluster role above to the web console service account +- apiVersion: rbac.authorization.k8s.io/v1beta1 + kind: ClusterRoleBinding + metadata: + name: system:openshift:web-console-server + roleRef: + kind: ClusterRole + name: system:openshift:web-console-server + subjects: + - kind: ServiceAccount + namespace: ${NAMESPACE} + name: webconsole diff --git a/roles/openshift_web_console/files/console-template.yaml b/roles/openshift_web_console/files/console-template.yaml new file mode 100644 index 000000000..547e7a265 --- /dev/null +++ b/roles/openshift_web_console/files/console-template.yaml @@ -0,0 +1,127 @@ +apiVersion: template.openshift.io/v1 +kind: Template +metadata: + name: openshift-web-console + annotations: + openshift.io/display-name: OpenShift Web Console + description: The server for the OpenShift web console. + iconClass: icon-openshift + tags: openshift,infra + openshift.io/documentation-url: https://github.com/openshift/origin-web-console-server + openshift.io/support-url: https://access.redhat.com + openshift.io/provider-display-name: Red Hat, Inc. +parameters: +- name: IMAGE + value: openshift/origin-web-console:latest +- name: NAMESPACE + # This namespace cannot be changed. Only `openshift-web-console` is supported.
+ value: openshift-web-console +- name: LOGLEVEL + value: "0" +- name: API_SERVER_CONFIG +- name: NODE_SELECTOR + value: "{}" +- name: REPLICA_COUNT + value: "1" +objects: + +# to create the web console server +- apiVersion: apps/v1beta1 + kind: Deployment + metadata: + namespace: ${NAMESPACE} + name: webconsole + labels: + app: openshift-web-console + webconsole: "true" + spec: + replicas: "${{REPLICA_COUNT}}" + strategy: + type: Recreate + template: + metadata: + name: webconsole + labels: + webconsole: "true" + spec: + serviceAccountName: webconsole + containers: + - name: webconsole + image: ${IMAGE} + imagePullPolicy: IfNotPresent + command: + - "/usr/bin/origin-web-console" + - "--audit-log-path=-" + - "-v=${LOGLEVEL}" + - "--config=/var/webconsole-config/webconsole-config.yaml" + ports: + - containerPort: 8443 + volumeMounts: + - mountPath: /var/serving-cert + name: serving-cert + - mountPath: /var/webconsole-config + name: webconsole-config + readinessProbe: + httpGet: + path: /healthz + port: 8443 + scheme: HTTPS + livenessProbe: + httpGet: + path: / + port: 8443 + scheme: HTTPS + resources: + requests: + cpu: 100m + memory: 100Mi + nodeSelector: "${{NODE_SELECTOR}}" + volumes: + - name: serving-cert + secret: + defaultMode: 400 + secretName: webconsole-serving-cert + - name: webconsole-config + configMap: + defaultMode: 440 + name: webconsole-config + +# to create the config for the web console +- apiVersion: v1 + kind: ConfigMap + metadata: + namespace: ${NAMESPACE} + name: webconsole-config + labels: + app: openshift-web-console + data: + webconsole-config.yaml: ${API_SERVER_CONFIG} + +# to be able to assign powers to the process +- apiVersion: v1 + kind: ServiceAccount + metadata: + namespace: ${NAMESPACE} + name: webconsole + labels: + app: openshift-web-console + +# to be able to expose web console inside the cluster +- apiVersion: v1 + kind: Service + metadata: + namespace: ${NAMESPACE} + name: webconsole + labels: + app: openshift-web-console + annotations: + service.alpha.openshift.io/serving-cert-secret-name: webconsole-serving-cert + prometheus.io/scrape: "true" + prometheus.io/scheme: https + spec: + selector: + webconsole: "true" + ports: + - name: https + port: 443 + targetPort: 8443 diff --git a/roles/openshift_web_console/tasks/install.yml b/roles/openshift_web_console/tasks/install.yml index cc5eef47d..f79a05c94 100644 --- a/roles/openshift_web_console/tasks/install.yml +++ b/roles/openshift_web_console/tasks/install.yml @@ -33,7 +33,7 @@ - name: Copy web console templates to temp directory copy: - src: "{{ __console_files_location }}/{{ item }}" + src: "{{ item }}" dest: "{{ mktemp.stdout }}/{{ item }}" with_items: - "{{ __console_template_file }}" @@ -71,6 +71,9 @@ - set_fact: config_to_migrate: "{{ master_config_output.content | b64decode | from_yaml }}" + - set_fact: + cro_plugin_enabled: "{{ config_to_migrate.admissionConfig is defined and config_to_migrate.admissionConfig.pluginConfig is defined and config_to_migrate.admissionConfig.pluginConfig.ClusterResourceOverrides is defined }}" + # Update properties in the config template based on inventory vars when the # asset config does not exist. 
- name: Set web console config properties from inventory variables @@ -87,7 +90,7 @@ - key: features#inactivityTimeoutMinutes value: "{{ openshift_web_console_inactivity_timeout_minutes | default(0) }}" - key: features#clusterResourceOverridesEnabled - value: "{{ openshift_web_console_cluster_resource_overrides_enabled | default(false) }}" + value: "{{ openshift_web_console_cluster_resource_overrides_enabled | default(cro_plugin_enabled) }}" - key: extensions#scriptURLs value: "{{ openshift_web_console_extension_script_urls | default([]) }}" - key: extensions#stylesheetURLs @@ -116,6 +119,8 @@ value: "{{ config_to_migrate.assetConfig.servingInfo.maxRequestsInFlight | default(0) }}" - key: servingInfo#requestTimeoutSeconds value: "{{ config_to_migrate.assetConfig.servingInfo.requestTimeoutSeconds | default(0) }}" + - key: features#clusterResourceOverridesEnabled + value: "{{ openshift_web_console_cluster_resource_overrides_enabled | default(cro_plugin_enabled) }}" separator: '#' state: present when: config_to_migrate.assetConfig is defined diff --git a/roles/openshift_web_console/tasks/remove_old_asset_config.yml b/roles/openshift_web_console/tasks/remove_old_asset_config.yml new file mode 100644 index 000000000..34158150c --- /dev/null +++ b/roles/openshift_web_console/tasks/remove_old_asset_config.yml @@ -0,0 +1,19 @@ +--- +# Remove the obsolete assetConfig stanza from master-config.yaml. Since the +# web console has been split out into a separate deployment, those settings +# are no longer used. +- name: Remove assetConfig from master-config.yaml + yedit: + state: absent + src: "{{ openshift.common.config_base }}/master/master-config.yaml" + key: assetConfig + +# This file was written by wire_aggregator.yml. It is no longer needed since +# the web console now discovers if the template service broker is running on +# startup. Remove the file if it exists. +- name: Remove obsolete web console / service catalog extension file + file: + state: absent + # Hard-code the path instead of using `openshift.common.config_base` since + # the path is hard-coded in wire_aggregator.yml. 
+ path: /etc/origin/master/openshift-ansible-catalog-console.js diff --git a/roles/openshift_web_console/vars/main.yml b/roles/openshift_web_console/vars/main.yml index e91048e38..72bff5d01 100644 --- a/roles/openshift_web_console/vars/main.yml +++ b/roles/openshift_web_console/vars/main.yml @@ -1,6 +1,4 @@ --- -__console_files_location: "../../../files/origin-components/" - __console_template_file: "console-template.yaml" __console_rbac_file: "console-rbac-template.yaml" __console_config_file: "console-config.yaml" diff --git a/roles/template_service_broker/files/apiserver-config.yaml b/roles/template_service_broker/files/apiserver-config.yaml new file mode 100644 index 000000000..e4048d1da --- /dev/null +++ b/roles/template_service_broker/files/apiserver-config.yaml @@ -0,0 +1,4 @@ +kind: TemplateServiceBrokerConfig +apiVersion: config.templateservicebroker.openshift.io/v1 +templateNamespaces: +- openshift diff --git a/roles/template_service_broker/files/apiserver-template.yaml b/roles/template_service_broker/files/apiserver-template.yaml new file mode 100644 index 000000000..4dd9395d0 --- /dev/null +++ b/roles/template_service_broker/files/apiserver-template.yaml @@ -0,0 +1,125 @@ +apiVersion: template.openshift.io/v1 +kind: Template +metadata: + name: template-service-broker-apiserver +parameters: +- name: IMAGE + value: openshift/origin-template-service-broker:latest +- name: NAMESPACE + value: openshift-template-service-broker +- name: LOGLEVEL + value: "0" +- name: API_SERVER_CONFIG + value: | + kind: TemplateServiceBrokerConfig + apiVersion: config.templateservicebroker.openshift.io/v1 + templateNamespaces: + - openshift +- name: NODE_SELECTOR + value: "{}" +objects: + +# to create the tsb server +- apiVersion: extensions/v1beta1 + kind: DaemonSet + metadata: + namespace: ${NAMESPACE} + name: apiserver + labels: + apiserver: "true" + spec: + template: + metadata: + name: apiserver + labels: + apiserver: "true" + spec: + serviceAccountName: apiserver + containers: + - name: c + image: ${IMAGE} + imagePullPolicy: IfNotPresent + command: + - "/usr/bin/template-service-broker" + - "start" + - "template-service-broker" + - "--secure-port=8443" + - "--audit-log-path=-" + - "--tls-cert-file=/var/serving-cert/tls.crt" + - "--tls-private-key-file=/var/serving-cert/tls.key" + - "--v=${LOGLEVEL}" + - "--config=/var/apiserver-config/apiserver-config.yaml" + ports: + - containerPort: 8443 + volumeMounts: + - mountPath: /var/serving-cert + name: serving-cert + - mountPath: /var/apiserver-config + name: apiserver-config + readinessProbe: + httpGet: + path: /healthz + port: 8443 + scheme: HTTPS + nodeSelector: "${{NODE_SELECTOR}}" + volumes: + - name: serving-cert + secret: + defaultMode: 420 + secretName: apiserver-serving-cert + - name: apiserver-config + configMap: + defaultMode: 420 + name: apiserver-config + +# to create the config for the TSB +- apiVersion: v1 + kind: ConfigMap + metadata: + namespace: ${NAMESPACE} + name: apiserver-config + data: + apiserver-config.yaml: ${API_SERVER_CONFIG} + +# to be able to assign powers to the process +- apiVersion: v1 + kind: ServiceAccount + metadata: + namespace: ${NAMESPACE} + name: apiserver + +# to be able to expose TSB inside the cluster +- apiVersion: v1 + kind: Service + metadata: + namespace: ${NAMESPACE} + name: apiserver + annotations: + service.alpha.openshift.io/serving-cert-secret-name: apiserver-serving-cert + spec: + selector: + apiserver: "true" + ports: + - port: 443 + targetPort: 8443 + +# This service account will be granted 
permission to call the TSB. +# The token for this SA will be provided to the service catalog for +# use when calling the TSB. +- apiVersion: v1 + kind: ServiceAccount + metadata: + namespace: ${NAMESPACE} + name: templateservicebroker-client + +# This secret will be populated with a copy of the templateservicebroker-client SA's +# auth token. Since this secret has a static name, it can be referenced more +# easily than the auto-generated secret for the service account. +- apiVersion: v1 + kind: Secret + metadata: + namespace: ${NAMESPACE} + name: templateservicebroker-client + annotations: + kubernetes.io/service-account.name: templateservicebroker-client + type: kubernetes.io/service-account-token diff --git a/roles/template_service_broker/files/rbac-template.yaml b/roles/template_service_broker/files/rbac-template.yaml new file mode 100644 index 000000000..0937a9065 --- /dev/null +++ b/roles/template_service_broker/files/rbac-template.yaml @@ -0,0 +1,92 @@ +apiVersion: template.openshift.io/v1 +kind: Template +metadata: + name: template-service-broker-rbac +parameters: +- name: NAMESPACE + value: openshift-template-service-broker +- name: KUBE_SYSTEM + value: kube-system +objects: + +# Grant the service account permission to call the TSB +- apiVersion: rbac.authorization.k8s.io/v1beta1 + kind: ClusterRoleBinding + metadata: + name: templateservicebroker-client + roleRef: + kind: ClusterRole + name: system:openshift:templateservicebroker-client + subjects: + - kind: ServiceAccount + namespace: ${NAMESPACE} + name: templateservicebroker-client + +# to delegate authentication and authorization +- apiVersion: rbac.authorization.k8s.io/v1beta1 + kind: ClusterRoleBinding + metadata: + name: auth-delegator-${NAMESPACE} + roleRef: + kind: ClusterRole + name: system:auth-delegator + subjects: + - kind: ServiceAccount + namespace: ${NAMESPACE} + name: apiserver + +# to have the template service broker powers +- apiVersion: rbac.authorization.k8s.io/v1beta1 + kind: ClusterRoleBinding + metadata: + name: tsb-${NAMESPACE} + roleRef: + kind: ClusterRole + name: system:openshift:controller:template-service-broker + subjects: + - kind: ServiceAccount + namespace: ${NAMESPACE} + name: apiserver + +# to read the config for terminating authentication +- apiVersion: rbac.authorization.k8s.io/v1beta1 + kind: RoleBinding + metadata: + namespace: ${KUBE_SYSTEM} + name: extension-apiserver-authentication-reader-${NAMESPACE} + roleRef: + kind: Role + name: extension-apiserver-authentication-reader + subjects: + - kind: ServiceAccount + namespace: ${NAMESPACE} + name: apiserver + +# allow the kube service catalog's SA to read the static secret defined +# above, which will contain the token for the SA that can call the TSB. 
+- apiVersion: rbac.authorization.k8s.io/v1beta1 + kind: Role + metadata: + name: templateservicebroker-auth-reader + namespace: ${NAMESPACE} + rules: + - apiGroups: + - "" + resourceNames: + - templateservicebroker-client + resources: + - secrets + verbs: + - get +- apiVersion: rbac.authorization.k8s.io/v1beta1 + kind: RoleBinding + metadata: + namespace: ${NAMESPACE} + name: templateservicebroker-auth-reader + roleRef: + kind: Role + name: templateservicebroker-auth-reader + subjects: + - kind: ServiceAccount + namespace: kube-service-catalog + name: service-catalog-controller diff --git a/roles/template_service_broker/files/template-service-broker-registration.yaml b/roles/template_service_broker/files/template-service-broker-registration.yaml new file mode 100644 index 000000000..95fb72924 --- /dev/null +++ b/roles/template_service_broker/files/template-service-broker-registration.yaml @@ -0,0 +1,25 @@ +apiVersion: template.openshift.io/v1 +kind: Template +metadata: + name: template-service-broker-registration +parameters: +- name: TSB_NAMESPACE + value: openshift-template-service-broker +- name: CA_BUNDLE + required: true +objects: +# register the tsb with the service catalog +- apiVersion: servicecatalog.k8s.io/v1beta1 + kind: ClusterServiceBroker + metadata: + name: template-service-broker + spec: + url: https://apiserver.${TSB_NAMESPACE}.svc:443/brokers/template.openshift.io + insecureSkipTLSVerify: false + caBundle: ${CA_BUNDLE} + authInfo: + bearer: + secretRef: + kind: Secret + name: templateservicebroker-client + namespace: ${TSB_NAMESPACE} diff --git a/roles/template_service_broker/tasks/install.yml b/roles/template_service_broker/tasks/install.yml index 4e6ad2ae5..d0a07c48d 100644 --- a/roles/template_service_broker/tasks/install.yml +++ b/roles/template_service_broker/tasks/install.yml @@ -28,7 +28,7 @@ changed_when: false - copy: - src: "{{ __tsb_files_location }}/{{ item }}" + src: "{{ item }}" dest: "{{ mktemp.stdout }}/{{ item }}" with_items: - "{{ __tsb_template_file }}" diff --git a/roles/template_service_broker/tasks/remove.yml b/roles/template_service_broker/tasks/remove.yml index 48dc1327e..b46dd4771 100644 --- a/roles/template_service_broker/tasks/remove.yml +++ b/roles/template_service_broker/tasks/remove.yml @@ -9,7 +9,7 @@ changed_when: false - copy: - src: "{{ __tsb_files_location }}/{{ item }}" + src: "{{ item }}" dest: "{{ mktemp.stdout }}/{{ item }}" with_items: - "{{ __tsb_template_file }}" diff --git a/roles/template_service_broker/vars/main.yml b/roles/template_service_broker/vars/main.yml index a65340f16..7dec24a79 100644 --- a/roles/template_service_broker/vars/main.yml +++ b/roles/template_service_broker/vars/main.yml @@ -1,6 +1,4 @@ --- -__tsb_files_location: "../../../files/origin-components/" - __tsb_template_file: "apiserver-template.yaml" __tsb_config_file: "apiserver-config.yaml" __tsb_rbac_file: "rbac-template.yaml"
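The `src: "{{ item }}"` changes in install.yml and remove.yml work because Ansible resolves a bare `src` against the role's own files/ directory, so shipping the templates under roles/template_service_broker/files/ removes the brittle `../../../files/origin-components/` path. For illustration, the registration template above could be driven by a task like the following sketch (the signer certificate path and the `oc process | oc apply` pipeline are assumptions, not code from this repo):

- name: Register the template service broker with the service catalog
  shell: >
    oc process -f {{ mktemp.stdout }}/template-service-broker-registration.yaml
    --param CA_BUNDLE="{{ lookup('file', '/etc/origin/master/service-signer.crt') | b64encode }}"
    | oc apply -f -
  # CA_BUNDLE must be the base64-encoded signing CA so the service
  # catalog can verify the apiserver.<namespace>.svc serving cert.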