36 files changed, 529 insertions, 92 deletions
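The README_libvirt.md change at the top of the diff below adds a hard requirement for an SSH keypair at `~/.ssh/id_rsa` and `~/.ssh/id_rsa.pub`, and notes that the names are fixed. A hedged convenience sketch, not part of this changeset, that creates such a keypair if it is missing; the key type, key size, and the use of `creates:` for idempotency are illustrative assumptions:

```yaml
---
# Illustrative helper, not part of this changeset: make sure the fixed-name
# keypair required by the libvirt playbooks exists before running them.
- hosts: localhost
  connection: local
  gather_facts: false
  tasks:
    - name: Ensure ~/.ssh exists
      file:
        path: "{{ lookup('env', 'HOME') }}/.ssh"
        state: directory
        mode: "0700"

    - name: Generate ~/.ssh/id_rsa and ~/.ssh/id_rsa.pub if they do not exist
      command: ssh-keygen -t rsa -b 4096 -N "" -f {{ lookup('env', 'HOME') }}/.ssh/id_rsa
      args:
        creates: "{{ lookup('env', 'HOME') }}/.ssh/id_rsa"
```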
diff --git a/README_libvirt.md b/README_libvirt.md
index c523d83fb..1661681a0 100644
--- a/README_libvirt.md
+++ b/README_libvirt.md
@@ -15,7 +15,7 @@ Install dependencies
 3. Install [ebtables](http://ebtables.netfilter.org/)
 4. Install [qemu and qemu-system-x86](http://wiki.qemu.org/Main_Page)
 5. Install [libvirt-python and libvirt](http://libvirt.org/)
-6. Install [genisoimage](http://cdrkit.org/)
+6. Install [genisoimage](http://cdrkit.org/) or [mkisofs](http://cdrtools.sourceforge.net/private/cdrecord.html)
 7. Enable and start the libvirt daemon, e.g:
   - `systemctl enable libvirtd`
   - `systemctl start libvirtd`
@@ -23,6 +23,7 @@ Install dependencies
 9. Check that your `$HOME` is accessible to the qemu user²
 10. Configure dns resolution on the host³
 11. Install libselinux-python
+12. Ensure you have an SSH private and public keypair at `~/.ssh/id_rsa` and `~/.ssh/id_rsa.pub`⁴
 
 #### ¹ Depending on your distribution, libvirt access may be denied by default or may require a password at each access.
 
@@ -103,6 +104,11 @@ sudo vi /etc/NetworkManager/dnsmasq.d/libvirt_dnsmasq.conf
 server=/example.com/192.168.55.1
 ```
 
+#### ⁴ Private and public keypair in ~/.ssh/id_rsa and ~/.ssh/id_rsa.pub
+
+This playbook uses SSH keys to communicate with the libvirt-driven virtual machines. At this time the names of those keys are fixed and cannot be changed.
+
+
 Test The Setup
 --------------
diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py
index c6d0e69eb..36a90a870 100644
--- a/filter_plugins/oo_filters.py
+++ b/filter_plugins/oo_filters.py
@@ -194,10 +194,10 @@ def oo_select_keys_from_list(data, keys):
     """
     if not isinstance(data, list):
-        raise errors.AnsibleFilterError("|failed expects to filter on a list")
+        raise errors.AnsibleFilterError("|oo_select_keys_from_list failed expects to filter on a list")
 
     if not isinstance(keys, list):
-        raise errors.AnsibleFilterError("|failed expects first param is a list")
+        raise errors.AnsibleFilterError("|oo_select_keys_from_list failed expects first param is a list")
 
     # Gather up the values for the list of keys passed in
     retval = [oo_select_keys(item, keys) for item in data]
@@ -213,10 +213,10 @@ def oo_select_keys(data, keys):
     """
     if not isinstance(data, Mapping):
-        raise errors.AnsibleFilterError("|failed expects to filter on a dict or object")
+        raise errors.AnsibleFilterError("|oo_select_keys failed expects to filter on a dict or object")
 
     if not isinstance(keys, list):
-        raise errors.AnsibleFilterError("|failed expects first param is a list")
+        raise errors.AnsibleFilterError("|oo_select_keys failed expects first param is a list")
 
     # Gather up the values for the list of keys passed in
     retval = [data[key] for key in keys if key in data]
diff --git a/inventory/byo/hosts.byo.glusterfs.external.example b/inventory/byo/hosts.byo.glusterfs.external.example
index 628d3a3f7..5a284ce97 100644
--- a/inventory/byo/hosts.byo.glusterfs.external.example
+++ b/inventory/byo/hosts.byo.glusterfs.external.example
@@ -31,13 +31,13 @@ openshift_storage_glusterfs_is_native=False
 openshift_storage_glusterfs_heketi_url=172.0.0.1
 
 [masters]
-master node=True storage=True master=True
+master
 
 [nodes]
-master node=True storage=True master=True openshift_schedulable=False
-node0 node=True openshift_schedulable=True
-node1 node=True openshift_schedulable=True
-node2 node=True openshift_schedulable=True
+master openshift_schedulable=False
+node0 openshift_schedulable=True
+node1 openshift_schedulable=True
+node2 openshift_schedulable=True
 
 # Specify the glusterfs group,
which contains the nodes of the external # GlusterFS cluster. At a minimum, each node must have "glusterfs_hostname" diff --git a/inventory/byo/hosts.byo.glusterfs.mixed.example b/inventory/byo/hosts.byo.glusterfs.mixed.example index fd47cb9d5..d16df6470 100644 --- a/inventory/byo/hosts.byo.glusterfs.mixed.example +++ b/inventory/byo/hosts.byo.glusterfs.mixed.example @@ -34,13 +34,13 @@ openshift_storage_glusterfs_heketi_is_native=True openshift_storage_glusterfs_heketi_executor=ssh openshift_storage_glusterfs_heketi_ssh_keyfile=/root/id_rsa [masters] -master node=True storage=True master=True +master [nodes] -master node=True storage=True master=True openshift_schedulable=False -node0 node=True openshift_schedulable=True -node1 node=True openshift_schedulable=True -node2 node=True openshift_schedulable=True +master openshift_schedulable=False +node0 openshift_schedulable=True +node1 openshift_schedulable=True +node2 openshift_schedulable=True # Specify the glusterfs group, which contains the nodes of the external # GlusterFS cluster. At a minimum, each node must have "glusterfs_hostname" diff --git a/inventory/byo/hosts.byo.glusterfs.native.example b/inventory/byo/hosts.byo.glusterfs.native.example index a3e2570c9..c1a1f6f84 100644 --- a/inventory/byo/hosts.byo.glusterfs.native.example +++ b/inventory/byo/hosts.byo.glusterfs.native.example @@ -24,15 +24,15 @@ ansible_ssh_user=root openshift_deployment_type=origin [masters] -master node=True storage=True master=True +master [nodes] -master node=True storage=True master=True openshift_schedulable=False +master openshift_schedulable=False # A hosted registry, by default, will only be deployed on nodes labeled # "region=infra". -node0 node=True openshift_schedulable=True -node1 node=True openshift_schedulable=True -node2 node=True openshift_schedulable=True +node0 openshift_schedulable=True +node1 openshift_schedulable=True +node2 openshift_schedulable=True # Specify the glusterfs group, which contains the nodes that will host # GlusterFS storage pods. At a minimum, each node must have a diff --git a/inventory/byo/hosts.byo.glusterfs.registry-only.example b/inventory/byo/hosts.byo.glusterfs.registry-only.example index 999518abe..31a85ee42 100644 --- a/inventory/byo/hosts.byo.glusterfs.registry-only.example +++ b/inventory/byo/hosts.byo.glusterfs.registry-only.example @@ -30,15 +30,15 @@ openshift_deployment_type=origin openshift_hosted_registry_storage_kind=glusterfs [masters] -master node=True storage=True master=True +master [nodes] -master node=True storage=True master=True openshift_schedulable=False +master openshift_schedulable=False # A hosted registry, by default, will only be deployed on nodes labeled # "region=infra". -node0 node=True openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True -node1 node=True openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True -node2 node=True openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True +node0 openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True +node1 openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True +node2 openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True # Specify the glusterfs group, which contains the nodes that will host # GlusterFS storage pods. 
At a minimum, each node must have a diff --git a/inventory/byo/hosts.byo.glusterfs.storage-and-registry.example b/inventory/byo/hosts.byo.glusterfs.storage-and-registry.example index 1df79301a..54bd89ddc 100644 --- a/inventory/byo/hosts.byo.glusterfs.storage-and-registry.example +++ b/inventory/byo/hosts.byo.glusterfs.storage-and-registry.example @@ -31,20 +31,20 @@ openshift_deployment_type=origin openshift_hosted_registry_storage_kind=glusterfs [masters] -master node=True storage=True master=True +master [nodes] -master node=True storage=True master=True openshift_schedulable=False +master openshift_schedulable=False # It is recommended to not use a single cluster for both general and registry # storage, so two three-node clusters will be required. -node0 node=True openshift_schedulable=True -node1 node=True openshift_schedulable=True -node2 node=True openshift_schedulable=True +node0 openshift_schedulable=True +node1 openshift_schedulable=True +node2 openshift_schedulable=True # A hosted registry, by default, will only be deployed on nodes labeled # "region=infra". -node3 node=True openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True -node4 node=True openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True -node5 node=True openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True +node3 openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True +node4 openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True +node5 openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True # Specify the glusterfs group, which contains the nodes that will host # GlusterFS storage pods. At a minimum, each node must have a diff --git a/inventory/byo/hosts.origin.example b/inventory/byo/hosts.origin.example index 6e53b4fd9..de7493f71 100644 --- a/inventory/byo/hosts.origin.example +++ b/inventory/byo/hosts.origin.example @@ -156,8 +156,9 @@ openshift_release=v3.6 # modify image streams to point at that registry by setting the following to true #openshift_examples_modify_imagestreams=true -# Origin copr repo +# OpenShift repository configuration #openshift_additional_repos=[{'id': 'openshift-origin-copr', 'name': 'OpenShift Origin COPR', 'baseurl': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/', 'enabled': 1, 'gpgcheck': 1, 'gpgkey': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg'}] +#openshift_repos_enable_testing=false # htpasswd auth openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/origin/master/htpasswd'}] @@ -882,6 +883,14 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # openshift_upgrade_nodes_serial=4 openshift_upgrade_nodes_max_fail_percentage=49 # where as this would not # openshift_upgrade_nodes_serial=4 openshift_upgrade_nodes_max_fail_percentage=50 +# +# Multiple data migrations take place and if they fail they will fail the upgrade +# You may wish to disable these or make them non fatal +# +# openshift_upgrade_pre_storage_migration_enabled=true +# openshift_upgrade_pre_storage_migration_fatal==true +# openshift_upgrade_post_storage_migration_enabled=true +# openshift_upgrade_post_storage_migration_fatal==false # host group for masters [masters] diff --git a/inventory/byo/hosts.ose.example b/inventory/byo/hosts.ose.example index e3e9220fc..62a364e0d 100644 --- 
a/inventory/byo/hosts.ose.example +++ b/inventory/byo/hosts.ose.example @@ -155,8 +155,9 @@ openshift_release=v3.6 # modify image streams to point at that registry by setting the following to true #openshift_examples_modify_imagestreams=true -# Additional yum repos to install +# OpenShift repository configuration #openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'baseurl': 'http://example.com/puddle/build/AtomicOpenShift/3.1/latest/RH7-RHOSE-3.0/$basearch/os', 'enabled': 1, 'gpgcheck': 0}] +#openshift_repos_enable_testing=false # htpasswd auth openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/origin/master/htpasswd'}] @@ -878,6 +879,14 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', # openshift_upgrade_nodes_serial=4 openshift_upgrade_nodes_max_fail_percentage=49 # where as this would not # openshift_upgrade_nodes_serial=4 openshift_upgrade_nodes_max_fail_percentage=50 +# +# Multiple data migrations take place and if they fail they will fail the upgrade +# You may wish to disable these or make them non fatal +# +# openshift_upgrade_pre_storage_migration_enabled=true +# openshift_upgrade_pre_storage_migration_fatal==true +# openshift_upgrade_post_storage_migration_enabled=true +# openshift_upgrade_post_storage_migration_fatal==false # host group for masters [masters] diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml index 2b2f10aee..695dc3140 100644 --- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml @@ -12,6 +12,12 @@ command: > {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig migrate storage --include=* --confirm + register: l_pb_upgrade_control_plane_pre_upgrade_storage + when: openshift_upgrade_pre_storage_migration_enabled | default(true,true) | bool + failed_when: + - openshift_upgrade_pre_storage_migration_enabled | default(true,true) | bool + - l_pb_upgrade_control_plane_pre_upgrade_storage.rc != 0 + - openshift_upgrade_pre_storage_migration_fatal | default(true,true) | bool # If facts cache were for some reason deleted, this fact may not be set, and if not set # it will always default to true. 
This causes problems for the etcd data dir fact detection @@ -140,16 +146,21 @@ - include: "{{ openshift_master_upgrade_post_hook }}" when: openshift_master_upgrade_post_hook is defined - - set_fact: - master_update_complete: True - -- name: Post master upgrade - Upgrade clusterpolicies storage - hosts: oo_first_master - tasks: - - name: Upgrade clusterpolicies storage + - name: Post master upgrade - Upgrade clusterpolicies storage command: > {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig migrate storage --include=clusterpolicies --confirm + register: l_pb_upgrade_control_plane_post_upgrade_storage + when: openshift_upgrade_post_storage_migration_enabled | default(true,true) | bool + failed_when: + - openshift_upgrade_post_storage_migration_enabled | default(true,true) | bool + - l_pb_upgrade_control_plane_post_upgrade_storage.rc != 0 + - openshift_upgrade_post_storage_migration_fatal | default(false,true) | bool + run_once: true + delegate_to: oo_first_master + + - set_fact: + master_update_complete: True ############################################################################## # Gate on master update complete @@ -230,11 +241,17 @@ - reconcile_scc_result.rc == 0 run_once: true - - name: Upgrade job storage + - name: Migrate storage post policy reconciliation command: > {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig migrate storage --include=* --confirm run_once: true + register: l_pb_upgrade_control_plane_post_upgrade_storage + when: openshift_upgrade_post_storage_migration_enabled | default(true,true) | bool + failed_when: + - openshift_upgrade_post_storage_migration_enabled | default(true,true) | bool + - l_pb_upgrade_control_plane_post_upgrade_storage.rc != 0 + - openshift_upgrade_post_storage_migration_fatal | default(false,true) | bool - set_fact: reconcile_complete: True diff --git a/playbooks/common/openshift-glusterfs/config.yml b/playbooks/common/openshift-glusterfs/config.yml index 1efdfb336..edc15a3f2 100644 --- a/playbooks/common/openshift-glusterfs/config.yml +++ b/playbooks/common/openshift-glusterfs/config.yml @@ -1,6 +1,6 @@ --- -- name: Open firewall ports for GlusterFS - hosts: oo_glusterfs_to_config +- name: Open firewall ports for GlusterFS nodes + hosts: glusterfs vars: os_firewall_allow: - service: glusterfs_sshd @@ -14,7 +14,24 @@ roles: - role: os_firewall when: - - openshift_storage_glusterfs_is_native | default(True) + - openshift_storage_glusterfs_is_native | default(True) | bool + +- name: Open firewall ports for GlusterFS registry nodes + hosts: glusterfs_registry + vars: + os_firewall_allow: + - service: glusterfs_sshd + port: "2222/tcp" + - service: glusterfs_daemon + port: "24007/tcp" + - service: glusterfs_management + port: "24008/tcp" + - service: glusterfs_bricks + port: "49152-49251/tcp" + roles: + - role: os_firewall + when: + - openshift_storage_glusterfs_registry_is_native | default(True) | bool - name: Configure GlusterFS hosts: oo_first_master diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml index 7d3a371e3..5de03951c 100644 --- a/playbooks/common/openshift-master/config.yml +++ b/playbooks/common/openshift-master/config.yml @@ -127,6 +127,9 @@ | union(groups['oo_etcd_to_config'] | default([]))) | oo_collect('openshift.common.hostname') | default([]) | join (',') }}" + openshift_no_proxy_etcd_host_ips: "{{ hostvars | oo_select_keys(groups['oo_etcd_to_config'] | 
default([])) + | oo_collect('openshift.common.ip') | default([]) | join(',') + }}" roles: - role: openshift_master openshift_ca_host: "{{ groups.oo_first_master.0 }}" diff --git a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml index ccd29be29..4df86effa 100644 --- a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml +++ b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml @@ -49,11 +49,15 @@ - '{{ instances }}' - [ user-data, meta-data ] +- name: Check for genisoimage + command: which genisoimage + register: which_genisoimage + - name: Create the cloud-init config drive - command: 'genisoimage -output {{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso -volid cidata -joliet -rock user-data meta-data' + command: "{{ 'genisoimage' if which_genisoimage.rc == 0 else 'mkisofs' }} -output {{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso -volid cidata -joliet -rock user-data meta-data" args: - chdir: '{{ libvirt_storage_pool_path }}/{{ item }}_configdrive/' - creates: '{{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso' + chdir: "{{ libvirt_storage_pool_path }}/{{ item }}_configdrive/" + creates: "{{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso" with_items: '{{ instances }}' - name: Refresh the libvirt storage pool for openshift diff --git a/roles/ansible_service_broker/vars/openshift-enterprise.yml b/roles/ansible_service_broker/vars/openshift-enterprise.yml index f672760aa..0b3a2a69d 100644 --- a/roles/ansible_service_broker/vars/openshift-enterprise.yml +++ b/roles/ansible_service_broker/vars/openshift-enterprise.yml @@ -1,6 +1,6 @@ --- -__ansible_service_broker_image_prefix: registry.access.redhat.com/openshift3/ +__ansible_service_broker_image_prefix: registry.access.redhat.com/openshift3/ose- __ansible_service_broker_image_tag: latest __ansible_service_broker_etcd_image_prefix: rhel7/ diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml index f0661209f..8c2f392ee 100644 --- a/roles/etcd/tasks/main.yml +++ b/roles/etcd/tasks/main.yml @@ -14,7 +14,8 @@ name: etcd_common vars: r_etcd_common_action: drop_etcdctl - when: openshift_etcd_etcdctl_profile | default(true) | bool + when: + - openshift_etcd_etcdctl_profile | default(true) | bool - block: - name: Pull etcd container diff --git a/roles/openshift_default_storage_class/defaults/main.yml b/roles/openshift_default_storage_class/defaults/main.yml index 4f371fd89..bdece7640 100644 --- a/roles/openshift_default_storage_class/defaults/main.yml +++ b/roles/openshift_default_storage_class/defaults/main.yml @@ -1,7 +1,7 @@ --- openshift_storageclass_defaults: aws: - provisioner: kubernetes.io/aws-ebs + provisioner: aws-ebs name: gp2 parameters: type: gp2 @@ -9,8 +9,9 @@ openshift_storageclass_defaults: encrypted: 'false' gce: name: standard - provisioner: kubernetes.io/gce-pd - type: pd-standard + provisioner: gce-pd + parameters: + type: pd-standard openshift_storageclass_default: "true" openshift_storageclass_name: "{{ openshift_storageclass_defaults[openshift_cloudprovider_kind]['name'] }}" diff --git a/roles/openshift_default_storage_class/tasks/main.yml b/roles/openshift_default_storage_class/tasks/main.yml index 82cab6746..172e2ac25 100644 --- a/roles/openshift_default_storage_class/tasks/main.yml +++ b/roles/openshift_default_storage_class/tasks/main.yml @@ -4,8 +4,6 @@ oc_storageclass: name: "{{ openshift_storageclass_name }}" default_storage_class: "{{ openshift_storageclass_default | 
default('true') | string}}" - parameters: - type: "{{ openshift_storageclass_parameters.type | default('gp2') }}" - encrypted: "{{ openshift_storageclass_parameters.encrypted | default('false') | string }}" - kmsKeyId: "{{ openshift_storageclass_parameters.kmsKeyId | default('') }}" + parameters: "{{ openshift_storageclass_parameters }}" + provisioner: "{{ openshift_storageclass_provisioner }}" run_once: true diff --git a/roles/openshift_excluder/tasks/exclude.yml b/roles/openshift_excluder/tasks/exclude.yml index 934f1b2d2..1b4818df9 100644 --- a/roles/openshift_excluder/tasks/exclude.yml +++ b/roles/openshift_excluder/tasks/exclude.yml @@ -5,7 +5,7 @@ register: docker_excluder_stat - name: Enable docker excluder - command: "{{ r_openshift_excluder_service_type }}-docker-excluder exclude" + command: "/sbin/{{ r_openshift_excluder_service_type }}-docker-excluder exclude" when: - r_openshift_excluder_enable_docker_excluder | bool - docker_excluder_stat.stat.exists @@ -16,7 +16,7 @@ register: openshift_excluder_stat - name: Enable openshift excluder - command: "{{ r_openshift_excluder_service_type }}-excluder exclude" + command: "/sbin/{{ r_openshift_excluder_service_type }}-excluder exclude" when: - r_openshift_excluder_enable_openshift_excluder | bool - openshift_excluder_stat.stat.exists diff --git a/roles/openshift_excluder/tasks/unexclude.yml b/roles/openshift_excluder/tasks/unexclude.yml index a5ce8d5c7..a68165bde 100644 --- a/roles/openshift_excluder/tasks/unexclude.yml +++ b/roles/openshift_excluder/tasks/unexclude.yml @@ -9,7 +9,7 @@ register: docker_excluder_stat - name: disable docker excluder - command: "{{ r_openshift_excluder_service_type }}-docker-excluder unexclude" + command: "/sbin/{{ r_openshift_excluder_service_type }}-docker-excluder unexclude" when: - unexclude_docker_excluder | default(false) | bool - docker_excluder_stat.stat.exists @@ -20,7 +20,7 @@ register: openshift_excluder_stat - name: disable openshift excluder - command: "{{ r_openshift_excluder_service_type }}-excluder unexclude" + command: "/sbin/{{ r_openshift_excluder_service_type }}-excluder unexclude" when: - unexclude_openshift_excluder | default(false) | bool - openshift_excluder_stat.stat.exists diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py index 4712ca3a8..49cc51b48 100755 --- a/roles/openshift_facts/library/openshift_facts.py +++ b/roles/openshift_facts/library/openshift_facts.py @@ -1647,6 +1647,13 @@ def set_proxy_facts(facts): common['no_proxy'] = common['no_proxy'].split(",") elif 'no_proxy' not in common: common['no_proxy'] = [] + + # See https://bugzilla.redhat.com/show_bug.cgi?id=1466783 + # masters behind a proxy need to connect to etcd via IP + if 'no_proxy_etcd_host_ips' in common: + if isinstance(common['no_proxy_etcd_host_ips'], string_types): + common['no_proxy'].extend(common['no_proxy_etcd_host_ips'].split(',')) + if 'generate_no_proxy_hosts' in common and safe_get_bool(common['generate_no_proxy_hosts']): if 'no_proxy_internal_hostnames' in common: common['no_proxy'].extend(common['no_proxy_internal_hostnames'].split(',')) diff --git a/roles/openshift_health_checker/openshift_checks/logging/logging.py b/roles/openshift_health_checker/openshift_checks/logging/logging.py index 6e951e82c..02a094007 100644 --- a/roles/openshift_health_checker/openshift_checks/logging/logging.py +++ b/roles/openshift_health_checker/openshift_checks/logging/logging.py @@ -12,20 +12,21 @@ class LoggingCheck(OpenShiftCheck): """Base class for 
logging component checks""" name = "logging" + logging_namespace = "logging" @classmethod def is_active(cls, task_vars): - return super(LoggingCheck, cls).is_active(task_vars) and cls.is_first_master(task_vars) + logging_deployed = get_var(task_vars, "openshift_hosted_logging_deploy", default=False) + return super(LoggingCheck, cls).is_active(task_vars) and cls.is_first_master(task_vars) and logging_deployed @staticmethod def is_first_master(task_vars): - """Run only on first master and only when logging is configured. Returns: bool""" - logging_deployed = get_var(task_vars, "openshift_hosted_logging_deploy", default=True) + """Run only on first master. Returns: bool""" # Note: It would be nice to use membership in oo_first_master group, however for now it # seems best to avoid requiring that setup and just check this is the first master. hostname = get_var(task_vars, "ansible_ssh_host") or [None] masters = get_var(task_vars, "groups", "masters", default=None) or [None] - return logging_deployed and masters[0] == hostname + return masters and masters[0] == hostname def run(self, tmp, task_vars): pass @@ -45,7 +46,7 @@ class LoggingCheck(OpenShiftCheck): raise ValueError() except ValueError: # successful run but non-parsing data generally means there were no pods in the namespace - return None, 'There are no pods in the {} namespace. Is logging deployed?'.format(namespace) + return None, 'No pods were found for the "{}" logging component.'.format(logging_component) return pods['items'], None diff --git a/roles/openshift_health_checker/openshift_checks/logging/logging_index_time.py b/roles/openshift_health_checker/openshift_checks/logging/logging_index_time.py new file mode 100644 index 000000000..2ddd7549d --- /dev/null +++ b/roles/openshift_health_checker/openshift_checks/logging/logging_index_time.py @@ -0,0 +1,132 @@ +""" +Check for ensuring logs from pods can be queried in a reasonable amount of time. +""" + +import json +import time + +from uuid import uuid4 + +from openshift_checks import get_var, OpenShiftCheckException +from openshift_checks.logging.logging import LoggingCheck + + +ES_CMD_TIMEOUT_SECONDS = 30 + + +class LoggingIndexTime(LoggingCheck): + """Check that pod logs are aggregated and indexed in ElasticSearch within a reasonable amount of time.""" + name = "logging_index_time" + tags = ["health", "logging"] + + logging_namespace = "logging" + + def run(self, tmp, task_vars): + """Add log entry by making unique request to Kibana. Check for unique entry in the ElasticSearch pod logs.""" + try: + log_index_timeout = int( + get_var(task_vars, "openshift_check_logging_index_timeout_seconds", default=ES_CMD_TIMEOUT_SECONDS) + ) + except ValueError: + return { + "failed": True, + "msg": ('Invalid value provided for "openshift_check_logging_index_timeout_seconds". ' + 'Value must be an integer representing an amount in seconds.'), + } + + running_component_pods = dict() + + # get all component pods + self.logging_namespace = get_var(task_vars, "openshift_logging_namespace", default=self.logging_namespace) + for component, name in (['kibana', 'Kibana'], ['es', 'Elasticsearch']): + pods, error = self.get_pods_for_component( + self.execute_module, self.logging_namespace, component, task_vars, + ) + + if error: + msg = 'Unable to retrieve pods for the {} logging component: {}' + return {"failed": True, "changed": False, "msg": msg.format(name, error)} + + running_pods = self.running_pods(pods) + + if not running_pods: + msg = ('No {} pods in the "Running" state were found.' 
+ 'At least one pod is required in order to perform this check.') + return {"failed": True, "changed": False, "msg": msg.format(name)} + + running_component_pods[component] = running_pods + + uuid = self.curl_kibana_with_uuid(running_component_pods["kibana"][0], task_vars) + self.wait_until_cmd_or_err(running_component_pods["es"][0], uuid, log_index_timeout, task_vars) + return {} + + def wait_until_cmd_or_err(self, es_pod, uuid, timeout_secs, task_vars): + """Retry an Elasticsearch query every second until query success, or a defined + length of time has passed.""" + deadline = time.time() + timeout_secs + interval = 1 + while not self.query_es_from_es(es_pod, uuid, task_vars): + if time.time() + interval > deadline: + msg = "expecting match in Elasticsearch for message with uuid {}, but no matches were found after {}s." + raise OpenShiftCheckException(msg.format(uuid, timeout_secs)) + time.sleep(interval) + + def curl_kibana_with_uuid(self, kibana_pod, task_vars): + """curl Kibana with a unique uuid.""" + uuid = self.generate_uuid() + pod_name = kibana_pod["metadata"]["name"] + exec_cmd = "exec {pod_name} -c kibana -- curl --max-time 30 -s http://localhost:5601/{uuid}" + exec_cmd = exec_cmd.format(pod_name=pod_name, uuid=uuid) + + error_str = self.exec_oc(self.execute_module, self.logging_namespace, exec_cmd, [], task_vars) + + try: + error_code = json.loads(error_str)["statusCode"] + except KeyError: + msg = ('invalid response returned from Kibana request (Missing "statusCode" key):\n' + 'Command: {}\nResponse: {}').format(exec_cmd, error_str) + raise OpenShiftCheckException(msg) + except ValueError: + msg = ('invalid response returned from Kibana request (Non-JSON output):\n' + 'Command: {}\nResponse: {}').format(exec_cmd, error_str) + raise OpenShiftCheckException(msg) + + if error_code != 404: + msg = 'invalid error code returned from Kibana request. Expecting error code "404", but got "{}" instead.' + raise OpenShiftCheckException(msg.format(error_code)) + + return uuid + + def query_es_from_es(self, es_pod, uuid, task_vars): + """curl the Elasticsearch pod and look for a unique uuid in its logs.""" + pod_name = es_pod["metadata"]["name"] + exec_cmd = ( + "exec {pod_name} -- curl --max-time 30 -s -f " + "--cacert /etc/elasticsearch/secret/admin-ca " + "--cert /etc/elasticsearch/secret/admin-cert " + "--key /etc/elasticsearch/secret/admin-key " + "https://logging-es:9200/project.{namespace}*/_count?q=message:{uuid}" + ) + exec_cmd = exec_cmd.format(pod_name=pod_name, namespace=self.logging_namespace, uuid=uuid) + result = self.exec_oc(self.execute_module, self.logging_namespace, exec_cmd, [], task_vars) + + try: + count = json.loads(result)["count"] + except KeyError: + msg = 'invalid response from Elasticsearch query:\n"{}"\nMissing "count" key:\n{}' + raise OpenShiftCheckException(msg.format(exec_cmd, result)) + except ValueError: + msg = 'invalid response from Elasticsearch query:\n"{}"\nNon-JSON output:\n{}' + raise OpenShiftCheckException(msg.format(exec_cmd, result)) + + return count + + @staticmethod + def running_pods(pods): + """Filter pods that are running.""" + return [pod for pod in pods if pod['status']['phase'] == 'Running'] + + @staticmethod + def generate_uuid(): + """Wrap uuid generator. 
Allows for testing with expected values.""" + return str(uuid4()) diff --git a/roles/openshift_health_checker/test/logging_check_test.py b/roles/openshift_health_checker/test/logging_check_test.py index 128b76b12..4f71fbf52 100644 --- a/roles/openshift_health_checker/test/logging_check_test.py +++ b/roles/openshift_health_checker/test/logging_check_test.py @@ -128,7 +128,7 @@ def test_is_active(groups, logging_deployed, is_active): ( 'No resources found.', None, - 'There are no pods in the logging namespace', + 'No pods were found for the "es"', ), ( json.dumps({'items': [plain_kibana_pod, plain_es_pod, plain_curator_pod, fluentd_pod_node1]}), diff --git a/roles/openshift_health_checker/test/logging_index_time_test.py b/roles/openshift_health_checker/test/logging_index_time_test.py new file mode 100644 index 000000000..79e7c7d4c --- /dev/null +++ b/roles/openshift_health_checker/test/logging_index_time_test.py @@ -0,0 +1,182 @@ +import json + +import pytest + +from openshift_checks.logging.logging_index_time import LoggingIndexTime, OpenShiftCheckException + + +SAMPLE_UUID = "unique-test-uuid" + + +def canned_loggingindextime(exec_oc=None): + """Create a check object with a canned exec_oc method""" + check = LoggingIndexTime("dummy") # fails if a module is actually invoked + if exec_oc: + check.exec_oc = exec_oc + return check + + +plain_running_elasticsearch_pod = { + "metadata": { + "labels": {"component": "es", "deploymentconfig": "logging-es-data-master"}, + "name": "logging-es-data-master-1", + }, + "status": { + "containerStatuses": [{"ready": True}, {"ready": True}], + "phase": "Running", + } +} +plain_running_kibana_pod = { + "metadata": { + "labels": {"component": "kibana", "deploymentconfig": "logging-kibana"}, + "name": "logging-kibana-1", + }, + "status": { + "containerStatuses": [{"ready": True}, {"ready": True}], + "phase": "Running", + } +} +not_running_kibana_pod = { + "metadata": { + "labels": {"component": "kibana", "deploymentconfig": "logging-kibana"}, + "name": "logging-kibana-2", + }, + "status": { + "containerStatuses": [{"ready": True}, {"ready": False}], + "conditions": [{"status": "True", "type": "Ready"}], + "phase": "pending", + } +} + + +@pytest.mark.parametrize('pods, expect_pods', [ + ( + [not_running_kibana_pod], + [], + ), + ( + [plain_running_kibana_pod], + [plain_running_kibana_pod], + ), + ( + [], + [], + ) +]) +def test_check_running_pods(pods, expect_pods): + check = canned_loggingindextime(None) + pods = check.running_pods(pods) + assert pods == expect_pods + + +@pytest.mark.parametrize('name, json_response, uuid, timeout, extra_words', [ + ( + 'valid count in response', + { + "count": 1, + }, + SAMPLE_UUID, + 0.001, + [], + ), +], ids=lambda argval: argval[0]) +def test_wait_until_cmd_or_err_succeeds(name, json_response, uuid, timeout, extra_words): + def exec_oc(execute_module, ns, exec_cmd, args, task_vars): + return json.dumps(json_response) + + check = canned_loggingindextime(exec_oc) + check.wait_until_cmd_or_err(plain_running_elasticsearch_pod, uuid, timeout, None) + + +@pytest.mark.parametrize('name, json_response, uuid, timeout, extra_words', [ + ( + 'invalid json response', + { + "invalid_field": 1, + }, + SAMPLE_UUID, + 0.001, + ["invalid response", "Elasticsearch"], + ), + ( + 'empty response', + {}, + SAMPLE_UUID, + 0.001, + ["invalid response", "Elasticsearch"], + ), + ( + 'valid response but invalid match count', + { + "count": 0, + }, + SAMPLE_UUID, + 0.005, + ["expecting match", SAMPLE_UUID, "0.005s"], + ) +], ids=lambda argval: 
argval[0]) +def test_wait_until_cmd_or_err(name, json_response, uuid, timeout, extra_words): + def exec_oc(execute_module, ns, exec_cmd, args, task_vars): + return json.dumps(json_response) + + check = canned_loggingindextime(exec_oc) + with pytest.raises(OpenShiftCheckException) as error: + check.wait_until_cmd_or_err(plain_running_elasticsearch_pod, uuid, timeout, None) + + for word in extra_words: + assert word in str(error) + + +@pytest.mark.parametrize('name, json_response, uuid, extra_words', [ + ( + 'correct response code, found unique id is returned', + { + "statusCode": 404, + }, + "sample unique id", + ["sample unique id"], + ), +], ids=lambda argval: argval[0]) +def test_curl_kibana_with_uuid(name, json_response, uuid, extra_words): + def exec_oc(execute_module, ns, exec_cmd, args, task_vars): + return json.dumps(json_response) + + check = canned_loggingindextime(exec_oc) + check.generate_uuid = lambda: uuid + + result = check.curl_kibana_with_uuid(plain_running_kibana_pod, None) + + for word in extra_words: + assert word in result + + +@pytest.mark.parametrize('name, json_response, uuid, extra_words', [ + ( + 'invalid json response', + { + "invalid_field": "invalid", + }, + SAMPLE_UUID, + ["invalid response returned", 'Missing "statusCode" key'], + ), + ( + 'wrong error code in response', + { + "statusCode": 500, + }, + SAMPLE_UUID, + ["Expecting error code", "500"], + ), +], ids=lambda argval: argval[0]) +def test_failed_curl_kibana_with_uuid(name, json_response, uuid, extra_words): + def exec_oc(execute_module, ns, exec_cmd, args, task_vars): + return json.dumps(json_response) + + check = canned_loggingindextime(exec_oc) + check.generate_uuid = lambda: uuid + + with pytest.raises(OpenShiftCheckException) as error: + check.curl_kibana_with_uuid(plain_running_kibana_pod, None) + + for word in extra_words: + assert word in str(error) diff --git a/roles/openshift_logging_kibana/defaults/main.yml b/roles/openshift_logging_kibana/defaults/main.yml index 23337bcd2..b2556fd71 100644 --- a/roles/openshift_logging_kibana/defaults/main.yml +++ b/roles/openshift_logging_kibana/defaults/main.yml @@ -11,7 +11,7 @@ openshift_logging_kibana_nodeselector: "" openshift_logging_kibana_cpu_limit: null openshift_logging_kibana_memory_limit: 736Mi -openshift_logging_kibana_hostname: "kibana.router.default.svc.cluster.local" +openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' 
~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}" openshift_logging_kibana_es_host: "logging-es" openshift_logging_kibana_es_port: 9200 diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml index 19ce0eda8..0c4ee319c 100644 --- a/roles/openshift_master/tasks/main.yml +++ b/roles/openshift_master/tasks/main.yml @@ -140,6 +140,12 @@ - set_fact: openshift_push_via_dns: "{{ (openshift_use_dnsmasq | default(true) and openshift.common.version_gte_3_6) or (already_set.stdout | match('OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000')) }}" +- name: Set fact of all etcd host IPs + openshift_facts: + role: common + local_facts: + no_proxy_etcd_host_ips: "{{ openshift_no_proxy_etcd_host_ips }}" + - name: Install the systemd units include: systemd_units.yml diff --git a/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml b/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml index 8d7ee00ed..31129a6ac 100644 --- a/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml +++ b/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml @@ -26,7 +26,6 @@ - name: generate htpasswd file for hawkular metrics local_action: htpasswd path="{{ local_tmp.stdout }}/hawkular-metrics.htpasswd" name=hawkular password="{{ hawkular_metrics_pwd.content | b64decode }}" - no_log: true become: false - name: copy local generated passwords to target diff --git a/roles/openshift_metrics/tasks/generate_rolebindings.yaml b/roles/openshift_metrics/tasks/generate_rolebindings.yaml index e050c8eb2..1304ab8b5 100644 --- a/roles/openshift_metrics/tasks/generate_rolebindings.yaml +++ b/roles/openshift_metrics/tasks/generate_rolebindings.yaml @@ -13,3 +13,27 @@ - kind: ServiceAccount name: hawkular changed_when: no + +- name: generate hawkular-metrics cluster role binding for the hawkular service account + template: + src: rolebinding.j2 + dest: "{{ mktemp.stdout }}/templates/hawkular-cluster-rolebinding.yaml" + vars: + cluster: True + obj_name: hawkular-namespace-watcher + labels: + metrics-infra: hawkular + roleRef: + kind: ClusterRole + name: hawkular-metrics + subjects: + - kind: ServiceAccount + name: hawkular + namespace: "{{openshift_metrics_project}}" + changed_when: no + +- name: generate the hawkular cluster role + template: + src: hawkular_metrics_role.j2 + dest: "{{ mktemp.stdout }}/templates/hawkular-cluster-role.yaml" + changed_when: no diff --git a/roles/openshift_metrics/tasks/uninstall_metrics.yaml b/roles/openshift_metrics/tasks/uninstall_metrics.yaml index 9a5d52eb6..403b1252c 100644 --- a/roles/openshift_metrics/tasks/uninstall_metrics.yaml +++ b/roles/openshift_metrics/tasks/uninstall_metrics.yaml @@ -6,7 +6,7 @@ command: > {{ openshift.common.client_binary }} -n {{ openshift_metrics_project }} --config={{ mktemp.stdout }}/admin.kubeconfig delete --ignore-not-found --selector=metrics-infra - all,sa,secrets,templates,routes,pvc,rolebindings,clusterrolebindings + all,sa,secrets,templates,routes,pvc,rolebindings,clusterrolebindings,clusterrole register: delete_metrics changed_when: delete_metrics.stdout != 'No resources found' @@ -16,4 +16,5 @@ delete --ignore-not-found rolebinding/hawkular-view clusterrolebinding/heapster-cluster-reader + clusterrolebinding/hawkular-metrics changed_when: delete_metrics.stdout != 'No resources found' diff --git a/roles/openshift_metrics/templates/hawkular_metrics_role.j2 b/roles/openshift_metrics/templates/hawkular_metrics_role.j2 new file mode 
100644 index 000000000..6c9dbf5d6 --- /dev/null +++ b/roles/openshift_metrics/templates/hawkular_metrics_role.j2 @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: ClusterRole +metadata: + name: hawkular-metrics + labels: + metrics-infra: hawkular-metrics +rules: +- apiGroups: + - "" + resources: + - namespaces + verbs: + - list + - get + - watch diff --git a/roles/openshift_node/templates/node.service.j2 b/roles/openshift_node/templates/node.service.j2 index 1dbe58439..e12a52c15 100644 --- a/roles/openshift_node/templates/node.service.j2 +++ b/roles/openshift_node/templates/node.service.j2 @@ -24,8 +24,8 @@ WorkingDirectory=/var/lib/origin/ SyslogIdentifier={{ openshift.common.service_type }}-node Restart=always RestartSec=5s +TimeoutStartSec=300 OOMScoreAdjust=-999 -KillMode=process [Install] WantedBy=multi-user.target diff --git a/roles/openshift_node_upgrade/templates/node.service.j2 b/roles/openshift_node_upgrade/templates/node.service.j2 index 1dbe58439..e12a52c15 100644 --- a/roles/openshift_node_upgrade/templates/node.service.j2 +++ b/roles/openshift_node_upgrade/templates/node.service.j2 @@ -24,8 +24,8 @@ WorkingDirectory=/var/lib/origin/ SyslogIdentifier={{ openshift.common.service_type }}-node Restart=always RestartSec=5s +TimeoutStartSec=300 OOMScoreAdjust=-999 -KillMode=process [Install] WantedBy=multi-user.target diff --git a/roles/openshift_repos/defaults/main.yaml b/roles/openshift_repos/defaults/main.yaml index 7c5a14cd7..44f34ea7b 100644 --- a/roles/openshift_repos/defaults/main.yaml +++ b/roles/openshift_repos/defaults/main.yaml @@ -1,2 +1,3 @@ --- openshift_additional_repos: {} +openshift_repos_enable_testing: false diff --git a/roles/openshift_repos/tasks/main.yaml b/roles/openshift_repos/tasks/main.yaml index 8f8550e2d..7458db87e 100644 --- a/roles/openshift_repos/tasks/main.yaml +++ b/roles/openshift_repos/tasks/main.yaml @@ -33,7 +33,7 @@ # "centos-release-openshift-origin" package which configures the repository. # This task matches the file names provided by the package so that they are # not installed twice in different files and remains idempotent. 
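The openshift_repos change just below gates a new `yum-config-manager --enable centos-openshift-origin-testing` command behind the `openshift_repos_enable_testing` flag introduced in the role defaults above. A hedged alternative sketch, not what this changeset does, using the `ini_file` module so the step stays idempotent; the `.repo` file path is an assumption about how the centos-release-openshift-origin package lays out its repositories:

```yaml
# Hedged alternative to the command-based task below; assumes the testing repo
# is defined in /etc/yum.repos.d/CentOS-OpenShift-Origin.repo by the
# centos-release-openshift-origin package.
- name: Enable centos-openshift-origin-testing repository
  ini_file:
    dest: /etc/yum.repos.d/CentOS-OpenShift-Origin.repo
    section: centos-openshift-origin-testing
    option: enabled
    value: "1"
  when: openshift_repos_enable_testing | default(false) | bool
```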
- - name: Configure origin gpg keys if needed + - name: Configure origin repositories and gpg keys if needed copy: src: "{{ item.src }}" dest: "{{ item.dest }}" @@ -49,6 +49,10 @@ - openshift_deployment_type == 'origin' - openshift_enable_origin_repo | default(true) | bool + - name: Enable centos-openshift-origin-testing repository + command: yum-config-manager --enable centos-openshift-origin-testing + when: openshift_repos_enable_testing | bool + - name: Ensure clean repo cache in the event repos have been changed manually debug: msg: "First run of openshift_repos" diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml index b54a8e36c..7a2987883 100644 --- a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml +++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml @@ -2,29 +2,29 @@ - set_fact: glusterfs_timeout: "{{ openshift_storage_glusterfs_timeout }}" glusterfs_namespace: "{{ openshift_storage_glusterfs_namespace }}" - glusterfs_is_native: "{{ openshift_storage_glusterfs_is_native }}" + glusterfs_is_native: "{{ openshift_storage_glusterfs_is_native | bool }}" glusterfs_name: "{{ openshift_storage_glusterfs_name }}" glusterfs_nodeselector: "{{ openshift_storage_glusterfs_nodeselector | default(['storagenode', openshift_storage_glusterfs_name] | join('=')) | map_from_pairs }}" glusterfs_storageclass: "{{ openshift_storage_glusterfs_storageclass }}" glusterfs_image: "{{ openshift_storage_glusterfs_image }}" glusterfs_version: "{{ openshift_storage_glusterfs_version }}" - glusterfs_wipe: "{{ openshift_storage_glusterfs_wipe }}" - glusterfs_heketi_is_native: "{{ openshift_storage_glusterfs_heketi_is_native }}" - glusterfs_heketi_is_missing: "{{ openshift_storage_glusterfs_heketi_is_missing }}" - glusterfs_heketi_deploy_is_missing: "{{ openshift_storage_glusterfs_heketi_deploy_is_missing }}" + glusterfs_wipe: "{{ openshift_storage_glusterfs_wipe | bool }}" + glusterfs_heketi_is_native: "{{ openshift_storage_glusterfs_heketi_is_native | bool }}" + glusterfs_heketi_is_missing: "{{ openshift_storage_glusterfs_heketi_is_missing | bool }}" + glusterfs_heketi_deploy_is_missing: "{{ openshift_storage_glusterfs_heketi_deploy_is_missing | bool }}" glusterfs_heketi_cli: "{{ openshift_storage_glusterfs_heketi_cli }}" glusterfs_heketi_image: "{{ openshift_storage_glusterfs_heketi_image }}" glusterfs_heketi_version: "{{ openshift_storage_glusterfs_heketi_version }}" glusterfs_heketi_admin_key: "{{ openshift_storage_glusterfs_heketi_admin_key }}" glusterfs_heketi_user_key: "{{ openshift_storage_glusterfs_heketi_user_key }}" - glusterfs_heketi_topology_load: "{{ openshift_storage_glusterfs_heketi_topology_load }}" - glusterfs_heketi_wipe: "{{ openshift_storage_glusterfs_heketi_wipe }}" + glusterfs_heketi_topology_load: "{{ openshift_storage_glusterfs_heketi_topology_load | bool }}" + glusterfs_heketi_wipe: "{{ openshift_storage_glusterfs_heketi_wipe | bool }}" glusterfs_heketi_url: "{{ openshift_storage_glusterfs_heketi_url }}" glusterfs_heketi_port: "{{ openshift_storage_glusterfs_heketi_port }}" glusterfs_heketi_executor: "{{ openshift_storage_glusterfs_heketi_executor }}" glusterfs_heketi_ssh_port: "{{ openshift_storage_glusterfs_heketi_ssh_port }}" glusterfs_heketi_ssh_user: "{{ openshift_storage_glusterfs_heketi_ssh_user }}" - glusterfs_heketi_ssh_sudo: "{{ openshift_storage_glusterfs_heketi_ssh_sudo }}" + glusterfs_heketi_ssh_sudo: "{{ openshift_storage_glusterfs_heketi_ssh_sudo | bool }}" 
glusterfs_heketi_ssh_keyfile: "{{ openshift_storage_glusterfs_heketi_ssh_keyfile }}" glusterfs_nodes: "{{ groups.glusterfs }}" diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml index 0b4d1c82b..e46cec378 100644 --- a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml +++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml @@ -2,29 +2,29 @@ - set_fact: glusterfs_timeout: "{{ openshift_storage_glusterfs_registry_timeout }}" glusterfs_namespace: "{{ openshift_storage_glusterfs_registry_namespace }}" - glusterfs_is_native: "{{ openshift_storage_glusterfs_registry_is_native }}" + glusterfs_is_native: "{{ openshift_storage_glusterfs_registry_is_native | bool }}" glusterfs_name: "{{ openshift_storage_glusterfs_registry_name }}" glusterfs_nodeselector: "{{ openshift_storage_glusterfs_registry_nodeselector | default(['storagenode', openshift_storage_glusterfs_registry_name] | join('=')) | map_from_pairs }}" glusterfs_storageclass: "{{ openshift_storage_glusterfs_registry_storageclass }}" glusterfs_image: "{{ openshift_storage_glusterfs_registry_image }}" glusterfs_version: "{{ openshift_storage_glusterfs_registry_version }}" - glusterfs_wipe: "{{ openshift_storage_glusterfs_registry_wipe }}" - glusterfs_heketi_is_native: "{{ openshift_storage_glusterfs_registry_heketi_is_native }}" - glusterfs_heketi_is_missing: "{{ openshift_storage_glusterfs_registry_heketi_is_missing }}" - glusterfs_heketi_deploy_is_missing: "{{ openshift_storage_glusterfs_registry_heketi_deploy_is_missing }}" + glusterfs_wipe: "{{ openshift_storage_glusterfs_registry_wipe | bool }}" + glusterfs_heketi_is_native: "{{ openshift_storage_glusterfs_registry_heketi_is_native | bool }}" + glusterfs_heketi_is_missing: "{{ openshift_storage_glusterfs_registry_heketi_is_missing | bool }}" + glusterfs_heketi_deploy_is_missing: "{{ openshift_storage_glusterfs_registry_heketi_deploy_is_missing | bool }}" glusterfs_heketi_cli: "{{ openshift_storage_glusterfs_registry_heketi_cli }}" glusterfs_heketi_image: "{{ openshift_storage_glusterfs_registry_heketi_image }}" glusterfs_heketi_version: "{{ openshift_storage_glusterfs_registry_heketi_version }}" glusterfs_heketi_admin_key: "{{ openshift_storage_glusterfs_registry_heketi_admin_key }}" glusterfs_heketi_user_key: "{{ openshift_storage_glusterfs_registry_heketi_user_key }}" - glusterfs_heketi_topology_load: "{{ openshift_storage_glusterfs_registry_heketi_topology_load }}" - glusterfs_heketi_wipe: "{{ openshift_storage_glusterfs_registry_heketi_wipe }}" + glusterfs_heketi_topology_load: "{{ openshift_storage_glusterfs_registry_heketi_topology_load | bool }}" + glusterfs_heketi_wipe: "{{ openshift_storage_glusterfs_registry_heketi_wipe | bool }}" glusterfs_heketi_url: "{{ openshift_storage_glusterfs_registry_heketi_url }}" glusterfs_heketi_port: "{{ openshift_storage_glusterfs_registry_heketi_port }}" glusterfs_heketi_executor: "{{ openshift_storage_glusterfs_registry_heketi_executor }}" glusterfs_heketi_ssh_port: "{{ openshift_storage_glusterfs_registry_heketi_ssh_port }}" glusterfs_heketi_ssh_user: "{{ openshift_storage_glusterfs_registry_heketi_ssh_user }}" - glusterfs_heketi_ssh_sudo: "{{ openshift_storage_glusterfs_registry_heketi_ssh_sudo }}" + glusterfs_heketi_ssh_sudo: "{{ openshift_storage_glusterfs_registry_heketi_ssh_sudo | bool }}" glusterfs_heketi_ssh_keyfile: "{{ openshift_storage_glusterfs_registry_heketi_ssh_keyfile }}" glusterfs_nodes: "{{ groups.glusterfs_registry 
| default(groups.glusterfs) }}" @@ -56,7 +56,7 @@ - name: Create GlusterFS registry endpoints oc_obj: - namespace: "{{ glusterfs_namespace }}" + namespace: "{{ openshift.hosted.registry.namespace | default('default') }}" state: present kind: endpoints name: "glusterfs-{{ glusterfs_name }}-endpoints" @@ -65,7 +65,7 @@ - name: Create GlusterFS registry service oc_obj: - namespace: "{{ glusterfs_namespace }}" + namespace: "{{ openshift.hosted.registry.namespace | default('default') }}" state: present kind: service name: "glusterfs-{{ glusterfs_name }}-endpoints" |
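The storage-migration hunks in playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml above all follow the same pattern: run `oc adm migrate storage` only when the corresponding `*_storage_migration_enabled` toggle is true, and treat a non-zero exit as fatal only when the matching `*_fatal` toggle is also true. A minimal, self-contained sketch of that pattern; `/bin/false` stands in for the real `oc adm ... migrate storage` call, and only the variable names are taken from this diff:

```yaml
---
# Minimal illustration of the enable/fatal toggle pattern used by the new
# storage-migration tasks; /bin/false is a stand-in for the real command.
- hosts: localhost
  connection: local
  gather_facts: false
  tasks:
    - name: Simulated post-upgrade storage migration
      command: /bin/false
      register: l_post_upgrade_storage
      when: openshift_upgrade_post_storage_migration_enabled | default(true, true) | bool
      failed_when:
        - openshift_upgrade_post_storage_migration_enabled | default(true, true) | bool
        - l_post_upgrade_storage.rc != 0
        - openshift_upgrade_post_storage_migration_fatal | default(false, true) | bool
```

Running the sketch with `-e openshift_upgrade_post_storage_migration_fatal=true` turns the same non-zero exit into a hard failure, which is exactly the control the new inventory variables expose.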