50 files changed, 1190 insertions, 76 deletions
diff --git a/images/installer/README_INVENTORY_GENERATOR.md b/images/installer/README_INVENTORY_GENERATOR.md new file mode 100644 index 000000000..9c10e4b71 --- /dev/null +++ b/images/installer/README_INVENTORY_GENERATOR.md @@ -0,0 +1,85 @@ +Dynamic Inventory Generation +============================ + +A script within the openshift-ansible image that can dynamically +generate an Ansible inventory file from an existing cluster. + +## Configure + +A user configuration file provides additional details used when creating the inventory file. +The default location of this file is `/etc/inventory-generator-config.yaml`. The +following configuration values are either expected or default to the given values when omitted: + +- `master_config_path`: +  - specifies where to look for the bind-mounted `master-config.yaml` file in the container +  - if omitted or set to `null`, defaults to `/opt/app-root/src/master-config.yaml` + +- `admin_kubeconfig_path`: +  - specifies where to look for the bind-mounted `admin.kubeconfig` file in the container +  - if omitted or set to `null`, defaults to `/opt/app-root/src/.kube/config` + +- `ansible_ssh_user`: +  - specifies the SSH user used by Ansible when running the specified `PLAYBOOK_FILE` (see `README_CONTAINER_IMAGE.md` for additional information on this environment variable). +  - if omitted, defaults to `root` + +- `ansible_become_user`: +  - specifies a user to "become" on the remote host, for privilege escalation. +  - If a non-null value is specified, `ansible_become` is implicitly set to `yes` in the resulting inventory file. + +See the supplied sample user configuration file in [`root/etc/inventory-generator-config.yaml`](./root/etc/inventory-generator-config.yaml) for additional optional inventory variables that may be specified. + +## Build + +See `README_CONTAINER_IMAGE.md` for information on building this image. + +## Run + +Given a master node's `master-config.yaml` file, a user configuration file (see "Configure" section), and an `admin.kubeconfig` file, the command below will: + +1. Use `oc` to query the host for additional node information (using the supplied `kubeconfig` file). +2. Generate an inventory file based on information retrieved from `oc get nodes` and the given `master-config.yaml` file. +3. Run the specified [openshift-ansible](https://github.com/openshift/openshift-ansible) `health.yml` playbook using the inventory file generated in the previous step. + +``` +docker run -u `id -u` \ +       -v $HOME/.ssh/id_rsa:/opt/app-root/src/.ssh/id_rsa:Z,ro \ +       -v /tmp/origin/master/admin.kubeconfig:/opt/app-root/src/.kube/config:Z \ +       -v /tmp/origin/master/master-config.yaml:/opt/app-root/src/master-config.yaml:Z \ +       -e OPTS="-v --become-user root" \ +       -e PLAYBOOK_FILE=playbooks/byo/openshift-checks/health.yml \ +       -e GENERATE_INVENTORY=true \ +       -e USER=`whoami` \ +       openshift/origin-ansible + +``` + +**Note:** In the command above, setting the `GENERATE_INVENTORY` environment variable automatically generates the inventory file in an expected location. +An `INVENTORY_FILE` variable (or any other inventory location) does not need to be supplied when generating an inventory. + +## Debug + +To debug the `generate` script, run the above container interactively +and manually execute `/usr/local/bin/generate`: + +``` +... +docker run -u `id -u` \ +       -v ... +       ...
+       -it openshift/origin-ansible /bin/bash + +--- + +bash-4.2$ cd $HOME +bash-4.2$ ls +master-config.yaml +bash-4.2$ /usr/local/bin/generate $HOME/generated_hosts +bash-4.2$ ls +generated_hosts  master-config.yaml +bash-4.2$ less generated_hosts +... +``` + +## Notes + +See `README_CONTAINER_IMAGE.md` for additional information about this image. diff --git a/images/installer/root/etc/inventory-generator-config.yaml b/images/installer/root/etc/inventory-generator-config.yaml new file mode 100644 index 000000000..d56e3f4d2 --- /dev/null +++ b/images/installer/root/etc/inventory-generator-config.yaml @@ -0,0 +1,20 @@ +--- +# meta config +master_config_path: "/opt/app-root/src/master-config.yaml" +admin_kubeconfig_path: "/opt/app-root/src/.kube/config" + +# default user configuration +ansible_ssh_user: ec2-user +ansible_become: "yes" +ansible_become_user: "root" + +# openshift-ansible inventory vars +openshift_uninstall_images: false +openshift_install_examples: true +openshift_deployment_type: origin + +openshift_release: 3.6 +openshift_image_tag: v3.6.0 +openshift_hosted_logging_deploy: null  # defaults to "true" if loggingPublicURL is set in master-config.yaml +openshift_logging_image_version: v3.6.0 +openshift_disable_check: "" diff --git a/images/installer/root/usr/local/bin/generate b/images/installer/root/usr/local/bin/generate new file mode 100755 index 000000000..3db7a3ee8 --- /dev/null +++ b/images/installer/root/usr/local/bin/generate @@ -0,0 +1,397 @@ +#!/bin/env python + +""" +Attempts to read 'master-config.yaml' and extract remote +host information to dynamically create an inventory file +in order to run Ansible playbooks against that host. +""" + +import os +import re +import shlex +import shutil +import subprocess +import sys +import yaml + +try: +    HOME = os.environ['HOME'] +except KeyError: +    print 'A required environment variable "$HOME" has not been set' +    exit(1) + +DEFAULT_USER_CONFIG_PATH = '/etc/inventory-generator-config.yaml' +DEFAULT_MASTER_CONFIG_PATH = HOME + '/master-config.yaml' +DEFAULT_ADMIN_KUBECONFIG_PATH = HOME + '/.kube/config' + +INVENTORY_FULL_PATH = HOME + '/generated_hosts' +USE_STDOUT = True + +if len(sys.argv) > 1: +    INVENTORY_FULL_PATH = sys.argv[1] +    USE_STDOUT = False + + +class OpenShiftClientError(Exception): +    """Base exception class for OpenShift CLI wrapper""" +    pass + + +class InvalidHost(Exception): +    """Base exception class for host creation problems.""" +    pass + + +class InvalidHostGroup(Exception): +    """Base exception class for host-group creation problems.""" +    pass + + +class OpenShiftClient: +    oc = None +    kubeconfig = None + +    def __init__(self, kubeconfig=DEFAULT_ADMIN_KUBECONFIG_PATH): +        """Find and store path to oc binary""" +        # https://github.com/openshift/openshift-ansible/issues/3410 +        # oc can be in /usr/local/bin in some cases, but that may not +        # be in $PATH due to ansible/sudo +        paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ['/usr/local/bin', os.path.expanduser('~/bin')] + +        oc_binary_name = 'oc' +        oc_binary = None + +        # Use shutil.which if it is available, otherwise fallback to a naive path search +        try: +            which_result = shutil.which(oc_binary_name, path=os.pathsep.join(paths)) +            if which_result is not None: +                oc_binary = which_result +        except AttributeError: +            for path in paths: +                if os.path.exists(os.path.join(path, 
oc_binary_name)): +                    oc_binary = os.path.join(path, oc_binary_name) +                    break + +        if oc_binary is None: +            raise OpenShiftClientError('Unable to locate `oc` binary. Not present in PATH.') + +        self.oc = oc_binary +        self.kubeconfig = kubeconfig + +    def call(self, cmd_str): +        """Execute a remote call using `oc`""" +        cmd = [ +            self.oc, +            '--config', +            self.kubeconfig +        ] + shlex.split(cmd_str) +        try: +            out = subprocess.check_output(list(cmd), stderr=subprocess.STDOUT) +        except subprocess.CalledProcessError as err: +            raise OpenShiftClientError('[rc {}] {}\n{}'.format(err.returncode, ' '.join(err.cmd), err.output)) +        return out + +    def whoami(self): +        """Retrieve information about the current user in the given kubeconfig""" +        return self.call('whoami') + +    def get_nodes(self): +        """Retrieve remote node information as a yaml object""" +        return self.call('get nodes -o yaml') + + +class HostGroup: +    groupname = "" +    hosts = list() + +    def __init__(self, hosts): +        if not hosts: +            return +        first = hosts[0].get_group_name() +        for h in hosts: +            if h.get_group_name() != first: +                raise InvalidHostGroup("Attempt to create HostGroup with hosts of varying groups.") + +        self.hosts = hosts +        self.groupname = first + +    def add_host(self, host): +        """Add a new host to this group.""" +        self.hosts.append(host) + +    def get_group_name(self): +        """Return the groupname associated with each aggregated host.""" +        return self.groupname + +    def get_hosts(self): +        """Return aggregated hosts""" +        return self.hosts + +    def string(self): +        """Call the print method for each aggregated host; separated by newlines.""" +        infos = "" +        for host in self.hosts: +            infos += host.string() + "\n" +        return infos + + +class Host: +    group = "masters" +    alias = "" +    hostname = "" +    public_hostname = "" +    ip_addr = "" +    public_ip_addr = "" + +    def __init__(self, groupname): +        if not groupname: +            raise InvalidHost("Attempt to create Host with no group name provided.") +        self.group = groupname + +    def get_group_name(self): +        return self.group + +    def get_openshift_hostname(self): +        return self.hostname + +    def host_alias(self, hostalias): +        """Set an alias for this host.""" +        self.alias = hostalias + +    def address(self, ip): +        """Set the ip address for this host.""" +        self.ip_addr = ip + +    def public_address(self, ip): +        """Set the external ip address for this host.""" +        self.public_ip_addr = ip + +    def host_name(self, hname): +        self.hostname = parse_hostname(hname) + +    def public_host_name(self, phname): +        self.public_hostname = parse_hostname(phname) + +    def string(self): +        """Print an inventory-file compatible string with host information""" +        info = "" +        if self.alias: +            info += self.alias + " " +        elif self.hostname: +            info += self.hostname + " " +        elif self.ip_addr: +            info += self.ip_addr + " " +        if self.ip_addr: +            info += "openshift_ip=" + self.ip_addr + " " +        if self.public_ip_addr: +            info += "openshift_public_ip=" + 
self.public_ip_addr + " " +        if self.hostname: +            info += "openshift_hostname=" + self.hostname + " " +        if self.public_hostname: +            info += "openshift_public_hostname=" + self.public_hostname + +        return info + + +def parse_hostname(host): +    """Remove protocol and port from given hostname. +    Return parsed string""" +    no_proto = re.split('^http(s)?\:\/\/', host) +    if no_proto: +        host = no_proto[-1] + +    no_port = re.split('\:[0-9]+(/)?$', host) +    if no_port: +        host = no_port[0] + +    return host + + +def main(): +    """Parse master-config file and populate inventory file.""" +    # set default values +    USER_CONFIG = os.environ.get('CONFIG') +    if not USER_CONFIG: +        USER_CONFIG = DEFAULT_USER_CONFIG_PATH + +    # read user configuration +    try: +        config_file_obj = open(USER_CONFIG, 'r') +        raw_config_file = config_file_obj.read() +        user_config = yaml.load(raw_config_file) +        if not user_config: +            user_config = dict() +    except IOError as err: +        print "Unable to find or read user configuration file '{}': {}".format(USER_CONFIG, err) +        exit(1) + +    master_config_path = user_config.get('master_config_path', DEFAULT_MASTER_CONFIG_PATH) +    if not master_config_path: +        master_config_path = DEFAULT_MASTER_CONFIG_PATH + +    admin_kubeconfig_path = user_config.get('admin_kubeconfig_path', DEFAULT_ADMIN_KUBECONFIG_PATH) +    if not admin_kubeconfig_path: +        admin_kubeconfig_path = DEFAULT_ADMIN_KUBECONFIG_PATH + +    try: +        file_obj = open(master_config_path, 'r') +    except IOError as err: +        print "Unable to find or read host master configuration file '{}': {}".format(master_config_path, err) +        exit(1) + +    raw_text = file_obj.read() + +    y = yaml.load(raw_text) +    if y.get("kind", "") != "MasterConfig": +        print "Bind-mounted host master configuration file is not of 'kind' MasterConfig. Aborting..." +        exit(1) + +    # finish reading config file and begin gathering +    # cluster information for inventory file +    file_obj.close() + +    # set inventory values based on user configuration +    ansible_ssh_user = user_config.get('ansible_ssh_user', 'root') +    ansible_become_user = user_config.get('ansible_become_user') + +    openshift_uninstall_images = user_config.get('openshift_uninstall_images', False) +    openshift_install_examples = user_config.get('openshift_install_examples', True) +    openshift_deployment_type = user_config.get('openshift_deployment_type', 'origin') + +    openshift_release = user_config.get('openshift_release') +    openshift_image_tag = user_config.get('openshift_image_tag') +    openshift_logging_image_version = user_config.get('openshift_logging_image_version') +    openshift_disable_check = user_config.get('openshift_disable_check') + +    # extract host config info from parsed yaml file +    asset_config = y.get("assetConfig") +    master_config = y.get("kubernetesMasterConfig") +    etcd_config = y.get("etcdClientInfo") + +    # if master_config is missing, error out; we expect to be running on a master to be able to +    # gather enough information to generate the rest of the inventory file. +    if not master_config: +        msg = "'kubernetesMasterConfig' missing from '{}'; unable to gather all necessary host information..." 
+        print msg.format(master_config_path) +        exit(1) + +    master_public_url = y.get("masterPublicURL") +    if not master_public_url: +        msg = "'kubernetesMasterConfig.masterPublicURL' missing from '{}'; Unable to connect to master host..." +        print msg.format(master_config_path) +        exit(1) + +    oc = OpenShiftClient(admin_kubeconfig_path) + +    # ensure kubeconfig is logged in with provided user, or fail with a friendly message otherwise +    try: +        oc.whoami() +    except OpenShiftClientError as err: +        msg = ("Unable to obtain user information using the provided kubeconfig file. " +               "Current context does not appear to be able to authenticate to the server. " +               "Error returned from server:\n\n{}") +        print msg.format(str(err)) +        exit(1) + +    # connect to remote host using the provided config and extract all possible node information +    nodes_config = yaml.load(oc.get_nodes()) + +    # contains host types (e.g. masters, nodes, etcd) +    host_groups = dict() +    openshift_hosted_logging_deploy = False +    is_etcd_deployed = master_config.get("storage-backend", "") in ["etcd3", "etcd2", "etcd"] + +    if asset_config and asset_config.get('loggingPublicURL'): +        openshift_hosted_logging_deploy = True + +    openshift_hosted_logging_deploy = user_config.get("openshift_hosted_logging_deploy", openshift_hosted_logging_deploy) + +    m = Host("masters") +    m.address(master_config["masterIP"]) +    m.public_host_name(master_public_url) +    host_groups["masters"] = HostGroup([m]) + +    if nodes_config: +        node_hosts = list() +        for node in nodes_config.get("items", []): +            if node["kind"] != "Node": +                continue + +            n = Host("nodes") + +            address = "" +            internal_hostname = "" +            for item in node["status"].get("addresses", []): +                if not address and item['type'] in ['InternalIP', 'LegacyHostIP']: +                    address = item['address'] + +                if item['type'] == 'Hostname': +                    internal_hostname = item['address'] + +            n.address(address) +            n.host_name(internal_hostname) +            node_hosts.append(n) + +        host_groups["nodes"] = HostGroup(node_hosts) + +    if etcd_config: +        etcd_hosts = list() +        for url in etcd_config.get("urls", []): +            e = Host("etcd") +            e.host_name(url) +            etcd_hosts.append(e) + +        host_groups["etcd"] = HostGroup(etcd_hosts) + +    # open new inventory file for writing +    if USE_STDOUT: +        inv_file_obj = sys.stdout +    else: +        try: +            inv_file_obj = open(INVENTORY_FULL_PATH, 'w+') +        except IOError as err: +            print "Unable to create or open generated inventory file: {}".format(err) +            exit(1) + +    inv_file_obj.write("[OSEv3:children]\n") +    for group in host_groups: +        inv_file_obj.write("{}\n".format(group)) +    inv_file_obj.write("\n") + +    inv_file_obj.write("[OSEv3:vars]\n") +    if ansible_ssh_user: +        inv_file_obj.write("ansible_ssh_user={}\n".format(ansible_ssh_user)) +    if ansible_become_user: +        inv_file_obj.write("ansible_become_user={}\n".format(ansible_become_user)) +        inv_file_obj.write("ansible_become=yes\n") + +    if openshift_uninstall_images: +        inv_file_obj.write("openshift_uninstall_images={}\n".format(str(openshift_uninstall_images))) +    if 
openshift_deployment_type: +        inv_file_obj.write("openshift_deployment_type={}\n".format(openshift_deployment_type)) +    if openshift_install_examples: +        inv_file_obj.write("openshift_install_examples={}\n".format(str(openshift_install_examples))) + +    if openshift_release: +        inv_file_obj.write("openshift_release={}\n".format(str(openshift_release))) +    if openshift_image_tag: +        inv_file_obj.write("openshift_image_tag={}\n".format(str(openshift_image_tag))) +    if openshift_logging_image_version: +        inv_file_obj.write("openshift_logging_image_version={}\n".format(str(openshift_logging_image_version))) +    if openshift_disable_check: +        inv_file_obj.write("openshift_disable_check={}\n".format(str(openshift_disable_check))) +    inv_file_obj.write("\n") + +    inv_file_obj.write("openshift_hosted_logging_deploy={}\n".format(str(openshift_hosted_logging_deploy))) +    inv_file_obj.write("\n") + +    for group in host_groups: +        inv_file_obj.write("[{}]\n".format(host_groups[group].get_group_name())) +        inv_file_obj.write(host_groups[group].string()) +        inv_file_obj.write("\n") + +    inv_file_obj.close() + + +if __name__ == '__main__': +    main() diff --git a/images/installer/root/usr/local/bin/run b/images/installer/root/usr/local/bin/run index 9401ea118..51ac566e5 100755 --- a/images/installer/root/usr/local/bin/run +++ b/images/installer/root/usr/local/bin/run @@ -24,9 +24,12 @@ elif [[ -v INVENTORY_URL ]]; then  elif [[ -v DYNAMIC_SCRIPT_URL ]]; then    curl -o ${INVENTORY} ${DYNAMIC_SCRIPT_URL}    chmod 755 ${INVENTORY} +elif [[ -v GENERATE_INVENTORY ]]; then +  # dynamically generate inventory file using bind-mounted info +  /usr/local/bin/generate ${INVENTORY}  else    echo -  echo "One of INVENTORY_FILE, INVENTORY_URL or DYNAMIC_SCRIPT_URL must be provided." +  echo "One of INVENTORY_FILE, INVENTORY_URL, GENERATE_INVENTORY, or DYNAMIC_SCRIPT_URL must be provided."    exec /usr/local/bin/usage  fi  INVENTORY_ARG="-i ${INVENTORY}" diff --git a/inventory/byo/hosts.origin.example b/inventory/byo/hosts.origin.example index dbe57bbd2..be15944d5 100644 --- a/inventory/byo/hosts.origin.example +++ b/inventory/byo/hosts.origin.example @@ -34,17 +34,17 @@ openshift_deployment_type=origin  # use this to lookup the latest exact version of the container images, which is the tag actually used to configure  # the cluster. For RPM installations we just verify the version detected in your configured repos matches this  # release. -openshift_release=v3.6 +openshift_release=v3.7  # Specify an exact container image tag to install or configure.  # WARNING: This value will be used for all hosts in containerized environments, even those that have another version installed.  # This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up. -#openshift_image_tag=v3.6.0 +#openshift_image_tag=v3.7.0  # Specify an exact rpm version to install or configure.  # WARNING: This value will be used for all hosts in RPM based environments, even those that have another version installed.  # This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up. 
-#openshift_pkg_version=-3.6.0 +#openshift_pkg_version=-3.7.0  # This enables all the system containers except for docker:  #openshift_use_system_containers=False @@ -538,7 +538,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',  #openshift_hosted_metrics_public_url=https://hawkular-metrics.example.com/hawkular/metrics  # Configure the prefix and version for the component images  #openshift_hosted_metrics_deployer_prefix=docker.io/openshift/origin- -#openshift_hosted_metrics_deployer_version=v3.6.0 +#openshift_hosted_metrics_deployer_version=v3.7.0  #  # StorageClass  # openshift_storageclass_name=gp2 @@ -593,7 +593,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',  #openshift_hosted_logging_elasticsearch_cluster_size=1  # Configure the prefix and version for the component images  #openshift_hosted_logging_deployer_prefix=docker.io/openshift/origin- -#openshift_hosted_logging_deployer_version=v3.6.0 +#openshift_hosted_logging_deployer_version=v3.7.0  # Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet')  # os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant' diff --git a/inventory/byo/hosts.ose.example b/inventory/byo/hosts.ose.example index 0d60de6d2..ad84e6aef 100644 --- a/inventory/byo/hosts.ose.example +++ b/inventory/byo/hosts.ose.example @@ -34,17 +34,17 @@ openshift_deployment_type=openshift-enterprise  # use this to lookup the latest exact version of the container images, which is the tag actually used to configure  # the cluster. For RPM installations we just verify the version detected in your configured repos matches this  # release. -openshift_release=v3.6 +openshift_release=v3.7  # Specify an exact container image tag to install or configure.  # WARNING: This value will be used for all hosts in containerized environments, even those that have another version installed.  # This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up. -#openshift_image_tag=v3.6.0 +#openshift_image_tag=v3.7.0  # Specify an exact rpm version to install or configure.  # WARNING: This value will be used for all hosts in RPM based environments, even those that have another version installed.  # This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up. 
-#openshift_pkg_version=-3.6.0 +#openshift_pkg_version=-3.7.0  # This enables all the system containers except for docker:  #openshift_use_system_containers=False @@ -546,7 +546,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',  #openshift_hosted_metrics_public_url=https://hawkular-metrics.example.com/hawkular/metrics  # Configure the prefix and version for the component images  #openshift_hosted_metrics_deployer_prefix=registry.example.com:8888/openshift3/ -#openshift_hosted_metrics_deployer_version=3.6.0 +#openshift_hosted_metrics_deployer_version=3.7.0  #  # StorageClass  # openshift_storageclass_name=gp2 @@ -601,7 +601,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',  #openshift_hosted_logging_elasticsearch_cluster_size=1  # Configure the prefix and version for the component images  #openshift_hosted_logging_deployer_prefix=registry.example.com:8888/openshift3/ -#openshift_hosted_logging_deployer_version=3.6.0 +#openshift_hosted_logging_deployer_version=3.7.0  # Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet')  # os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant' diff --git a/playbooks/byo/openshift-checks/roles b/playbooks/byo/openshift-checks/roles new file mode 120000 index 000000000..20c4c58cf --- /dev/null +++ b/playbooks/byo/openshift-checks/roles @@ -0,0 +1 @@ +../../../roles
\ No newline at end of file diff --git a/playbooks/byo/openshift-loadbalancer/config.yml b/playbooks/byo/openshift-loadbalancer/config.yml new file mode 100644 index 000000000..32c828f97 --- /dev/null +++ b/playbooks/byo/openshift-loadbalancer/config.yml @@ -0,0 +1,6 @@ +--- +- include: ../openshift-cluster/initialize_groups.yml + +- include: ../../common/openshift-cluster/std_include.yml + +- include: ../../common/openshift-loadbalancer/config.yml diff --git a/playbooks/byo/openshift-nfs/config.yml b/playbooks/byo/openshift-nfs/config.yml new file mode 100644 index 000000000..93b24411e --- /dev/null +++ b/playbooks/byo/openshift-nfs/config.yml @@ -0,0 +1,6 @@ +--- +- include: ../openshift-cluster/initialize_groups.yml + +- include: ../../common/openshift-cluster/std_include.yml + +- include: ../../common/openshift-nfs/config.yml diff --git a/playbooks/common/openshift-cluster/evaluate_groups.yml b/playbooks/common/openshift-cluster/evaluate_groups.yml index 16a733899..e55b2f964 100644 --- a/playbooks/common/openshift-cluster/evaluate_groups.yml +++ b/playbooks/common/openshift-cluster/evaluate_groups.yml @@ -43,11 +43,14 @@    - name: Evaluate groups - Fail if no etcd hosts group is defined      fail:        msg: > -        No etcd hosts defined. Running an all-in-one master is deprecated and -        will no longer be supported in a future upgrade. +        Running etcd as an embedded service is no longer supported. If this is a +        new install please define an 'etcd' group with either one or three +        hosts. These hosts may be the same hosts as your masters. If this is an +        upgrade you may set openshift_master_unsupported_embedded_etcd=true +        until a migration playbook becomes available.      when: -    - g_etcd_hosts | default([]) | length == 0 -    - not openshift_master_unsupported_all_in_one | default(False) +    - g_etcd_hosts | default([]) | length not in [3,1] +    - not openshift_master_unsupported_embedded_etcd | default(False)      - not openshift_node_bootstrap | default(False)    - name: Evaluate oo_all_hosts diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml index 18f10437d..b75aae589 100644 --- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml @@ -13,11 +13,11 @@        {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig        migrate storage --include=* --confirm      register: l_pb_upgrade_control_plane_pre_upgrade_storage -    when: openshift_upgrade_pre_storage_migration_enabled | default(true,true) | bool +    when: openshift_upgrade_pre_storage_migration_enabled | default(true) | bool      failed_when: -    - openshift_upgrade_pre_storage_migration_enabled | default(true,true) | bool +    - openshift_upgrade_pre_storage_migration_enabled | default(true) | bool      - l_pb_upgrade_control_plane_pre_upgrade_storage.rc != 0 -    - openshift_upgrade_pre_storage_migration_fatal | default(true,true) | bool +    - openshift_upgrade_pre_storage_migration_fatal | default(true) | bool  # If facts cache were for some reason deleted, this fact may not be set, and if not set  # it will always default to true. 
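A note on the `default(true,true)` to `default(true)` cleanups in the hunk above: Ansible's `default` filter is Jinja2's, and its optional second (boolean) argument makes the filter replace falsy defined values as well as undefined ones, so `default(true, true)` would silently discard an operator's explicit `false`. A minimal standalone sketch of the difference, assuming only a local `jinja2` install (this is not code from the repo):

```
from jinja2 import Environment

env = Environment()
# left: plain default(); right: default() with the boolean flag, as removed above
t = env.from_string("{{ flag | default(true) }} / {{ flag | default(true, true) }}")

print(t.render(flag=False))  # "False / True" -- the boolean form discards an explicit False
print(t.render())            # "True / True"  -- both forms cover the undefined case
```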
This causes problems for the etcd data dir fact detection @@ -151,11 +151,11 @@        {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig        migrate storage --include=clusterpolicies --confirm      register: l_pb_upgrade_control_plane_post_upgrade_storage -    when: openshift_upgrade_post_storage_migration_enabled | default(true,true) | bool +    when: openshift_upgrade_post_storage_migration_enabled | default(true) | bool      failed_when: -    - openshift_upgrade_post_storage_migration_enabled | default(true,true) | bool +    - openshift_upgrade_post_storage_migration_enabled | default(true) | bool      - l_pb_upgrade_control_plane_post_upgrade_storage.rc != 0 -    - openshift_upgrade_post_storage_migration_fatal | default(false,true) | bool +    - openshift_upgrade_post_storage_migration_fatal | default(false) | bool      run_once: true      delegate_to: "{{ groups.oo_first_master.0 }}" @@ -247,11 +247,11 @@        migrate storage --include=* --confirm      run_once: true      register: l_pb_upgrade_control_plane_post_upgrade_storage -    when: openshift_upgrade_post_storage_migration_enabled | default(true,true) | bool +    when: openshift_upgrade_post_storage_migration_enabled | default(true) | bool      failed_when: -    - openshift_upgrade_post_storage_migration_enabled | default(true,true) | bool +    - openshift_upgrade_post_storage_migration_enabled | default(true) | bool      - l_pb_upgrade_control_plane_post_upgrade_storage.rc != 0 -    - openshift_upgrade_post_storage_migration_fatal | default(false,true) | bool +    - openshift_upgrade_post_storage_migration_fatal | default(false) | bool    - set_fact:        reconcile_complete: True diff --git a/playbooks/common/openshift-etcd/migrate.yml b/playbooks/common/openshift-etcd/migrate.yml index a2af7bb21..e4ab0aa41 100644 --- a/playbooks/common/openshift-etcd/migrate.yml +++ b/playbooks/common/openshift-etcd/migrate.yml @@ -69,7 +69,7 @@    - role: etcd_migrate      r_etcd_migrate_action: migrate      r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}" -    etcd_peer: "{{ ansible_default_ipv4.address }}" +    etcd_peer: "{{ openshift.common.ip }}"      etcd_url_scheme: "https"      etcd_peer_url_scheme: "https" @@ -80,7 +80,7 @@    - role: etcd_migrate      r_etcd_migrate_action: clean_data      r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}" -    etcd_peer: "{{ ansible_default_ipv4.address }}" +    etcd_peer: "{{ openshift.common.ip }}"      etcd_url_scheme: "https"      etcd_peer_url_scheme: "https"    post_tasks: @@ -115,7 +115,7 @@    roles:    - role: etcd_migrate      r_etcd_migrate_action: add_ttls -    etcd_peer: "{{ hostvars[groups.oo_etcd_to_migrate.0].ansible_default_ipv4.address }}" +    etcd_peer: "{{ hostvars[groups.oo_etcd_to_migrate.0].openshift.common.ip }}"      etcd_url_scheme: "https"      etcd_peer_url_scheme: "https"      when: etcd_migration_failed | length == 0 diff --git a/playbooks/common/openshift-etcd/scaleup.yml b/playbooks/common/openshift-etcd/scaleup.yml index 5f8bb1c7a..d3fa48bad 100644 --- a/playbooks/common/openshift-etcd/scaleup.yml +++ b/playbooks/common/openshift-etcd/scaleup.yml @@ -23,6 +23,9 @@                         -C {{ etcd_peer_url_scheme }}://{{ hostvars[etcd_ca_host].etcd_hostname }}:{{ etcd_client_port }}                         member add {{ etcd_hostname }} {{ etcd_peer_url_scheme }}://{{ etcd_ip }}:{{ etcd_peer_port }}      delegate_to: 
"{{ etcd_ca_host }}" +    failed_when: +    - etcd_add_check.rc == 1 +    - ("peerURL exists" not in etcd_add_check.stderr)      register: etcd_add_check      retries: 3      delay: 10 @@ -53,3 +56,19 @@      retries: 3      delay: 30      until: scaleup_health.rc == 0 + +- name: Update master etcd client urls +  hosts: oo_masters_to_config +  serial: 1 +  tasks: +  - include_role: +      name: openshift_master +      tasks_from: update_etcd_client_urls +    vars: +      etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" +      openshift_ca_host: "{{ groups.oo_first_master.0 }}" +      openshift_master_etcd_hosts: "{{ hostvars +                                       | oo_select_keys(groups['oo_etcd_to_config'] | union(groups['oo_new_etcd_to_config'])) +                                       | oo_collect('openshift.common.hostname') +                                       | default(none, true) }}" +      openshift_master_etcd_port: "{{ (etcd_client_port | default('2379')) if (groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config) else none }}" diff --git a/playbooks/common/openshift-nfs/config.yml b/playbooks/common/openshift-nfs/config.yml index 000e46e80..64ea0d3c4 100644 --- a/playbooks/common/openshift-nfs/config.yml +++ b/playbooks/common/openshift-nfs/config.yml @@ -2,5 +2,5 @@  - name: Configure nfs    hosts: oo_nfs_to_config    roles: -  - role: openshift_facts +  - role: os_firewall    - role: openshift_storage_nfs diff --git a/roles/docker/tasks/package_docker.yml b/roles/docker/tasks/package_docker.yml index 145b552a6..d685d77f2 100644 --- a/roles/docker/tasks/package_docker.yml +++ b/roles/docker/tasks/package_docker.yml @@ -3,6 +3,8 @@    command: "{{ repoquery_cmd }} --installed --qf '%{version}' docker"    when: not openshift.common.is_atomic | bool    register: curr_docker_version +  retries: 4 +  until: not curr_docker_version | failed    changed_when: false  - name: Error out if Docker pre-installed but too old diff --git a/roles/lib_utils/library/repoquery.py b/roles/lib_utils/library/repoquery.py index 95a305b58..e5ac1f74f 100644 --- a/roles/lib_utils/library/repoquery.py +++ b/roles/lib_utils/library/repoquery.py @@ -35,6 +35,7 @@ import os  # noqa: F401  import re  # noqa: F401  import shutil  # noqa: F401  import tempfile  # noqa: F401 +import time  # noqa: F401  try:      import ruamel.yaml as yaml  # noqa: F401 @@ -618,17 +619,22 @@ def main():              show_duplicates=dict(default=False, required=False, type='bool'),              match_version=dict(default=None, required=False, type='str'),              ignore_excluders=dict(default=False, required=False, type='bool'), +            retries=dict(default=4, required=False, type='int'), +            retry_interval=dict(default=5, required=False, type='int'),          ),          supports_check_mode=False,          required_if=[('show_duplicates', True, ['name'])],      ) -    rval = Repoquery.run_ansible(module.params, module.check_mode) - -    if 'failed' in rval: -        module.fail_json(**rval) - -    module.exit_json(**rval) +    tries = 1 +    while True: +        rval = Repoquery.run_ansible(module.params, module.check_mode) +        if 'failed' not in rval: +            module.exit_json(**rval) +        elif tries > module.params['retries']: +            module.fail_json(**rval) +        tries += 1 +        time.sleep(module.params['retry_interval'])  if __name__ == "__main__": diff --git a/roles/lib_utils/library/yedit.py b/roles/lib_utils/library/yedit.py index baf72fe47..921bca074 100644 --- 
a/roles/lib_utils/library/yedit.py +++ b/roles/lib_utils/library/yedit.py @@ -35,6 +35,7 @@ import os  # noqa: F401  import re  # noqa: F401  import shutil  # noqa: F401  import tempfile  # noqa: F401 +import time  # noqa: F401  try:      import ruamel.yaml as yaml  # noqa: F401 diff --git a/roles/lib_utils/src/ansible/repoquery.py b/roles/lib_utils/src/ansible/repoquery.py index 40773b1c1..5f5b93639 100644 --- a/roles/lib_utils/src/ansible/repoquery.py +++ b/roles/lib_utils/src/ansible/repoquery.py @@ -19,17 +19,22 @@ def main():              show_duplicates=dict(default=False, required=False, type='bool'),              match_version=dict(default=None, required=False, type='str'),              ignore_excluders=dict(default=False, required=False, type='bool'), +            retries=dict(default=4, required=False, type='int'), +            retry_interval=dict(default=5, required=False, type='int'),          ),          supports_check_mode=False,          required_if=[('show_duplicates', True, ['name'])],      ) -    rval = Repoquery.run_ansible(module.params, module.check_mode) - -    if 'failed' in rval: -        module.fail_json(**rval) - -    module.exit_json(**rval) +    tries = 1 +    while True: +        rval = Repoquery.run_ansible(module.params, module.check_mode) +        if 'failed' not in rval: +            module.exit_json(**rval) +        elif tries > module.params['retries']: +            module.fail_json(**rval) +        tries += 1 +        time.sleep(module.params['retry_interval'])  if __name__ == "__main__": diff --git a/roles/lib_utils/src/lib/import.py b/roles/lib_utils/src/lib/import.py index 567f8c9e0..07a04b7ae 100644 --- a/roles/lib_utils/src/lib/import.py +++ b/roles/lib_utils/src/lib/import.py @@ -10,6 +10,7 @@ import os  # noqa: F401  import re  # noqa: F401  import shutil  # noqa: F401  import tempfile  # noqa: F401 +import time  # noqa: F401  try:      import ruamel.yaml as yaml  # noqa: F401 diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py index ebfa6bb8f..517e0231d 100755 --- a/roles/openshift_facts/library/openshift_facts.py +++ b/roles/openshift_facts/library/openshift_facts.py @@ -1602,11 +1602,13 @@ def set_builddefaults_facts(facts):              builddefaults['git_no_proxy'] = builddefaults['no_proxy']          # If we're actually defining a builddefaults config then create admission_plugin_config          # then merge builddefaults[config] structure into admission_plugin_config + +        # 'config' is the 'openshift_builddefaults_json' inventory variable          if 'config' in builddefaults:              if 'admission_plugin_config' not in facts['master']: -                facts['master']['admission_plugin_config'] = dict() +                # Scaffold out the full expected datastructure +                facts['master']['admission_plugin_config'] = {'BuildDefaults': {'configuration': {'env': {}}}}              facts['master']['admission_plugin_config'].update(builddefaults['config']) -            # if the user didn't actually provide proxy values, delete the proxy env variable defaults.              
delete_empty_keys(facts['master']['admission_plugin_config']['BuildDefaults']['configuration']['env'])      return facts diff --git a/roles/openshift_hosted_templates/files/v1.3/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v1.3/enterprise/registry-console.yaml index 11478263c..72754df2e 100644 --- a/roles/openshift_hosted_templates/files/v1.3/enterprise/registry-console.yaml +++ b/roles/openshift_hosted_templates/files/v1.3/enterprise/registry-console.yaml @@ -89,7 +89,7 @@ objects:          - annotations: null            from:              kind: DockerImage -            name: ${IMAGE_PREFIX}registry-console +            name: ${IMAGE_PREFIX}registry-console:${IMAGE_VERSION}            name: ${IMAGE_VERSION}    - kind: OAuthClient      apiVersion: v1 diff --git a/roles/openshift_hosted_templates/files/v1.3/origin/registry-console.yaml b/roles/openshift_hosted_templates/files/v1.3/origin/registry-console.yaml index 80cc4233b..6811ece28 100644 --- a/roles/openshift_hosted_templates/files/v1.3/origin/registry-console.yaml +++ b/roles/openshift_hosted_templates/files/v1.3/origin/registry-console.yaml @@ -89,7 +89,7 @@ objects:          - annotations: null            from:              kind: DockerImage -            name: ${IMAGE_NAME} +            name: ${IMAGE_NAME}:${IMAGE_VERSION}            name: ${IMAGE_VERSION}    - kind: OAuthClient      apiVersion: v1 diff --git a/roles/openshift_hosted_templates/files/v1.4/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v1.4/enterprise/registry-console.yaml index 0e3d006a7..298f8039e 100644 --- a/roles/openshift_hosted_templates/files/v1.4/enterprise/registry-console.yaml +++ b/roles/openshift_hosted_templates/files/v1.4/enterprise/registry-console.yaml @@ -89,7 +89,7 @@ objects:          - annotations: null            from:              kind: DockerImage -            name: ${IMAGE_PREFIX}registry-console +            name: ${IMAGE_PREFIX}registry-console:${IMAGE_VERSION}            name: ${IMAGE_VERSION}    - kind: OAuthClient      apiVersion: v1 diff --git a/roles/openshift_hosted_templates/files/v1.4/origin/registry-console.yaml b/roles/openshift_hosted_templates/files/v1.4/origin/registry-console.yaml index 80cc4233b..6811ece28 100644 --- a/roles/openshift_hosted_templates/files/v1.4/origin/registry-console.yaml +++ b/roles/openshift_hosted_templates/files/v1.4/origin/registry-console.yaml @@ -89,7 +89,7 @@ objects:          - annotations: null            from:              kind: DockerImage -            name: ${IMAGE_NAME} +            name: ${IMAGE_NAME}:${IMAGE_VERSION}            name: ${IMAGE_VERSION}    - kind: OAuthClient      apiVersion: v1 diff --git a/roles/openshift_hosted_templates/files/v1.5/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v1.5/enterprise/registry-console.yaml index 28feac4e6..dace26793 100644 --- a/roles/openshift_hosted_templates/files/v1.5/enterprise/registry-console.yaml +++ b/roles/openshift_hosted_templates/files/v1.5/enterprise/registry-console.yaml @@ -89,7 +89,7 @@ objects:          - annotations: null            from:              kind: DockerImage -            name: ${IMAGE_PREFIX}registry-console +            name: ${IMAGE_PREFIX}registry-console:${IMAGE_VERSION}            name: ${IMAGE_VERSION}    - kind: OAuthClient      apiVersion: v1 diff --git a/roles/openshift_hosted_templates/files/v1.5/origin/registry-console.yaml b/roles/openshift_hosted_templates/files/v1.5/origin/registry-console.yaml index 80cc4233b..6811ece28 
100644 --- a/roles/openshift_hosted_templates/files/v1.5/origin/registry-console.yaml +++ b/roles/openshift_hosted_templates/files/v1.5/origin/registry-console.yaml @@ -89,7 +89,7 @@ objects:          - annotations: null            from:              kind: DockerImage -            name: ${IMAGE_NAME} +            name: ${IMAGE_NAME}:${IMAGE_VERSION}            name: ${IMAGE_VERSION}    - kind: OAuthClient      apiVersion: v1 diff --git a/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml index 8bf98ba41..f821efd6b 100644 --- a/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml +++ b/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml @@ -89,7 +89,7 @@ objects:          - annotations: null            from:              kind: DockerImage -            name: ${IMAGE_PREFIX}registry-console +            name: ${IMAGE_PREFIX}registry-console:${IMAGE_VERSION}            name: ${IMAGE_VERSION}    - kind: OAuthClient      apiVersion: v1 diff --git a/roles/openshift_hosted_templates/files/v3.6/origin/registry-console.yaml b/roles/openshift_hosted_templates/files/v3.6/origin/registry-console.yaml index 80cc4233b..6811ece28 100644 --- a/roles/openshift_hosted_templates/files/v3.6/origin/registry-console.yaml +++ b/roles/openshift_hosted_templates/files/v3.6/origin/registry-console.yaml @@ -89,7 +89,7 @@ objects:          - annotations: null            from:              kind: DockerImage -            name: ${IMAGE_NAME} +            name: ${IMAGE_NAME}:${IMAGE_VERSION}            name: ${IMAGE_VERSION}    - kind: OAuthClient      apiVersion: v1 diff --git a/roles/openshift_hosted_templates/files/v3.7/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v3.7/enterprise/registry-console.yaml index bbaf76c17..019d836fe 100644 --- a/roles/openshift_hosted_templates/files/v3.7/enterprise/registry-console.yaml +++ b/roles/openshift_hosted_templates/files/v3.7/enterprise/registry-console.yaml @@ -89,7 +89,7 @@ objects:          - annotations: null            from:              kind: DockerImage -            name: ${IMAGE_PREFIX}registry-console +            name: ${IMAGE_PREFIX}registry-console:${IMAGE_VERSION}            name: ${IMAGE_VERSION}    - kind: OAuthClient      apiVersion: v1 diff --git a/roles/openshift_hosted_templates/files/v3.7/origin/registry-console.yaml b/roles/openshift_hosted_templates/files/v3.7/origin/registry-console.yaml index 80cc4233b..6811ece28 100644 --- a/roles/openshift_hosted_templates/files/v3.7/origin/registry-console.yaml +++ b/roles/openshift_hosted_templates/files/v3.7/origin/registry-console.yaml @@ -89,7 +89,7 @@ objects:          - annotations: null            from:              kind: DockerImage -            name: ${IMAGE_NAME} +            name: ${IMAGE_NAME}:${IMAGE_VERSION}            name: ${IMAGE_VERSION}    - kind: OAuthClient      apiVersion: v1 diff --git a/roles/openshift_manageiq/vars/main.yml b/roles/openshift_manageiq/vars/main.yml index 7ccc2fc3b..f142f89f0 100644 --- a/roles/openshift_manageiq/vars/main.yml +++ b/roles/openshift_manageiq/vars/main.yml @@ -3,6 +3,9 @@ manage_iq_tasks:  - resource_kind: role    resource_name: admin    user: management-admin +- resource_kind: role +  resource_name: admin +  user: system:serviceaccount:management-infra:management-admin  - resource_kind: cluster-role    resource_name: management-infra-admin    user: 
system:serviceaccount:management-infra:management-admin diff --git a/roles/openshift_master/tasks/update_etcd_client_urls.yml b/roles/openshift_master/tasks/update_etcd_client_urls.yml new file mode 100644 index 000000000..1ab105808 --- /dev/null +++ b/roles/openshift_master/tasks/update_etcd_client_urls.yml @@ -0,0 +1,8 @@ +--- +- yedit: +    src: "{{ openshift.common.config_base }}/master/master-config.yaml" +    key: 'etcdClientInfo.urls' +    value: "{{ openshift.master.etcd_urls }}" +  notify: +  - restart master api +  - restart master controllers diff --git a/roles/openshift_master_facts/filter_plugins/openshift_master.py b/roles/openshift_master_facts/filter_plugins/openshift_master.py index 5558f55cb..56c864ec7 100644 --- a/roles/openshift_master_facts/filter_plugins/openshift_master.py +++ b/roles/openshift_master_facts/filter_plugins/openshift_master.py @@ -380,11 +380,6 @@ class OpenIDIdentityProvider(IdentityProviderOauthBase):          if 'extra_authorize_parameters' in self._idp:              self._idp['extraAuthorizeParameters'] = self._idp.pop('extra_authorize_parameters') -        if 'extraAuthorizeParameters' in self._idp: -            if 'include_granted_scopes' in self._idp['extraAuthorizeParameters']: -                val = ansible_bool(self._idp['extraAuthorizeParameters'].pop('include_granted_scopes')) -                self._idp['extraAuthorizeParameters']['include_granted_scopes'] = '"true"' if val else '"false"' -      def validate(self):          ''' validate this idp instance '''          IdentityProviderOauthBase.validate(self) diff --git a/roles/openshift_node/tasks/config.yml b/roles/openshift_node/tasks/config.yml index 7af3f54b5..2759188f3 100644 --- a/roles/openshift_node/tasks/config.yml +++ b/roles/openshift_node/tasks/config.yml @@ -2,18 +2,6 @@  - name: Install the systemd units    include: systemd_units.yml -- name: Check for tuned package -  command: rpm -q tuned -  args: -    warn: no -  register: tuned_installed -  changed_when: false -  failed_when: false - -- name: Set atomic-guest tuned profile -  command: "tuned-adm profile atomic-guest" -  when: tuned_installed.rc == 0 and openshift.common.is_atomic | bool -  - name: Start and enable openvswitch service    systemd:      name: openvswitch.service @@ -107,5 +95,9 @@      msg: Node failed to start please inspect the logs and try again    when: node_start_result | failed +- name: Setup tuned +  include: tuned.yml +  static: yes +  - set_fact:      node_service_status_changed: "{{ node_start_result | changed }}" diff --git a/roles/openshift_node/tasks/install.yml b/roles/openshift_node/tasks/install.yml index 02b8ee67c..265bf2c46 100644 --- a/roles/openshift_node/tasks/install.yml +++ b/roles/openshift_node/tasks/install.yml @@ -1,11 +1,9 @@  --- -# We have to add tuned-profiles in the same transaction otherwise we run into depsolving -# problems because the rpms don't pin the version properly. This was fixed in 3.1 packaging.  
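For context on the `oo_image_tag_to_rpm_version` filter that appears in the node package name below: it turns an image tag such as `v3.7.0` into the matching RPM version suffix (`-3.7.0` when `include_dash=True`), which is why the inventory examples pin `openshift_image_tag=v3.7.0` but `openshift_pkg_version=-3.7.0`. A rough sketch of the expected behavior (a hypothetical re-implementation for illustration, not the repo's actual filter code):

```
def image_tag_to_rpm_version(tag, include_dash=False):
    """Hypothetical stand-in for the repo's filter; edge-case handling may differ."""
    version = tag[1:] if tag.startswith('v') else tag  # "v3.7.0" -> "3.7.0"
    if include_dash and version:
        version = '-' + version                        # "3.7.0" -> "-3.7.0"
    return version

assert image_tag_to_rpm_version('v3.7.0', include_dash=True) == '-3.7.0'
assert image_tag_to_rpm_version('', include_dash=True) == ''  # unset openshift_pkg_version
```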
- when: not openshift.common.is_containerized | bool    block:    - name: Install Node package      package: -      name: "{{ openshift.common.service_type }}-node{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }},tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}" +      name: "{{ openshift.common.service_type }}-node{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}"        state: present    - name: Install sdn-ovs package diff --git a/roles/openshift_storage_glusterfs/README.md b/roles/openshift_storage_glusterfs/README.md index a059745a6..d0bc0e028 100644 --- a/roles/openshift_storage_glusterfs/README.md +++ b/roles/openshift_storage_glusterfs/README.md @@ -76,10 +76,11 @@ GlusterFS cluster into a new or existing OpenShift cluster:  | Name                                             | Default value           | Description                             |  |--------------------------------------------------|-------------------------|-----------------------------------------|  | openshift_storage_glusterfs_timeout              | 300                     | Seconds to wait for pods to become ready -| openshift_storage_glusterfs_namespace            | 'glusterfs'             | Namespace in which to create GlusterFS resources +| openshift_storage_glusterfs_namespace            | 'glusterfs'             | Namespace/project in which to create GlusterFS resources  | openshift_storage_glusterfs_is_native            | True                    | GlusterFS should be containerized  | openshift_storage_glusterfs_name                 | 'storage'               | A name to identify the GlusterFS cluster, which will be used in resource names  | openshift_storage_glusterfs_nodeselector         | 'glusterfs=storage-host'| Selector to determine which nodes will host GlusterFS pods in native mode. **NOTE:** The label value is taken from the cluster name +| openshift_storage_glusterfs_use_default_selector | False                   | Whether to use a default node selector for the GlusterFS namespace/project. If False, the namespace/project will have no restricting node selector. If True, uses pre-existing or default (e.g. osm_default_node_selector) node selectors. **NOTE:** If True, nodes which will host GlusterFS pods must already have the additional labels.  
| openshift_storage_glusterfs_storageclass         | True                    | Automatically create a StorageClass for each GlusterFS cluster  | openshift_storage_glusterfs_image                | 'gluster/gluster-centos'| Container image to use for GlusterFS pods, enterprise default is 'rhgs3/rhgs-server-rhel7'  | openshift_storage_glusterfs_version              | 'latest'                | Container image version to use for GlusterFS pods @@ -91,7 +92,7 @@ GlusterFS cluster into a new or existing OpenShift cluster:  | openshift_storage_glusterfs_heketi_admin_key     | auto-generated          | String to use as secret key for performing heketi commands as admin  | openshift_storage_glusterfs_heketi_user_key      | auto-generated          | String to use as secret key for performing heketi commands as user that can only view or modify volumes  | openshift_storage_glusterfs_heketi_topology_load | True                    | Load the GlusterFS topology information into heketi -| openshift_storage_glusterfs_heketi_url           | Undefined               | When heketi is native, this sets the hostname portion of the final heketi route URL. When heketi is external, this is the full URL to the heketi service. +| openshift_storage_glusterfs_heketi_url           | Undefined               | When heketi is native, this sets the hostname portion of the final heketi route URL. When heketi is external, this is the FQDN or IP address to the heketi service.  | openshift_storage_glusterfs_heketi_port          | 8080                    | TCP port for external heketi service **NOTE:** This has no effect in native mode  | openshift_storage_glusterfs_heketi_executor      | 'kubernetes'            | Selects how a native heketi service will manage GlusterFS nodes: 'kubernetes' for native nodes, 'ssh' for external nodes  | openshift_storage_glusterfs_heketi_ssh_port      | 22                      | SSH port for external GlusterFS nodes via native heketi diff --git a/roles/openshift_storage_glusterfs/defaults/main.yml b/roles/openshift_storage_glusterfs/defaults/main.yml index 0b3d3aef1..148549887 100644 --- a/roles/openshift_storage_glusterfs/defaults/main.yml +++ b/roles/openshift_storage_glusterfs/defaults/main.yml @@ -3,6 +3,7 @@ openshift_storage_glusterfs_timeout: 300  openshift_storage_glusterfs_is_native: True  openshift_storage_glusterfs_name: 'storage'  openshift_storage_glusterfs_nodeselector: "glusterfs={{ openshift_storage_glusterfs_name }}-host" +openshift_storage_glusterfs_use_default_selector: False  openshift_storage_glusterfs_storageclass: True  openshift_storage_glusterfs_image: "{{ 'rhgs3/rhgs-server-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'gluster/gluster-centos' | quote }}"  openshift_storage_glusterfs_version: 'latest' @@ -31,6 +32,7 @@ openshift_storage_glusterfs_registry_namespace: "{{ openshift.hosted.registry.na  openshift_storage_glusterfs_registry_is_native: "{{ openshift_storage_glusterfs_is_native }}"  openshift_storage_glusterfs_registry_name: 'registry'  openshift_storage_glusterfs_registry_nodeselector: "glusterfs={{ openshift_storage_glusterfs_registry_name }}-host" +openshift_storage_glusterfs_registry_use_default_selector: "{{ openshift_storage_glusterfs_use_default_selector }}"  openshift_storage_glusterfs_registry_storageclass: False  openshift_storage_glusterfs_registry_image: "{{ openshift_storage_glusterfs_image }}"  openshift_storage_glusterfs_registry_version: "{{ openshift_storage_glusterfs_version }}" @@ -58,9 +60,9 @@ 
r_openshift_storage_glusterfs_os_firewall_deny: []  r_openshift_storage_glusterfs_os_firewall_allow:  - service: glusterfs_sshd    port: "2222/tcp" -- service: glusterfs_daemon -  port: "24007/tcp"  - service: glusterfs_management +  port: "24007/tcp" +- service: glusterfs_rdma    port: "24008/tcp"  - service: glusterfs_bricks    port: "49152-49251/tcp" diff --git a/roles/openshift_storage_glusterfs/files/v3.7/deploy-heketi-template.yml b/roles/openshift_storage_glusterfs/files/v3.7/deploy-heketi-template.yml new file mode 100644 index 000000000..9ebb0d5ec --- /dev/null +++ b/roles/openshift_storage_glusterfs/files/v3.7/deploy-heketi-template.yml @@ -0,0 +1,143 @@ +--- +kind: Template +apiVersion: v1 +metadata: +  name: deploy-heketi +  labels: +    glusterfs: heketi-template +    deploy-heketi: support +  annotations: +    description: Bootstrap Heketi installation +    tags: glusterfs,heketi,installation +objects: +- kind: Service +  apiVersion: v1 +  metadata: +    name: deploy-heketi-${CLUSTER_NAME} +    labels: +      glusterfs: deploy-heketi-${CLUSTER_NAME}-service +      deploy-heketi: support +    annotations: +      description: Exposes Heketi service +  spec: +    ports: +    - name: deploy-heketi-${CLUSTER_NAME} +      port: 8080 +      targetPort: 8080 +    selector: +      glusterfs: deploy-heketi-${CLUSTER_NAME}-pod +- kind: Route +  apiVersion: v1 +  metadata: +    name: ${HEKETI_ROUTE} +    labels: +      glusterfs: deploy-heketi-${CLUSTER_NAME}-route +      deploy-heketi: support +  spec: +    to: +      kind: Service +      name: deploy-heketi-${CLUSTER_NAME} +- kind: DeploymentConfig +  apiVersion: v1 +  metadata: +    name: deploy-heketi-${CLUSTER_NAME} +    labels: +      glusterfs: deploy-heketi-${CLUSTER_NAME}-dc +      deploy-heketi: support +    annotations: +      description: Defines how to deploy Heketi +  spec: +    replicas: 1 +    selector: +      glusterfs: deploy-heketi-${CLUSTER_NAME}-pod +    triggers: +    - type: ConfigChange +    strategy: +      type: Recreate +    template: +      metadata: +        name: deploy-heketi +        labels: +          glusterfs: deploy-heketi-${CLUSTER_NAME}-pod +          deploy-heketi: support +      spec: +        serviceAccountName: heketi-${CLUSTER_NAME}-service-account +        containers: +        - name: heketi +          image: ${IMAGE_NAME}:${IMAGE_VERSION} +          env: +          - name: HEKETI_USER_KEY +            value: ${HEKETI_USER_KEY} +          - name: HEKETI_ADMIN_KEY +            value: ${HEKETI_ADMIN_KEY} +          - name: HEKETI_EXECUTOR +            value: ${HEKETI_EXECUTOR} +          - name: HEKETI_FSTAB +            value: /var/lib/heketi/fstab +          - name: HEKETI_SNAPSHOT_LIMIT +            value: '14' +          - name: HEKETI_KUBE_GLUSTER_DAEMONSET +            value: '1' +          - name: HEKETI_KUBE_NAMESPACE +            value: ${HEKETI_KUBE_NAMESPACE} +          ports: +          - containerPort: 8080 +          volumeMounts: +          - name: db +            mountPath: /var/lib/heketi +          - name: topology +            mountPath: ${TOPOLOGY_PATH} +          - name: config +            mountPath: /etc/heketi +          readinessProbe: +            timeoutSeconds: 3 +            initialDelaySeconds: 3 +            httpGet: +              path: /hello +              port: 8080 +          livenessProbe: +            timeoutSeconds: 3 +            initialDelaySeconds: 30 +            httpGet: +              path: /hello +              port: 8080 +        volumes: +        - 
diff --git a/roles/openshift_storage_glusterfs/files/v3.7/glusterfs-template.yml b/roles/openshift_storage_glusterfs/files/v3.7/glusterfs-template.yml
new file mode 100644
index 000000000..8c5e1ded3
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.7/glusterfs-template.yml
@@ -0,0 +1,136 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+  name: glusterfs
+  labels:
+    glusterfs: template
+  annotations:
+    description: GlusterFS DaemonSet template
+    tags: glusterfs
+objects:
+- kind: DaemonSet
+  apiVersion: extensions/v1beta1
+  metadata:
+    name: glusterfs-${CLUSTER_NAME}
+    labels:
+      glusterfs: ${CLUSTER_NAME}-daemonset
+    annotations:
+      description: GlusterFS DaemonSet
+      tags: glusterfs
+  spec:
+    selector:
+      matchLabels:
+        glusterfs: ${CLUSTER_NAME}-pod
+    template:
+      metadata:
+        name: glusterfs-${CLUSTER_NAME}
+        labels:
+          glusterfs: ${CLUSTER_NAME}-pod
+          glusterfs-node: pod
+      spec:
+        nodeSelector: "${{NODE_LABELS}}"
+        hostNetwork: true
+        containers:
+        - name: glusterfs
+          image: ${IMAGE_NAME}:${IMAGE_VERSION}
+          imagePullPolicy: IfNotPresent
+          volumeMounts:
+          - name: glusterfs-heketi
+            mountPath: "/var/lib/heketi"
+          - name: glusterfs-run
+            mountPath: "/run"
+          - name: glusterfs-lvm
+            mountPath: "/run/lvm"
+          - name: glusterfs-etc
+            mountPath: "/etc/glusterfs"
+          - name: glusterfs-logs
+            mountPath: "/var/log/glusterfs"
+          - name: glusterfs-config
+            mountPath: "/var/lib/glusterd"
+          - name: glusterfs-dev
+            mountPath: "/dev"
+          - name: glusterfs-misc
+            mountPath: "/var/lib/misc/glusterfsd"
+          - name: glusterfs-cgroup
+            mountPath: "/sys/fs/cgroup"
+            readOnly: true
+          - name: glusterfs-ssl
+            mountPath: "/etc/ssl"
+            readOnly: true
+          securityContext:
+            capabilities: {}
+            privileged: true
+          readinessProbe:
+            timeoutSeconds: 3
+            initialDelaySeconds: 40
+            exec:
+              command:
+              - "/bin/bash"
+              - "-c"
+              - systemctl status glusterd.service
+            periodSeconds: 25
+            successThreshold: 1
+            failureThreshold: 15
+          livenessProbe:
+            timeoutSeconds: 3
+            initialDelaySeconds: 40
+            exec:
+              command:
+              - "/bin/bash"
+              - "-c"
+              - systemctl status glusterd.service
+            periodSeconds: 25
+            successThreshold: 1
+            failureThreshold: 15
+          resources: {}
+          terminationMessagePath: "/dev/termination-log"
+        volumes:
+        - name: glusterfs-heketi
+          hostPath:
+            path: "/var/lib/heketi"
+        - name: glusterfs-run
+          emptyDir: {}
+        - name: glusterfs-lvm
+          hostPath:
+            path: "/run/lvm"
+        - name: glusterfs-etc
+          hostPath:
+            path: "/etc/glusterfs"
+        - name: glusterfs-logs
+          hostPath:
+            path: "/var/log/glusterfs"
+        - name: glusterfs-config
+          hostPath:
+            path: "/var/lib/glusterd"
+        - name: glusterfs-dev
+          hostPath:
+            path: "/dev"
+        - name: glusterfs-misc
+          hostPath:
+            path: "/var/lib/misc/glusterfsd"
+        - name: glusterfs-cgroup
+          hostPath:
+            path: "/sys/fs/cgroup"
+        - name: glusterfs-ssl
+          hostPath:
+            path: "/etc/ssl"
+        restartPolicy: Always
+        terminationGracePeriodSeconds: 30
+        dnsPolicy: ClusterFirst
+        securityContext: {}
+parameters:
+- name: NODE_LABELS
+  displayName: Daemonset Node Labels
+  description: Labels which define the daemonset node selector. Must contain at least one label of the format \'glusterfs=<CLUSTER_NAME>-host\'
+  value: '{ "glusterfs": "storage-host" }'
+- name: IMAGE_NAME
+  displayName: GlusterFS container image name
+  required: True
+- name: IMAGE_VERSION
+  displayName: GlusterFS container image version
+  required: True
+- name: CLUSTER_NAME
+  displayName: GlusterFS cluster name
+  description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances
+  value: storage
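The `nodeSelector: "${{NODE_LABELS}}"` line uses OpenShift's non-string template parameter syntax: the parameter's JSON value is injected as a structured field rather than as a quoted string. A minimal sketch of the resulting pod spec fragment, assuming the default `NODE_LABELS` value from this template:

```
# Pod spec fragment as rendered from the default NODE_LABELS value
# '{ "glusterfs": "storage-host" }'; the DaemonSet runs one pod on
# every node carrying this label.
spec:
  nodeSelector:
    glusterfs: storage-host
  hostNetwork: true
```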
diff --git a/roles/openshift_storage_glusterfs/files/v3.7/heketi-template.yml b/roles/openshift_storage_glusterfs/files/v3.7/heketi-template.yml
new file mode 100644
index 000000000..61b6a8c13
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.7/heketi-template.yml
@@ -0,0 +1,134 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+  name: heketi
+  labels:
+    glusterfs: heketi-template
+  annotations:
+    description: Heketi service deployment template
+    tags: glusterfs,heketi
+objects:
+- kind: Service
+  apiVersion: v1
+  metadata:
+    name: heketi-${CLUSTER_NAME}
+    labels:
+      glusterfs: heketi-${CLUSTER_NAME}-service
+    annotations:
+      description: Exposes Heketi service
+  spec:
+    ports:
+    - name: heketi
+      port: 8080
+      targetPort: 8080
+    selector:
+      glusterfs: heketi-${CLUSTER_NAME}-pod
+- kind: Route
+  apiVersion: v1
+  metadata:
+    name: ${HEKETI_ROUTE}
+    labels:
+      glusterfs: heketi-${CLUSTER_NAME}-route
+  spec:
+    to:
+      kind: Service
+      name: heketi-${CLUSTER_NAME}
+- kind: DeploymentConfig
+  apiVersion: v1
+  metadata:
+    name: heketi-${CLUSTER_NAME}
+    labels:
+      glusterfs: heketi-${CLUSTER_NAME}-dc
+    annotations:
+      description: Defines how to deploy Heketi
+  spec:
+    replicas: 1
+    selector:
+      glusterfs: heketi-${CLUSTER_NAME}-pod
+    triggers:
+    - type: ConfigChange
+    strategy:
+      type: Recreate
+    template:
+      metadata:
+        name: heketi-${CLUSTER_NAME}
+        labels:
+          glusterfs: heketi-${CLUSTER_NAME}-pod
+      spec:
+        serviceAccountName: heketi-${CLUSTER_NAME}-service-account
+        containers:
+        - name: heketi
+          image: ${IMAGE_NAME}:${IMAGE_VERSION}
+          imagePullPolicy: IfNotPresent
+          env:
+          - name: HEKETI_USER_KEY
+            value: ${HEKETI_USER_KEY}
+          - name: HEKETI_ADMIN_KEY
+            value: ${HEKETI_ADMIN_KEY}
+          - name: HEKETI_EXECUTOR
+            value: ${HEKETI_EXECUTOR}
+          - name: HEKETI_FSTAB
+            value: /var/lib/heketi/fstab
+          - name: HEKETI_SNAPSHOT_LIMIT
+            value: '14'
+          - name: HEKETI_KUBE_GLUSTER_DAEMONSET
+            value: '1'
+          - name: HEKETI_KUBE_NAMESPACE
+            value: ${HEKETI_KUBE_NAMESPACE}
+          ports:
+          - containerPort: 8080
+          volumeMounts:
+          - name: db
+            mountPath: /var/lib/heketi
+          - name: config
+            mountPath: /etc/heketi
+          readinessProbe:
+            timeoutSeconds: 3
+            initialDelaySeconds: 3
+            httpGet:
+              path: /hello
+              port: 8080
+          livenessProbe:
+            timeoutSeconds: 3
+            initialDelaySeconds: 30
+            httpGet:
+              path: /hello
+              port: 8080
+        volumes:
+        - name: db
+          glusterfs:
+            endpoints: heketi-db-${CLUSTER_NAME}-endpoints
+            path: heketidbstorage
+        - name: config
+          secret:
+            secretName: heketi-${CLUSTER_NAME}-config-secret
+parameters:
+- name: HEKETI_USER_KEY
+  displayName: Heketi User Secret
+  description: Set secret for those creating volumes as type _user_
+- name: HEKETI_ADMIN_KEY
+  displayName: Heketi Administrator Secret
+  description: Set secret for administration of the Heketi service as user _admin_
+- name: HEKETI_EXECUTOR
+  displayName: heketi executor type
+  description: Set the executor type, kubernetes or ssh
+  value: kubernetes
+- name: HEKETI_KUBE_NAMESPACE
+  displayName: Namespace
+  description: Set the namespace where the GlusterFS pods reside
+  value: default
+- name: HEKETI_ROUTE
+  displayName: heketi route name
+  description: Set the hostname for the route URL
+  value: "heketi-glusterfs"
+- name: IMAGE_NAME
+  displayName: heketi container image name
+  required: True
+- name: IMAGE_VERSION
+  displayName: heketi container image version
+  required: True
+- name: CLUSTER_NAME
+  displayName: GlusterFS cluster name
+  description: A unique name to identify this heketi service, useful for running multiple heketi instances
+  value: glusterfs
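Unlike the bootstrap template, the long-running heketi pod persists its database on a GlusterFS volume rather than an ephemeral one. A sketch of the `db` volume as rendered with the default `CLUSTER_NAME=glusterfs`; `heketidbstorage` is the volume the deploy-heketi bootstrap is expected to create, and the matching endpoints object comes from the templates later in this patch:

```
# db volume as rendered with CLUSTER_NAME=glusterfs; it mounts the
# "heketidbstorage" GlusterFS volume through the
# heketi-db-glusterfs-endpoints object.
volumes:
- name: db
  glusterfs:
    endpoints: heketi-db-glusterfs-endpoints
    path: heketidbstorage
```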
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
index a31c5bd5e..bc0dde17d 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
@@ -15,6 +15,7 @@
   oc_project:
     state: present
     name: "{{ glusterfs_namespace }}"
+    node_selector: "{% if glusterfs_use_default_selector %}{{ omit }}{% endif %}"
   when: glusterfs_is_native or glusterfs_heketi_is_native or glusterfs_storageclass
 
 - name: Delete pre-existing heketi resources
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
index 7a2987883..012c722ff 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
@@ -5,6 +5,7 @@
     glusterfs_is_native: "{{ openshift_storage_glusterfs_is_native | bool }}"
     glusterfs_name: "{{ openshift_storage_glusterfs_name }}"
     glusterfs_nodeselector: "{{ openshift_storage_glusterfs_nodeselector | default(['storagenode', openshift_storage_glusterfs_name] | join('=')) | map_from_pairs }}"
+    glusterfs_use_default_selector: "{{ openshift_storage_glusterfs_use_default_selector }}"
     glusterfs_storageclass: "{{ openshift_storage_glusterfs_storageclass }}"
     glusterfs_image: "{{ openshift_storage_glusterfs_image }}"
     glusterfs_version: "{{ openshift_storage_glusterfs_version }}"
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
index 17f87578d..1bcab8e49 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
@@ -5,6 +5,7 @@
     glusterfs_is_native: "{{ openshift_storage_glusterfs_registry_is_native | bool }}"
     glusterfs_name: "{{ openshift_storage_glusterfs_registry_name }}"
     glusterfs_nodeselector: "{{ openshift_storage_glusterfs_registry_nodeselector | default(['storagenode', openshift_storage_glusterfs_registry_name] | join('=')) | map_from_pairs }}"
+    glusterfs_use_default_selector: "{{ openshift_storage_glusterfs_registry_use_default_selector }}"
     glusterfs_storageclass: "{{ openshift_storage_glusterfs_registry_storageclass }}"
     glusterfs_image: "{{ openshift_storage_glusterfs_registry_image }}"
    glusterfs_version: "{{ openshift_storage_glusterfs_registry_version }}"
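As the `oc_project` conditional reads, setting the new `*_use_default_selector` variable to true omits `node_selector`, so the project inherits the cluster's default node selector; leaving it false keeps the old behavior of an empty selector, which lets GlusterFS pods land on any node. A hedged inventory sketch (variable names come from the tasks above; the values are illustrative):

```
# Illustrative inventory variables: let the glusterfs and
# glusterfs-registry namespaces inherit the cluster default node
# selector instead of an empty one.
openshift_storage_glusterfs_use_default_selector: true
openshift_storage_glusterfs_registry_use_default_selector: true
```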
diff --git a/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-registry-endpoints.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-registry-endpoints.yml.j2
new file mode 100644
index 000000000..11c9195bb
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-registry-endpoints.yml.j2
@@ -0,0 +1,12 @@
+---
+apiVersion: v1
+kind: Endpoints
+metadata:
+  name: glusterfs-{{ glusterfs_name }}-endpoints
+subsets:
+- addresses:
+{% for node in glusterfs_nodes %}
+  - ip: {{ hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip) }}
+{% endfor %}
+  ports:
+  - port: 1
diff --git a/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-registry-service.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-registry-service.yml.j2
new file mode 100644
index 000000000..3f869d2b7
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-registry-service.yml.j2
@@ -0,0 +1,10 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: glusterfs-{{ glusterfs_name }}-endpoints
+spec:
+  ports:
+  - port: 1
+status:
+  loadBalancer: {}
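The service and endpoints deliberately share one name, since Kubernetes associates an Endpoints object with a Service by name; the GlusterFS volume plugin only consumes the address list, so `port: 1` is a placeholder. A sketch of the rendered endpoints, assuming `glusterfs_name=registry` and two nodes with made-up IPs:

```
# Rendered glusterfs-registry-endpoints.yml.j2, assuming
# glusterfs_name=registry and two nodes whose glusterfs_ip hostvars
# are 192.0.2.10 and 192.0.2.11 (illustrative addresses).
---
apiVersion: v1
kind: Endpoints
metadata:
  name: glusterfs-registry-endpoints
subsets:
- addresses:
  - ip: 192.0.2.10
  - ip: 192.0.2.11
  ports:
  - port: 1
```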
diff --git a/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-storageclass.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-storageclass.yml.j2
new file mode 100644
index 000000000..095fb780f
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.7/glusterfs-storageclass.yml.j2
@@ -0,0 +1,13 @@
+---
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: glusterfs-{{ glusterfs_name }}
+provisioner: kubernetes.io/glusterfs
+parameters:
+  resturl: "http://{% if glusterfs_heketi_is_native %}{{ glusterfs_heketi_route }}{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %}"
+  restuser: "admin"
+{% if glusterfs_heketi_admin_key is defined %}
+  secretNamespace: "{{ glusterfs_namespace }}"
+  secretName: "heketi-{{ glusterfs_name }}-admin-secret"
+{%- endif -%}
diff --git a/roles/openshift_storage_glusterfs/templates/v3.7/heketi-endpoints.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.7/heketi-endpoints.yml.j2
new file mode 100644
index 000000000..99cbdf748
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.7/heketi-endpoints.yml.j2
@@ -0,0 +1,12 @@
+---
+apiVersion: v1
+kind: Endpoints
+metadata:
+  name: heketi-db-{{ glusterfs_name }}-endpoints
+subsets:
+- addresses:
+{% for node in glusterfs_nodes %}
+  - ip: {{ hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip) }}
+{% endfor %}
+  ports:
+  - port: 1
diff --git a/roles/openshift_storage_glusterfs/templates/v3.7/heketi-service.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.7/heketi-service.yml.j2
new file mode 100644
index 000000000..dcb896441
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.7/heketi-service.yml.j2
@@ -0,0 +1,10 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: heketi-db-{{ glusterfs_name }}-endpoints
+spec:
+  ports:
+  - port: 1
+status:
+  loadBalancer: {}
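In the StorageClass, `resturl` points at the heketi route when heketi runs natively in the cluster, or at an external URL and port otherwise, and the secret reference is emitted only when an admin key is defined. A sketch of the rendered output under assumed values (`glusterfs_name=storage`, native heketi behind a hypothetical route hostname, admin key defined):

```
# Rendered glusterfs-storageclass.yml.j2 under assumed values:
# glusterfs_name=storage, glusterfs_namespace=glusterfs,
# glusterfs_heketi_is_native=true, and a hypothetical route hostname.
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: glusterfs-storage
provisioner: kubernetes.io/glusterfs
parameters:
  resturl: "http://heketi-storage.example.com"
  restuser: "admin"
  secretNamespace: "glusterfs"
  secretName: "heketi-storage-admin-secret"
```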
diff --git a/roles/openshift_storage_glusterfs/templates/v3.7/heketi.json.j2 b/roles/openshift_storage_glusterfs/templates/v3.7/heketi.json.j2
new file mode 100644
index 000000000..579b11bb7
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.7/heketi.json.j2
@@ -0,0 +1,36 @@
+{
+	"_port_comment": "Heketi Server Port Number",
+	"port" : "8080",
+
+	"_use_auth": "Enable JWT authorization. Please enable for deployment",
+	"use_auth" : false,
+
+	"_jwt" : "Private keys for access",
+	"jwt" : {
+		"_admin" : "Admin has access to all APIs",
+		"admin" : {
+			"key" : "My Secret"
+		},
+		"_user" : "User only has access to /volumes endpoint",
+		"user" : {
+			"key" : "My Secret"
+		}
+	},
+
+	"_glusterfs_comment": "GlusterFS Configuration",
+	"glusterfs" : {
+
+		"_executor_comment": "Execute plugin. Possible choices: mock, kubernetes, ssh",
+		"executor" : "{{ glusterfs_heketi_executor }}",
+
+		"_db_comment": "Database file name",
+		"db" : "/var/lib/heketi/heketi.db",
+
+		"sshexec" : {
+			"keyfile" : "/etc/heketi/private_key",
+			"port" : "{{ glusterfs_heketi_ssh_port }}",
+			"user" : "{{ glusterfs_heketi_ssh_user }}",
+			"sudo" : {{ glusterfs_heketi_ssh_sudo | lower }}
+		}
+	}
+}
diff --git a/roles/openshift_storage_glusterfs/templates/v3.7/topology.json.j2 b/roles/openshift_storage_glusterfs/templates/v3.7/topology.json.j2
new file mode 100644
index 000000000..d6c28f6dd
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.7/topology.json.j2
@@ -0,0 +1,49 @@
+{
+  "clusters": [
+{%- set clusters = {} -%}
+{%- for node in glusterfs_nodes -%}
+  {%- set cluster = hostvars[node].glusterfs_cluster if 'glusterfs_cluster' in hostvars[node] else '1' -%}
+  {%- if cluster in clusters -%}
+    {%- set _dummy = clusters[cluster].append(node) -%}
+  {%- else -%}
+    {%- set _dummy = clusters.update({cluster: [ node, ]}) -%}
+  {%- endif -%}
+{%- endfor -%}
+{%- for cluster in clusters -%}
+    {
+      "nodes": [
+{%- for node in clusters[cluster] -%}
+        {
+          "node": {
+            "hostnames": {
+              "manage": [
+{%- if 'glusterfs_hostname' in hostvars[node] -%}
+                "{{ hostvars[node].glusterfs_hostname }}"
+{%- elif 'openshift' in hostvars[node] -%}
+                "{{ hostvars[node].openshift.node.nodename }}"
+{%- else -%}
+                "{{ node }}"
+{%- endif -%}
+              ],
+              "storage": [
+{%- if 'glusterfs_ip' in hostvars[node] -%}
+                "{{ hostvars[node].glusterfs_ip }}"
+{%- else -%}
+                "{{ hostvars[node].openshift.common.ip }}"
+{%- endif -%}
+              ]
+            },
+            "zone": {{ hostvars[node].glusterfs_zone | default(1) }}
+          },
+          "devices": [
+{%- for device in hostvars[node].glusterfs_devices -%}
+            "{{ device }}"{% if not loop.last %},{% endif %}
+{%- endfor -%}
+          ]
+        }{% if not loop.last %},{% endif %}
+{%- endfor -%}
+      ]
+    }{% if not loop.last %},{% endif %}
+{%- endfor -%}
+  ]
+}
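topology.json.j2 groups nodes into clusters by their optional `glusterfs_cluster` hostvar (defaulting to a single cluster `'1'`), then emits each node's manage hostname, storage IP, zone, and raw block devices. A sketch of the rendered document for two hypothetical nodes in the default cluster; hostnames, IPs, zones, and devices are all made up, and whitespace may differ from the real output:

```
# Rendered topology.json.j2 (JSON, which is valid YAML) for two
# hypothetical nodes; all concrete values are illustrative.
{
  "clusters": [
    {
      "nodes": [
        {
          "node": {
            "hostnames": {
              "manage": ["node1.example.com"],
              "storage": ["192.0.2.10"]
            },
            "zone": 1
          },
          "devices": ["/dev/sdb", "/dev/sdc"]
        },
        {
          "node": {
            "hostnames": {
              "manage": ["node2.example.com"],
              "storage": ["192.0.2.11"]
            },
            "zone": 2
          },
          "devices": ["/dev/sdb"]
        }
      ]
    }
  ]
}
```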
