Diffstat (limited to 'roles')
-rw-r--r--  roles/docker/defaults/main.yml | 2
-rw-r--r--  roles/docker/meta/main.yml | 1
-rw-r--r--  roles/docker/tasks/package_docker.yml | 5
-rw-r--r--  roles/docker/tasks/registry_auth.yml | 16
-rw-r--r--  roles/docker/tasks/systemcontainer_crio.yml | 6
-rw-r--r--  roles/docker/tasks/systemcontainer_docker.yml | 8
-rw-r--r--  roles/docker/templates/crio.conf.j2 | 5
-rw-r--r--  roles/installer_checkpoint/README.md | 55
-rw-r--r--  roles/installer_checkpoint/callback_plugins/installer_checkpoint.py | 71
-rw-r--r--  roles/lib_openshift/library/oc_edit.py | 15
-rw-r--r--  roles/lib_openshift/src/ansible/oc_edit.py | 15
-rw-r--r--  roles/lib_utils/library/docker_creds.py | 207
-rw-r--r--  roles/openshift_aws/defaults/main.yml | 14
-rw-r--r--  roles/openshift_aws/files/describeinstances.json | 15
-rw-r--r--  roles/openshift_aws/files/trustpolicy.json | 12
-rw-r--r--  roles/openshift_aws/tasks/build_node_group.yml | 3
-rw-r--r--  roles/openshift_aws/tasks/iam_role.yml | 36
-rw-r--r--  roles/openshift_aws/tasks/launch_config_create.yml | 4
-rw-r--r--  roles/openshift_ca/tasks/main.yml | 4
-rw-r--r--  roles/openshift_cloud_provider/tasks/openstack.yml | 4
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py | 52
-rw-r--r--  roles/openshift_hosted/tasks/secure.yml | 2
-rw-r--r--  roles/openshift_hosted_metrics/handlers/main.yml | 4
-rw-r--r--  roles/openshift_logging/handlers/main.yml | 4
-rw-r--r--  roles/openshift_manage_node/tasks/main.yml | 4
-rw-r--r--  roles/openshift_master/defaults/main.yml | 1
-rw-r--r--  roles/openshift_master/handlers/main.yml | 4
-rw-r--r--  roles/openshift_master/tasks/main.yml | 8
-rw-r--r--  roles/openshift_master/tasks/registry_auth.yml | 20
-rw-r--r--  roles/openshift_master/templates/master.yaml.v1.j2 | 44
-rw-r--r--  roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2 | 4
-rw-r--r--  roles/openshift_master_certificates/tasks/main.yml | 6
-rw-r--r--  roles/openshift_master_cluster/tasks/main.yml | 4
-rw-r--r--  roles/openshift_master_facts/filter_plugins/openshift_master.py | 23
-rw-r--r--  roles/openshift_metrics/handlers/main.yml | 4
-rw-r--r--  roles/openshift_node/defaults/main.yml | 3
-rw-r--r--  roles/openshift_node/tasks/bootstrap.yml | 1
-rw-r--r--  roles/openshift_node/tasks/config.yml | 1
-rw-r--r--  roles/openshift_node/tasks/registry_auth.yml | 19
-rw-r--r--  roles/openshift_node/templates/node.yaml.v1.j2 | 2
-rw-r--r--  roles/openshift_node_certificates/tasks/main.yml | 4
-rw-r--r--  roles/openshift_node_upgrade/defaults/main.yml | 1
-rw-r--r--  roles/openshift_node_upgrade/tasks/main.yml | 8
-rw-r--r--  roles/openshift_node_upgrade/tasks/registry_auth.yml | 19
-rw-r--r--  roles/openshift_storage_glusterfs/README.md | 15
-rw-r--r--  roles/openshift_storage_glusterfs/defaults/main.yml | 31
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.7/deploy-heketi-template.yml | 12
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.7/gluster-s3-pvcs-template.yml | 67
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.7/gluster-s3-template.yml | 140
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.7/glusterblock-provisioner.yml | 105
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.7/glusterfs-template.yml | 20
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.7/heketi-template.yml | 16
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/gluster_s3_deploy.yml | 113
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterblock_deploy.yml | 66
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml | 96
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml | 15
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml | 1
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml | 15
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml | 2
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml | 2
-rw-r--r--  roles/template_service_broker/files/openshift-ansible-catalog-console.js | 1
-rw-r--r--  roles/template_service_broker/tasks/install.yml | 5
62 files changed, 1115 insertions(+), 347 deletions(-)
diff --git a/roles/docker/defaults/main.yml b/roles/docker/defaults/main.yml
index c086c28df..224844a06 100644
--- a/roles/docker/defaults/main.yml
+++ b/roles/docker/defaults/main.yml
@@ -2,6 +2,8 @@
docker_cli_auth_config_path: '/root/.docker'
openshift_docker_signature_verification: False
+openshift_docker_alternative_creds: False
+
# oreg_url is defined by user input.
oreg_host: "{{ oreg_url.split('/')[0] if (oreg_url is defined and '.' in oreg_url.split('/')[0]) else '' }}"
oreg_auth_credentials_replace: False
diff --git a/roles/docker/meta/main.yml b/roles/docker/meta/main.yml
index 62b8a2eb5..d5faae8df 100644
--- a/roles/docker/meta/main.yml
+++ b/roles/docker/meta/main.yml
@@ -12,3 +12,4 @@ galaxy_info:
dependencies:
- role: lib_openshift
- role: lib_os_firewall
+- role: lib_utils
diff --git a/roles/docker/tasks/package_docker.yml b/roles/docker/tasks/package_docker.yml
index c1aedf879..8121163a6 100644
--- a/roles/docker/tasks/package_docker.yml
+++ b/roles/docker/tasks/package_docker.yml
@@ -154,6 +154,7 @@
- set_fact:
docker_service_status_changed: "{{ (r_docker_package_docker_start_result | changed) and (r_docker_already_running_result.stdout != 'ActiveState=active' ) }}"
-- include: registry_auth.yml
-
- meta: flush_handlers
+
+# This needs to run after docker is restarted to account for proxy settings.
+- include: registry_auth.yml
diff --git a/roles/docker/tasks/registry_auth.yml b/roles/docker/tasks/registry_auth.yml
index d05b7f2b8..2c7bc5711 100644
--- a/roles/docker/tasks/registry_auth.yml
+++ b/roles/docker/tasks/registry_auth.yml
@@ -12,5 +12,21 @@
delay: 5
until: openshift_docker_credentials_create_res.rc == 0
when:
+ - not openshift_docker_alternative_creds | bool
+ - oreg_auth_user is defined
+ - (not docker_cli_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
+
+# docker_creds is a custom module from lib_utils
+# 'docker login' requires a docker.service running on the local host; this is an
+# alternative implementation for non-docker hosts. This implementation does not
+# check the registry to determine whether or not the credentials will work.
+- name: Create credentials for docker cli registry auth (alternative)
+ docker_creds:
+ path: "{{ docker_cli_auth_config_path }}"
+ registry: "{{ oreg_host }}"
+ username: "{{ oreg_auth_user }}"
+ password: "{{ oreg_auth_password }}"
+ when:
+ - openshift_docker_alternative_creds | bool
- oreg_auth_user is defined
- (not docker_cli_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
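The file written by the alternative task has the same shape `docker login` itself would produce. A minimal sketch, with hypothetical registry and credentials, of what lands in `{{ docker_cli_auth_config_path }}/config.json`:

```python
import base64
import json

# Hypothetical values; docker_creds base64-encodes "username:password"
# exactly as 'docker login' does.
auth = base64.b64encode(b'myuser:mypassword').decode('ascii')
config = {'auths': {'registry.example.com:443': {'auth': auth}}}
print(json.dumps(config, indent=8))
```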
diff --git a/roles/docker/tasks/systemcontainer_crio.yml b/roles/docker/tasks/systemcontainer_crio.yml
index 1e2d64293..3fe10454d 100644
--- a/roles/docker/tasks/systemcontainer_crio.yml
+++ b/roles/docker/tasks/systemcontainer_crio.yml
@@ -179,3 +179,9 @@
register: start_result
- meta: flush_handlers
+
+# If we are using crio only, docker.service might not be available for
+# 'docker login'
+- include: registry_auth.yml
+ vars:
+ openshift_docker_alternative_creds: "{{ l_use_crio_only }}"
diff --git a/roles/docker/tasks/systemcontainer_docker.yml b/roles/docker/tasks/systemcontainer_docker.yml
index aa3b35ddd..84220fa66 100644
--- a/roles/docker/tasks/systemcontainer_docker.yml
+++ b/roles/docker/tasks/systemcontainer_docker.yml
@@ -173,6 +173,10 @@
- set_fact:
docker_service_status_changed: "{{ r_docker_systemcontainer_docker_start_result | changed }}"
-- include: registry_auth.yml
-
- meta: flush_handlers
+
+# Since docker is running as a system container, docker login will fail to create
+# credentials. Use the alternative method when authenticated registries are required.
+- include: registry_auth.yml
+ vars:
+ openshift_docker_alternative_creds: True
diff --git a/roles/docker/templates/crio.conf.j2 b/roles/docker/templates/crio.conf.j2
index 93014a80d..3f066a17f 100644
--- a/roles/docker/templates/crio.conf.j2
+++ b/roles/docker/templates/crio.conf.j2
@@ -103,6 +103,11 @@ cgroup_manager = "systemd"
# hooks_dir_path is the oci hooks directory for automatically executed hooks
hooks_dir_path = "/usr/share/containers/oci/hooks.d"
+# default_mounts is the mounts list to be mounted for the container when created
+default_mounts = [
+ "/usr/share/rhel/secrets:/run/secrets",
+]
+
# pids_limit is the number of processes allowed in a container
pids_limit = 1024
diff --git a/roles/installer_checkpoint/README.md b/roles/installer_checkpoint/README.md
index 83e00e504..18d182ec9 100644
--- a/roles/installer_checkpoint/README.md
+++ b/roles/installer_checkpoint/README.md
@@ -92,31 +92,34 @@ phase/component and then a final play for setting `installer_phase_initialize` to
# common/openshift-cluster/std_include.yml
---
- name: Initialization Checkpoint Start
- hosts: oo_all_hosts
+ hosts: all
gather_facts: false
roles:
- installer_checkpoint
tasks:
- name: Set install initialization 'In Progress'
+ run_once: true
set_stats:
data:
- installer_phase_initialize: "In Progress"
- aggregate: false
+ installer_phase_initialize:
+ status: "In Progress"
+ start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
#...
# Various plays here
#...
- name: Initialization Checkpoint End
- hosts: localhost
- connection: local
+ hosts: all
gather_facts: false
tasks:
- name: Set install initialization 'Complete'
+ run_once: true
set_stats:
data:
- installer_phase_initialize: "Complete"
- aggregate: false
+ installer_phase_initialize:
+ status: "Complete"
+ end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
```
Each phase or component of the installer will follow a similar pattern, with the
@@ -139,37 +142,25 @@ localhost : ok=24 changed=0 unreachable=0 failed=0
INSTALLER STATUS ***************************************************************
-Initialization : Complete
-etcd Install : Complete
-NFS Install : Not Started
-Load balancer Install : Not Started
-Master Install : Complete
-Master Additional Install : Complete
-Node Install : Complete
-GlusterFS Install : Not Started
-Hosted Install : Complete
-Metrics Install : Not Started
-Logging Install : Not Started
-Service Catalog Install : Not Started
+Initialization : Complete (0:02:14)
+Health Check : Complete (0:01:10)
+etcd Install : Complete (0:02:01)
+Master Install : Complete (0:11:43)
+Master Additional Install : Complete (0:00:54)
+Node Install : Complete (0:14:11)
+Hosted Install : Complete (0:03:28)
```
Example display if a failure occurs during execution:
```
INSTALLER STATUS ***************************************************************
-Initialization : Complete
-etcd Install : Complete
-NFS Install : Not Started
-Load balancer Install : Not Started
-Master Install : In Progress
- This phase can be restarted by running: playbooks/byo/openshift-master/config.yml
-Master Additional Install : Not Started
-Node Install : Not Started
-GlusterFS Install : Not Started
-Hosted Install : Not Started
-Metrics Install : Not Started
-Logging Install : Not Started
-Service Catalog Install : Not Started
+Initialization : Complete (0:02:14)
+Health Check : Complete (0:01:10)
+etcd Install : Complete (0:02:58)
+Master Install : Complete (0:09:20)
+Master Additional Install : In Progress (0:20:04)
+ This phase can be restarted by running: playbooks/byo/openshift-master/additional_config.yml
```
[set_stats]: http://docs.ansible.com/ansible/latest/set_stats_module.html
diff --git a/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py b/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py
index b17358882..556e9127f 100644
--- a/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py
+++ b/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py
@@ -1,60 +1,10 @@
"""Ansible callback plugin to print a summary completion status of installation
phases.
"""
+from datetime import datetime
from ansible.plugins.callback import CallbackBase
from ansible import constants as C
-DOCUMENTATION = '''
-
-'''
-
-EXAMPLES = '''
----------------------------------------------
-Example display of a successful playbook run:
-
-PLAY RECAP *********************************************************************
-master01.example.com : ok=158 changed=16 unreachable=0 failed=0
-node01.example.com : ok=469 changed=74 unreachable=0 failed=0
-node02.example.com : ok=157 changed=17 unreachable=0 failed=0
-localhost : ok=24 changed=0 unreachable=0 failed=0
-
-
-INSTALLER STATUS ***************************************************************
-Initialization : Complete
-etcd Install : Complete
-NFS Install : Not Started
-Load balancer Install : Not Started
-Master Install : Complete
-Master Additional Install : Complete
-Node Install : Complete
-GlusterFS Install : Not Started
-Hosted Install : Complete
-Metrics Install : Not Started
-Logging Install : Not Started
-Prometheus Install : Not Started
-Service Catalog Install : Not Started
-
------------------------------------------------------
-Example display if a failure occurs during execution:
-
-INSTALLER STATUS ***************************************************************
-Initialization : Complete
-etcd Install : Complete
-NFS Install : Not Started
-Load balancer Install : Not Started
-Master Install : In Progress
- This phase can be restarted by running: playbooks/byo/openshift-master/config.yml
-Master Additional Install : Not Started
-Node Install : Not Started
-GlusterFS Install : Not Started
-Hosted Install : Not Started
-Metrics Install : Not Started
-Logging Install : Not Started
-Prometheus Install : Not Started
-Service Catalog Install : Not Started
-
-'''
-
class CallbackModule(CallbackBase):
"""This callback summarizes installation phase status."""
@@ -163,9 +113,10 @@ class CallbackModule(CallbackBase):
phase_title = phase_attributes[phase]['title']
padding = max_column - len(phase_title) + 2
if phase in stats.custom['_run']:
- phase_status = stats.custom['_run'][phase]
+ phase_status = stats.custom['_run'][phase]['status']
+ phase_time = phase_time_delta(stats.custom['_run'][phase])
self._display.display(
- '{}{}: {}'.format(phase_title, ' ' * padding, phase_status),
+ '{}{}: {} ({})'.format(phase_title, ' ' * padding, phase_status, phase_time),
color=self.phase_color(phase_status))
if phase_status == 'In Progress' and phase != 'installer_phase_initialize':
self._display.display(
@@ -192,3 +143,17 @@ class CallbackModule(CallbackBase):
phase_color = C.COLOR_WARN
return phase_color
+
+
+def phase_time_delta(phase):
+ """ Calculate the difference between phase start and end times """
+ time_format = '%Y%m%d%H%M%SZ'
+ phase_start = datetime.strptime(phase['start'], time_format)
+ if 'end' not in phase:
+ # The phase failed so set the end time to now
+ phase_end = datetime.now()
+ else:
+ phase_end = datetime.strptime(phase['end'], time_format)
+ delta = str(phase_end - phase_start).split(".")[0] # Trim microseconds
+
+ return delta
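The new `phase_time_delta` helper is what turns the set_stats timestamps into the durations shown in INSTALLER STATUS; a standalone sketch with hypothetical stamps:

```python
from datetime import datetime

TIME_FORMAT = '%Y%m%d%H%M%SZ'  # format written by lookup('pipe', 'date +%Y%m%d%H%M%SZ')

phase = {'start': '20171114120000Z', 'end': '20171114120214Z'}  # hypothetical stamps
start = datetime.strptime(phase['start'], TIME_FORMAT)
end = datetime.strptime(phase['end'], TIME_FORMAT)
print(str(end - start).split('.')[0])  # -> '0:02:14', as displayed next to the phase
```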
diff --git a/roles/lib_openshift/library/oc_edit.py b/roles/lib_openshift/library/oc_edit.py
index 0b6a8436b..0cea07256 100644
--- a/roles/lib_openshift/library/oc_edit.py
+++ b/roles/lib_openshift/library/oc_edit.py
@@ -1556,20 +1556,7 @@ def main():
debug=dict(default=False, type='bool'),
namespace=dict(default='default', type='str'),
name=dict(default=None, required=True, type='str'),
- kind=dict(required=True,
- type='str',
- choices=['dc', 'deploymentconfig',
- 'rc', 'replicationcontroller',
- 'svc', 'service',
- 'scc', 'securitycontextconstraints',
- 'ns', 'namespace', 'project', 'projects',
- 'is', 'imagestream',
- 'istag', 'imagestreamtag',
- 'bc', 'buildconfig',
- 'routes',
- 'node',
- 'secret',
- 'pv', 'persistentvolume']),
+ kind=dict(required=True, type='str'),
file_name=dict(default=None, type='str'),
file_format=dict(default='yaml', type='str'),
content=dict(default=None, required=True, type='dict'),
diff --git a/roles/lib_openshift/src/ansible/oc_edit.py b/roles/lib_openshift/src/ansible/oc_edit.py
index 5c5954747..221047393 100644
--- a/roles/lib_openshift/src/ansible/oc_edit.py
+++ b/roles/lib_openshift/src/ansible/oc_edit.py
@@ -15,20 +15,7 @@ def main():
debug=dict(default=False, type='bool'),
namespace=dict(default='default', type='str'),
name=dict(default=None, required=True, type='str'),
- kind=dict(required=True,
- type='str',
- choices=['dc', 'deploymentconfig',
- 'rc', 'replicationcontroller',
- 'svc', 'service',
- 'scc', 'securitycontextconstraints',
- 'ns', 'namespace', 'project', 'projects',
- 'is', 'imagestream',
- 'istag', 'imagestreamtag',
- 'bc', 'buildconfig',
- 'routes',
- 'node',
- 'secret',
- 'pv', 'persistentvolume']),
+ kind=dict(required=True, type='str'),
file_name=dict(default=None, type='str'),
file_format=dict(default='yaml', type='str'),
content=dict(default=None, required=True, type='dict'),
diff --git a/roles/lib_utils/library/docker_creds.py b/roles/lib_utils/library/docker_creds.py
new file mode 100644
index 000000000..d4674845e
--- /dev/null
+++ b/roles/lib_utils/library/docker_creds.py
@@ -0,0 +1,207 @@
+#!/usr/bin/env python
+# pylint: disable=missing-docstring
+#
+# Copyright 2017 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import base64
+import json
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+DOCUMENTATION = '''
+---
+module: docker_creds
+
+short_description: Creates/updates a 'docker login' file in place of using 'docker login'
+
+version_added: "2.4"
+
+description:
+    - This module creates a docker config.json file in the directory provided by 'path'
+      on hosts that do not support 'docker login' but need the file present so
+      that various other services can authenticate to the registry.
+
+options:
+ path:
+ description:
+            - The directory in which the config.json file will be created.
+ required: true
+ registry:
+ description:
+ - This is the registry the credentials are for.
+ required: true
+ username:
+ description:
+ - This is the username to authenticate to the registry with.
+ required: true
+ password:
+ description:
+ - This is the password to authenticate to the registry with.
+ required: true
+
+author:
+ - "Michael Gugino <mgugino@redhat.com>"
+'''
+
+EXAMPLES = '''
+# Write registry credentials into /root/.docker/config.json
+- name: Place credentials in file
+ docker_creds:
+ path: /root/.docker
+ registry: registry.example.com:443
+ username: myuser
+ password: mypassword
+'''
+
+
+def check_dest_dir_exists(module, dest):
+ '''Check if dest dir is present and is a directory'''
+ dir_exists = os.path.exists(dest)
+ if dir_exists:
+ if not os.path.isdir(dest):
+ msg = "{} exists but is not a directory".format(dest)
+ result = {'failed': True,
+ 'changed': False,
+ 'msg': msg,
+ 'state': 'unknown'}
+ module.fail_json(**result)
+ else:
+ return 1
+ else:
+ return 0
+
+
+def create_dest_dir(module, dest):
+ try:
+ os.makedirs(dest, mode=0o700)
+ except OSError as oserror:
+ result = {'failed': True,
+ 'changed': False,
+ 'msg': str(oserror),
+ 'state': 'unknown'}
+ module.fail_json(**result)
+
+
+def load_config_file(module, dest):
+ '''load the config.json in directory dest'''
+ conf_file_path = os.path.join(dest, 'config.json')
+ if os.path.exists(conf_file_path):
+ # Try to open the file and load json data
+ try:
+ with open(conf_file_path) as conf_file:
+ data = conf_file.read()
+ jdata = json.loads(data)
+
+ except IOError as ioerror:
+ result = {'failed': True,
+ 'changed': False,
+ 'msg': str(ioerror),
+ 'state': 'unknown'}
+ module.fail_json(**result)
+ except ValueError as jsonerror:
+ result = {'failed': True,
+ 'changed': False,
+ 'msg': str(jsonerror),
+ 'state': 'unknown'}
+ module.fail_json(**result)
+ return jdata
+ else:
+        # File doesn't exist; just return an empty dictionary.
+ return {}
+
+
+def update_config(docker_config, registry, username, password):
+ '''Add our registry auth credentials into docker_config dict'''
+
+ # Add anything that might be missing in our dictionary
+ if 'auths' not in docker_config:
+ docker_config['auths'] = {}
+ if registry not in docker_config['auths']:
+ docker_config['auths'][registry] = {}
+
+ # base64 encode our username:password string
+ encoded_data = base64.b64encode('{}:{}'.format(username, password))
+
+ # check if the same value is already present for idempotency.
+ if 'auth' in docker_config['auths'][registry]:
+ if docker_config['auths'][registry]['auth'] == encoded_data:
+ # No need to go further, everything is already set in file.
+ return False
+ docker_config['auths'][registry]['auth'] = encoded_data
+ return True
+
+
+def write_config(module, docker_config, dest):
+ '''Write updated credentials into dest/config.json'''
+ conf_file_path = os.path.join(dest, 'config.json')
+ try:
+ with open(conf_file_path, 'w') as conf_file:
+ json.dump(docker_config, conf_file, indent=8)
+ except IOError as ioerror:
+ result = {'failed': True,
+ 'changed': False,
+ 'msg': str(ioerror),
+ 'state': 'unknown'}
+ module.fail_json(**result)
+
+
+def run_module():
+ '''Run this module'''
+ module_args = dict(
+ path=dict(aliases=['dest', 'name'], required=True, type='path'),
+ registry=dict(type='str', required=True),
+ username=dict(type='str', required=True),
+ password=dict(type='str', required=True, no_log=True)
+ )
+
+ module = AnsibleModule(
+ argument_spec=module_args,
+ supports_check_mode=False
+ )
+
+ # First, create our dest dir if necessary
+ dest = module.params['path']
+ registry = module.params['registry']
+ username = module.params['username']
+ password = module.params['password']
+
+ if not check_dest_dir_exists(module, dest):
+ create_dest_dir(module, dest)
+ docker_config = {}
+ else:
+ # We want to scrape the contents of dest/config.json
+ # in case there are other registries/settings already present.
+ docker_config = load_config_file(module, dest)
+
+ # Put the registry auth info into the config dict.
+ changed = update_config(docker_config, registry, username, password)
+
+ if changed:
+ write_config(module, docker_config, dest)
+
+ result = {'changed': changed}
+
+ module.exit_json(**result)
+
+
+def main():
+ run_module()
+
+
+if __name__ == '__main__':
+ main()
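The module's idempotency hinges on `update_config()` returning False when the stored auth already matches, so the file is only rewritten on a real change. A standalone sketch of that behavior (the `.encode()`/`.decode()` calls are added so the sketch also runs on Python 3; the module itself passes a str under Python 2):

```python
import base64


def update_config(docker_config, registry, username, password):
    """Mirror docker_creds.update_config(): report True only on a real change."""
    docker_config.setdefault('auths', {}).setdefault(registry, {})
    encoded = base64.b64encode('{}:{}'.format(username, password).encode()).decode()
    if docker_config['auths'][registry].get('auth') == encoded:
        return False  # already present; the task reports ok, not changed
    docker_config['auths'][registry]['auth'] = encoded
    return True


config = {}
print(update_config(config, 'registry.example.com:443', 'myuser', 'secret'))  # True
print(update_config(config, 'registry.example.com:443', 'myuser', 'secret'))  # False
```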
diff --git a/roles/openshift_aws/defaults/main.yml b/roles/openshift_aws/defaults/main.yml
index 51f7d31c2..c9a429675 100644
--- a/roles/openshift_aws/defaults/main.yml
+++ b/roles/openshift_aws/defaults/main.yml
@@ -1,6 +1,7 @@
---
openshift_aws_create_s3: True
openshift_aws_create_iam_cert: True
+openshift_aws_create_iam_role: False
openshift_aws_create_security_groups: True
openshift_aws_create_launch_config: True
openshift_aws_create_scale_group: True
@@ -17,6 +18,10 @@ openshift_aws_iam_cert_path: ''
openshift_aws_iam_cert_key_path: ''
openshift_aws_scale_group_basename: "{{ openshift_aws_clusterid }} openshift"
+openshift_aws_iam_role_name: openshift_node_describe_instances
+openshift_aws_iam_role_policy_json: "{{ lookup('file', 'describeinstances.json') }}"
+openshift_aws_iam_role_policy_name: "describe_instances"
+
openshift_aws_iam_kms_alias: "alias/{{ openshift_aws_clusterid }}_kms"
openshift_aws_ami: ''
openshift_aws_ami_copy_wait: False
@@ -135,6 +140,9 @@ openshift_aws_master_group_config:
wait_for_instances: True
termination_policy: "{{ openshift_aws_node_group_termination_policy }}"
replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}"
+ iam_role: "{{ openshift_aws_iam_role_name }}"
+ policy_name: "{{ openshift_aws_iam_role_policy_name }}"
+ policy_json: "{{ openshift_aws_iam_role_policy_json }}"
elbs: "{{ openshift_aws_elb_name_dict['master'].keys()| map('extract', openshift_aws_elb_name_dict['master']) | list }}"
openshift_aws_node_group_config:
@@ -155,6 +163,9 @@ openshift_aws_node_group_config:
type: compute
termination_policy: "{{ openshift_aws_node_group_termination_policy }}"
replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}"
+ iam_role: "{{ openshift_aws_iam_role_name }}"
+ policy_name: "{{ openshift_aws_iam_role_policy_name }}"
+ policy_json: "{{ openshift_aws_iam_role_policy_json }}"
# The 'infra' key is always required here.
infra:
instance_type: m4.xlarge
@@ -172,6 +183,9 @@ openshift_aws_node_group_config:
type: infra
termination_policy: "{{ openshift_aws_node_group_termination_policy }}"
replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}"
+ iam_role: "{{ openshift_aws_iam_role_name }}"
+ policy_name: "{{ openshift_aws_iam_role_policy_name }}"
+ policy_json: "{{ openshift_aws_iam_role_policy_json }}"
elbs: "{{ openshift_aws_elb_name_dict['infra'].keys()| map('extract', openshift_aws_elb_name_dict['infra']) | list }}"
openshift_aws_elb_tags: "{{ openshift_aws_kube_tags }}"
diff --git a/roles/openshift_aws/files/describeinstances.json b/roles/openshift_aws/files/describeinstances.json
new file mode 100644
index 000000000..40de49721
--- /dev/null
+++ b/roles/openshift_aws/files/describeinstances.json
@@ -0,0 +1,15 @@
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Action": [
+ "ec2:DescribeInstances"
+ ],
+ "Resource": [
+ "*"
+ ],
+ "Effect": "Allow",
+ "Sid": "Stmt1438195894000"
+ }
+ ]
+}
diff --git a/roles/openshift_aws/files/trustpolicy.json b/roles/openshift_aws/files/trustpolicy.json
new file mode 100644
index 000000000..87c7d7c42
--- /dev/null
+++ b/roles/openshift_aws/files/trustpolicy.json
@@ -0,0 +1,12 @@
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Principal": {
+ "Service": "ec2.amazonaws.com"
+ },
+ "Action": "sts:AssumeRole"
+ }
+ ]
+}
diff --git a/roles/openshift_aws/tasks/build_node_group.yml b/roles/openshift_aws/tasks/build_node_group.yml
index 852adc7b5..7e8e9b679 100644
--- a/roles/openshift_aws/tasks/build_node_group.yml
+++ b/roles/openshift_aws/tasks/build_node_group.yml
@@ -27,6 +27,9 @@
- set_fact:
l_epoch_time: "{{ ansible_date_time.epoch }}"
+- when: openshift_aws_create_iam_role
+ include: iam_role.yml
+
- when: openshift_aws_create_launch_config
include: launch_config.yml
diff --git a/roles/openshift_aws/tasks/iam_role.yml b/roles/openshift_aws/tasks/iam_role.yml
new file mode 100644
index 000000000..d9910d938
--- /dev/null
+++ b/roles/openshift_aws/tasks/iam_role.yml
@@ -0,0 +1,36 @@
+---
+#####
+# Instance profiles consist of two parts. The first part is creating a role
+# that the instance can assume, using the role's permissions to make API
+# calls on its behalf. This role requires a trust policy that links a
+# service (ec2) to the role, stating that the role is allowed to make
+# ec2 API calls.
+# See ../files/trustpolicy.json
+#
+# Currently openshift-node requires
+# access to the AWS API to call describeinstances.
+# https://bugzilla.redhat.com/show_bug.cgi?id=1510519
+#####
+- name: Create an iam role
+ iam_role:
+ name: "{{ item.value.iam_role }}"
+ assume_role_policy_document: "{{ lookup('file','trustpolicy.json') }}"
+ state: "{{ openshift_aws_iam_role_state | default('present') }}"
+ when: item.value.iam_role is defined
+ with_dict: "{{ l_nodes_to_build }}"
+
+#####
+# The second part of this task file is linking the role to a policy
+# that specifies which calls the role can make to the ec2 API.
+# Currently all that is required is DescribeInstances.
+# See ../files/describeinstances.json
+#####
+- name: create an iam policy
+ iam_policy:
+ iam_type: role
+ iam_name: "{{ item.value.iam_role }}"
+ policy_json: "{{ item.value.policy_json }}"
+ policy_name: "{{ item.value.policy_name }}"
+ state: "{{ openshift_aws_iam_role_state | default('present') }}"
+ when: item.value.iam_role is defined
+ with_dict: "{{ l_nodes_to_build }}"
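Taken together, the two tasks amount to two IAM API calls per node group. As an illustrative boto3 sketch only (the role actually drives Ansible's iam_role and iam_policy modules; the names below are the defaults from openshift_aws/defaults/main.yml):

```python
import json

import boto3

iam = boto3.client('iam')

# Part 1: create the role with the ec2 trust policy (files/trustpolicy.json)
# so that EC2 instances are allowed to assume it.
iam.create_role(
    RoleName='openshift_node_describe_instances',
    AssumeRolePolicyDocument=json.dumps({
        'Version': '2012-10-17',
        'Statement': [{'Effect': 'Allow',
                       'Principal': {'Service': 'ec2.amazonaws.com'},
                       'Action': 'sts:AssumeRole'}]}))

# Part 2: attach an inline policy (files/describeinstances.json) granting
# only ec2:DescribeInstances.
iam.put_role_policy(
    RoleName='openshift_node_describe_instances',
    PolicyName='describe_instances',
    PolicyDocument=json.dumps({
        'Version': '2012-10-17',
        'Statement': [{'Effect': 'Allow',
                       'Action': ['ec2:DescribeInstances'],
                       'Resource': ['*']}]}))
```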
diff --git a/roles/openshift_aws/tasks/launch_config_create.yml b/roles/openshift_aws/tasks/launch_config_create.yml
index 8265c2179..a688496d2 100644
--- a/roles/openshift_aws/tasks/launch_config_create.yml
+++ b/roles/openshift_aws/tasks/launch_config_create.yml
@@ -15,6 +15,10 @@
image_id: "{{ l_aws_ami_map[launch_config_item.key] | default(openshift_aws_ami) }}"
instance_type: "{{ launch_config_item.value.instance_type }}"
security_groups: "{{ openshift_aws_launch_config_security_group_id | default(ec2sgs.security_groups | map(attribute='group_id')| list) }}"
+ instance_profile_name: "{{ launch_config_item.value.iam_role if launch_config_item.value.iam_role is defined and
+ launch_config_item.value.iam_role != '' and
+ openshift_aws_create_iam_role
+ else omit }}"
user_data: "{{ lookup('template', 'user_data.j2') }}"
key_name: "{{ openshift_aws_ssh_key_name }}"
ebs_optimized: False
diff --git a/roles/openshift_ca/tasks/main.yml b/roles/openshift_ca/tasks/main.yml
index 587526d07..31f0f8e7a 100644
--- a/roles/openshift_ca/tasks/main.yml
+++ b/roles/openshift_ca/tasks/main.yml
@@ -97,10 +97,8 @@
--master={{ openshift.master.api_url }}
--public-master={{ openshift.master.public_api_url }}
--cert-dir={{ openshift_ca_config_dir }}
- {% if openshift_version | oo_version_gte_3_5_or_1_5(openshift.common.deployment_type) | bool %}
--expire-days={{ openshift_master_cert_expire_days }}
--signer-expire-days={{ openshift_ca_cert_expire_days }}
- {% endif %}
--overwrite=false
when: master_ca_missing | bool or openshift_certificates_redeploy | default(false) | bool
delegate_to: "{{ openshift_ca_host }}"
@@ -169,9 +167,7 @@
--signer-serial={{ openshift_ca_serial }}
--user=system:openshift-master
--basename=openshift-master
- {% if openshift_version | oo_version_gte_3_5_or_1_5(openshift.common.deployment_type) | bool %}
--expire-days={{ openshift_master_cert_expire_days }}
- {% endif %}
- name: Copy generated loopback master client config to master config dir
copy:
src: "{{ openshift_ca_loopback_tmpdir.stdout }}/{{ item }}"
diff --git a/roles/openshift_cloud_provider/tasks/openstack.yml b/roles/openshift_cloud_provider/tasks/openstack.yml
index 5788e6d74..324630491 100644
--- a/roles/openshift_cloud_provider/tasks/openstack.yml
+++ b/roles/openshift_cloud_provider/tasks/openstack.yml
@@ -1,8 +1,4 @@
---
-- fail:
- msg: "The Openstack integration requires OpenShift Enterprise 3.2 or Origin 1.2."
- when: not openshift.common.version_gte_3_2_or_1_2 | bool
-
- name: Create cloud config
template:
dest: "{{ openshift.common.config_base }}/cloudprovider/openstack.conf"
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index 699dc300f..99ebb7e36 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -490,7 +490,7 @@ def set_selectors(facts):
facts['hosted']['metrics'] = {}
if 'selector' not in facts['hosted']['metrics'] or facts['hosted']['metrics']['selector'] in [None, 'None']:
facts['hosted']['metrics']['selector'] = None
- if 'logging' not in facts:
+ if 'logging' not in facts or not isinstance(facts['logging'], dict):
facts['logging'] = {}
if 'selector' not in facts['logging'] or facts['logging']['selector'] in [None, 'None']:
facts['logging']['selector'] = None
@@ -806,7 +806,7 @@ def set_deployment_facts_if_unset(facts):
# pylint: disable=too-many-statements
def set_version_facts_if_unset(facts):
""" Set version facts. This currently includes common.version and
- common.version_gte_3_1_or_1_1.
+ common.version_gte_3_x
Args:
facts (dict): existing facts
@@ -814,49 +814,19 @@ def set_version_facts_if_unset(facts):
dict: the facts dict updated with version facts.
"""
if 'common' in facts:
- deployment_type = facts['common']['deployment_type']
openshift_version = get_openshift_version(facts)
if openshift_version and openshift_version != "latest":
version = LooseVersion(openshift_version)
facts['common']['version'] = openshift_version
facts['common']['short_version'] = '.'.join([str(x) for x in version.version[0:2]])
- if deployment_type == 'origin':
- version_gte_3_1_or_1_1 = version >= LooseVersion('1.1.0')
- version_gte_3_1_1_or_1_1_1 = version >= LooseVersion('1.1.1')
- version_gte_3_2_or_1_2 = version >= LooseVersion('1.2.0')
- version_gte_3_3_or_1_3 = version >= LooseVersion('1.3.0')
- version_gte_3_4_or_1_4 = version >= LooseVersion('1.4')
- version_gte_3_5_or_1_5 = version >= LooseVersion('1.5')
- version_gte_3_6 = version >= LooseVersion('3.6')
- version_gte_3_7 = version >= LooseVersion('3.7')
- version_gte_3_8 = version >= LooseVersion('3.8')
- else:
- version_gte_3_1_or_1_1 = version >= LooseVersion('3.0.2.905')
- version_gte_3_1_1_or_1_1_1 = version >= LooseVersion('3.1.1')
- version_gte_3_2_or_1_2 = version >= LooseVersion('3.1.1.901')
- version_gte_3_3_or_1_3 = version >= LooseVersion('3.3.0')
- version_gte_3_4_or_1_4 = version >= LooseVersion('3.4')
- version_gte_3_5_or_1_5 = version >= LooseVersion('3.5')
- version_gte_3_6 = version >= LooseVersion('3.6')
- version_gte_3_7 = version >= LooseVersion('3.7')
- version_gte_3_8 = version >= LooseVersion('3.8')
+ version_gte_3_6 = version >= LooseVersion('3.6')
+ version_gte_3_7 = version >= LooseVersion('3.7')
+ version_gte_3_8 = version >= LooseVersion('3.8')
else:
# 'Latest' version is set to True, 'Next' versions set to False
- version_gte_3_1_or_1_1 = True
- version_gte_3_1_1_or_1_1_1 = True
- version_gte_3_2_or_1_2 = True
- version_gte_3_3_or_1_3 = True
- version_gte_3_4_or_1_4 = True
- version_gte_3_5_or_1_5 = True
version_gte_3_6 = True
version_gte_3_7 = True
version_gte_3_8 = False
- facts['common']['version_gte_3_1_or_1_1'] = version_gte_3_1_or_1_1
- facts['common']['version_gte_3_1_1_or_1_1_1'] = version_gte_3_1_1_or_1_1_1
- facts['common']['version_gte_3_2_or_1_2'] = version_gte_3_2_or_1_2
- facts['common']['version_gte_3_3_or_1_3'] = version_gte_3_3_or_1_3
- facts['common']['version_gte_3_4_or_1_4'] = version_gte_3_4_or_1_4
- facts['common']['version_gte_3_5_or_1_5'] = version_gte_3_5_or_1_5
facts['common']['version_gte_3_6'] = version_gte_3_6
facts['common']['version_gte_3_7'] = version_gte_3_7
facts['common']['version_gte_3_8'] = version_gte_3_8
@@ -867,18 +837,8 @@ def set_version_facts_if_unset(facts):
examples_content_version = 'v3.7'
elif version_gte_3_6:
examples_content_version = 'v3.6'
- elif version_gte_3_5_or_1_5:
- examples_content_version = 'v1.5'
- elif version_gte_3_4_or_1_4:
- examples_content_version = 'v1.4'
- elif version_gte_3_3_or_1_3:
- examples_content_version = 'v1.3'
- elif version_gte_3_2_or_1_2:
- examples_content_version = 'v1.2'
- elif version_gte_3_1_or_1_1:
- examples_content_version = 'v1.1'
else:
- examples_content_version = 'v1.0'
+ examples_content_version = 'v1.5'
facts['common']['examples_content_version'] = examples_content_version
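With the per-deployment-type branches gone, the remaining version gating reduces to three LooseVersion comparisons; a standalone sketch of the surviving logic:

```python
from distutils.version import LooseVersion


def version_gates(openshift_version):
    """Mimic the trimmed set_version_facts_if_unset() gates."""
    if openshift_version and openshift_version != 'latest':
        version = LooseVersion(openshift_version)
        return {'version_gte_3_%d' % n: version >= LooseVersion('3.%d' % n)
                for n in (6, 7, 8)}
    # 'latest': current versions True, 'next' version False
    return {'version_gte_3_6': True, 'version_gte_3_7': True,
            'version_gte_3_8': False}


print(version_gates('3.7.0'))
# {'version_gte_3_6': True, 'version_gte_3_7': True, 'version_gte_3_8': False}
```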
diff --git a/roles/openshift_hosted/tasks/secure.yml b/roles/openshift_hosted/tasks/secure.yml
index 0da8ac8a7..174bc39a4 100644
--- a/roles/openshift_hosted/tasks/secure.yml
+++ b/roles/openshift_hosted/tasks/secure.yml
@@ -42,7 +42,7 @@
- "{{ openshift_hosted_registry_routehost }}"
cert: "{{ docker_registry_cert_path }}"
key: "{{ docker_registry_key_path }}"
- expire_days: "{{ openshift_hosted_registry_cert_expire_days if openshift_version | oo_version_gte_3_5_or_1_5(openshift_deployment_type) | bool else omit }}"
+ expire_days: "{{ openshift_hosted_registry_cert_expire_days }}"
register: registry_self_cert
when: docker_registry_self_signed
diff --git a/roles/openshift_hosted_metrics/handlers/main.yml b/roles/openshift_hosted_metrics/handlers/main.yml
index 88b893448..074b72942 100644
--- a/roles/openshift_hosted_metrics/handlers/main.yml
+++ b/roles/openshift_hosted_metrics/handlers/main.yml
@@ -18,11 +18,7 @@
# wait_for port doesn't provide health information.
command: >
curl --silent --tlsv1.2
- {% if openshift.common.version_gte_3_2_or_1_2 | bool %}
--cacert {{ openshift.common.config_base }}/master/ca-bundle.crt
- {% else %}
- --cacert {{ openshift.common.config_base }}/master/ca.crt
- {% endif %}
{{ openshift.master.api_url }}/healthz/ready
args:
# Disables the following warning:
diff --git a/roles/openshift_logging/handlers/main.yml b/roles/openshift_logging/handlers/main.yml
index 88b893448..074b72942 100644
--- a/roles/openshift_logging/handlers/main.yml
+++ b/roles/openshift_logging/handlers/main.yml
@@ -18,11 +18,7 @@
# wait_for port doesn't provide health information.
command: >
curl --silent --tlsv1.2
- {% if openshift.common.version_gte_3_2_or_1_2 | bool %}
--cacert {{ openshift.common.config_base }}/master/ca-bundle.crt
- {% else %}
- --cacert {{ openshift.common.config_base }}/master/ca.crt
- {% endif %}
{{ openshift.master.api_url }}/healthz/ready
args:
# Disables the following warning:
diff --git a/roles/openshift_manage_node/tasks/main.yml b/roles/openshift_manage_node/tasks/main.yml
index f67aee88b..fbbac1176 100644
--- a/roles/openshift_manage_node/tasks/main.yml
+++ b/roles/openshift_manage_node/tasks/main.yml
@@ -7,11 +7,7 @@
# wait_for port doesn't provide health information.
command: >
curl --silent --tlsv1.2
- {% if openshift.common.version_gte_3_2_or_1_2 | bool %}
--cacert {{ openshift.common.config_base }}/master/ca-bundle.crt
- {% else %}
- --cacert {{ openshift.common.config_base }}/master/ca.crt
- {% endif %}
{{ openshift_node_master_api_url }}/healthz/ready
args:
# Disables the following warning:
diff --git a/roles/openshift_master/defaults/main.yml b/roles/openshift_master/defaults/main.yml
index a27fbae7e..97a8735ee 100644
--- a/roles/openshift_master/defaults/main.yml
+++ b/roles/openshift_master/defaults/main.yml
@@ -31,6 +31,7 @@ oreg_host: "{{ oreg_url.split('/')[0] if (oreg_url is defined and '.' in oreg_ur
oreg_auth_credentials_path: "{{ r_openshift_master_data_dir }}/.docker"
oreg_auth_credentials_replace: False
l_bind_docker_reg_auth: False
+openshift_docker_alternative_creds: "{{ (openshift_docker_use_system_container | default(False)) or (openshift_use_crio_only | default(False)) }}"
containerized_svc_dir: "/usr/lib/systemd/system"
ha_svc_template_path: "native-cluster"
diff --git a/roles/openshift_master/handlers/main.yml b/roles/openshift_master/handlers/main.yml
index f88c4a7dc..359536202 100644
--- a/roles/openshift_master/handlers/main.yml
+++ b/roles/openshift_master/handlers/main.yml
@@ -25,11 +25,7 @@
# wait_for port doesn't provide health information.
command: >
curl --silent --tlsv1.2
- {% if openshift.common.version_gte_3_2_or_1_2 | bool %}
--cacert {{ openshift.common.config_base }}/master/ca-bundle.crt
- {% else %}
- --cacert {{ openshift.common.config_base }}/master/ca.crt
- {% endif %}
{{ openshift.master.api_url }}/healthz/ready
args:
# Disables the following warning:
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index c7c02d49b..b6d3539b1 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -18,12 +18,6 @@
- openshift.master.ha | bool
- (openshift.master.cluster_method is not defined) or (openshift.master.cluster_method is defined and openshift.master.cluster_method not in ["native", "pacemaker"])
- fail:
- msg: "'native' high availability is not supported for the requested OpenShift version"
- when:
- - openshift.master.ha | bool
- - openshift.master.cluster_method == "native"
- - not openshift.common.version_gte_3_1_or_1_1 | bool
-- fail:
msg: "openshift_master_cluster_password must be set for multi-master installations"
when:
- openshift.master.ha | bool
@@ -222,8 +216,6 @@
when: openshift_master_bootstrap_enabled | default(False)
- include: set_loopback_context.yml
- when:
- - openshift.common.version_gte_3_2_or_1_2
- name: Start and enable master api on first master
systemd:
diff --git a/roles/openshift_master/tasks/registry_auth.yml b/roles/openshift_master/tasks/registry_auth.yml
index cde01c49e..c95f562d0 100644
--- a/roles/openshift_master/tasks/registry_auth.yml
+++ b/roles/openshift_master/tasks/registry_auth.yml
@@ -8,6 +8,7 @@
- name: Create credentials for registry auth
command: "docker --config={{ oreg_auth_credentials_path }} login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} {{ oreg_host }}"
when:
+ - not (openshift_docker_alternative_creds | default(False))
- oreg_auth_user is defined
- (not master_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
register: master_oreg_auth_credentials_create
@@ -18,6 +19,25 @@
- restart master api
- restart master controllers
+# docker_creds is a custom module from lib_utils
+# 'docker login' requires a docker.service running on the local host; this is an
+# alternative implementation for non-docker hosts. This implementation does not
+# check the registry to determine whether or not the credentials will work.
+- name: Create credentials for registry auth (alternative)
+ docker_creds:
+ path: "{{ oreg_auth_credentials_path }}"
+ registry: "{{ oreg_host }}"
+ username: "{{ oreg_auth_user }}"
+ password: "{{ oreg_auth_password }}"
+ when:
+ - openshift_docker_alternative_creds | default(False) | bool
+ - oreg_auth_user is defined
+ - (not docker_cli_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
+ register: master_oreg_auth_credentials_create
+ notify:
+ - restart master api
+ - restart master controllers
+
# Container images may need the registry credentials
- name: Setup ro mount of /root/.docker for containerized hosts
set_fact:
diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2
index c83fc9fbb..629fe3286 100644
--- a/roles/openshift_master/templates/master.yaml.v1.j2
+++ b/roles/openshift_master/templates/master.yaml.v1.j2
@@ -3,9 +3,6 @@ admissionConfig:
pluginConfig:{{ openshift.master.admission_plugin_config | to_padded_yaml(level=2) }}
{% endif %}
apiLevels:
-{% if not openshift.common.version_gte_3_1_or_1_1 | bool %}
-- v1beta3
-{% endif %}
- v1
apiVersion: v1
assetConfig:
@@ -44,10 +41,9 @@ assetConfig:
- {{ cipher_suite }}
{% endfor %}
{% endif %}
-{% if openshift.master.audit_config | default(none) is not none and openshift.common.version_gte_3_2_or_1_2 | bool %}
+{% if openshift.master.audit_config | default(none) is not none %}
auditConfig:{{ openshift.master.audit_config | to_padded_yaml(level=1) }}
{% endif %}
-{% if openshift.common.version_gte_3_3_or_1_3 | bool %}
controllerConfig:
election:
lockName: openshift-master-controllers
@@ -55,15 +51,14 @@ controllerConfig:
signer:
certFile: service-signer.crt
keyFile: service-signer.key
-{% endif %}
controllers: '*'
corsAllowedOrigins:
# anchor with start (\A) and end (\z) of the string, make the check case insensitive ((?i)) and escape hostname
{% for origin in ['127.0.0.1', 'localhost', openshift.common.ip, openshift.common.public_ip] | union(openshift.common.all_hostnames) | unique %}
- - (?i)\A{{ origin | regex_escape() }}\z
+ - (?i)//{{ origin | regex_escape() }}(:|\z)
{% endfor %}
{% for custom_origin in openshift.master.custom_cors_origins | default("") %}
- - (?i)\A{{ custom_origin | regex_escape() }}\z
+ - (?i)//{{ custom_origin | regex_escape() }}(:|\z)
{% endfor %}
{% if 'disabled_features' in openshift.master %}
disabledFeatures: {{ openshift.master.disabled_features | to_json }}
@@ -74,11 +69,7 @@ dnsConfig:
bindNetwork: tcp4
{% endif %}
etcdClientInfo:
-{% if openshift.common.version_gte_3_2_or_1_2 | bool %}
ca: {{ "ca-bundle.crt" if (openshift.master.embedded_etcd | bool) else "master.etcd-ca.crt" }}
-{% else %}
- ca: {{ "ca.crt" if (openshift.master.embedded_etcd | bool) else "master.etcd-ca.crt" }}
-{% endif %}
certFile: master.etcd-client.crt
keyFile: master.etcd-client.key
urls:
@@ -92,20 +83,12 @@ etcdConfig:
peerServingInfo:
bindAddress: {{ openshift.master.bind_addr }}:7001
certFile: etcd.server.crt
-{% if openshift.common.version_gte_3_2_or_1_2 | bool %}
clientCA: ca-bundle.crt
-{% else %}
- clientCA: ca.crt
-{% endif %}
keyFile: etcd.server.key
servingInfo:
bindAddress: {{ openshift.master.bind_addr }}:{{ openshift.master.etcd_port }}
certFile: etcd.server.crt
-{% if openshift.common.version_gte_3_2_or_1_2 | bool %}
clientCA: ca-bundle.crt
-{% else %}
- clientCA: ca.crt
-{% endif %}
keyFile: etcd.server.key
storageDirectory: {{ r_openshift_master_data_dir }}/openshift.local.etcd
{% endif %}
@@ -123,21 +106,12 @@ imagePolicyConfig:{{ openshift.master.image_policy_config | to_padded_yaml(level
kind: MasterConfig
kubeletClientInfo:
{# TODO: allow user specified kubelet port #}
-{% if openshift.common.version_gte_3_2_or_1_2 | bool %}
ca: ca-bundle.crt
-{% else %}
- ca: ca.crt
-{% endif %}
certFile: master.kubelet-client.crt
keyFile: master.kubelet-client.key
port: 10250
{% if openshift.master.embedded_kube | bool %}
kubernetesMasterConfig:
-{% if not openshift.common.version_gte_3_1_or_1_1 | bool %}
- apiLevels:
- - v1beta3
- - v1
-{% endif %}
apiServerArguments: {{ openshift.master.api_server_args | default(None) | to_padded_yaml( level=2 ) }}
{% if r_openshift_master_etcd3_storage or ( r_openshift_master_clean_install and openshift.common.version_gte_3_6 ) %}
storage-backend:
@@ -160,21 +134,17 @@ kubernetesMasterConfig:
{% endif %}
masterClients:
{# TODO: allow user to set externalKubernetesKubeConfig #}
-{% if openshift.common.version_gte_3_3_or_1_3 | bool %}
externalKubernetesClientConnectionOverrides:
acceptContentTypes: application/vnd.kubernetes.protobuf,application/json
contentType: application/vnd.kubernetes.protobuf
burst: {{ openshift_master_external_ratelimit_burst | default(400) }}
qps: {{ openshift_master_external_ratelimit_qps | default(200) }}
-{% endif %}
externalKubernetesKubeConfig: ""
-{% if openshift.common.version_gte_3_3_or_1_3 | bool %}
openshiftLoopbackClientConnectionOverrides:
acceptContentTypes: application/vnd.kubernetes.protobuf,application/json
contentType: application/vnd.kubernetes.protobuf
burst: {{ openshift_master_loopback_ratelimit_burst | default(600) }}
qps: {{ openshift_master_loopback_ratelimit_qps | default(300) }}
-{% endif %}
openshiftLoopbackKubeConfig: openshift-master.kubeconfig
masterPublicURL: {{ openshift.master.public_api_url }}
networkConfig:
@@ -208,11 +178,7 @@ oauthConfig:
{% for line in translated_identity_providers.splitlines() %}
{{ line }}
{% endfor %}
-{% if openshift.common.version_gte_3_2_or_1_2 | bool %}
masterCA: ca-bundle.crt
-{% else %}
- masterCA: ca.crt
-{% endif %}
masterPublicURL: {{ openshift.master.public_api_url }}
masterURL: {{ openshift.master.api_url }}
sessionConfig:
@@ -245,11 +211,7 @@ serviceAccountConfig:
- default
- builder
- deployer
-{% if openshift.common.version_gte_3_2_or_1_2 | bool %}
masterCA: ca-bundle.crt
-{% else %}
- masterCA: ca.crt
-{% endif %}
privateKeyFile: serviceaccounts.private.key
publicKeyFiles:
- serviceaccounts.public.key
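The corsAllowedOrigins change above is easy to miss: the old pattern anchored the whole string, so it only matched a bare hostname, and an Origin value carrying a scheme or port never matched. The new form matches the host right after the scheme's `//` and tolerates an optional port. A sketch of the difference, using Python's `\Z` in place of RE2's `\z` (hypothetical hostname):

```python
import re

host = re.escape('master.example.com')  # hypothetical master hostname
old = r'(?i)\A%s\Z' % host              # anchored: bare hostname only
new = r'(?i)//%s(:|\Z)' % host          # host after '//', optional port

for origin in ('master.example.com', 'https://master.example.com:8443'):
    print(origin, bool(re.search(old, origin)), bool(re.search(new, origin)))
# master.example.com True False
# https://master.example.com:8443 False True
```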
diff --git a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2
index e284413f7..fae021845 100644
--- a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2
+++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2
@@ -7,11 +7,7 @@ Wants={{ openshift.common.service_type }}-master-api.service
Requires=network-online.target
[Service]
-{% if openshift.common.version_gte_3_1_1_or_1_1_1 | bool %}
Type=notify
-{% else %}
-Type=simple
-{% endif %}
EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
Environment=GOTRACEBACK=crash
ExecStart=/usr/bin/openshift start master controllers --config=${CONFIG_FILE} $OPTIONS
diff --git a/roles/openshift_master_certificates/tasks/main.yml b/roles/openshift_master_certificates/tasks/main.yml
index d9ffb1b6f..ec1fbb1ee 100644
--- a/roles/openshift_master_certificates/tasks/main.yml
+++ b/roles/openshift_master_certificates/tasks/main.yml
@@ -3,7 +3,7 @@
openshift_master_certs_no_etcd:
- admin.crt
- master.kubelet-client.crt
- - "{{ 'master.proxy-client.crt' if openshift.common.version_gte_3_1_or_1_1 else omit }}"
+ - master.proxy-client.crt
- master.server.crt
- openshift-master.crt
- openshift-registry.crt
@@ -57,9 +57,7 @@
--hostnames={{ hostvars[item].openshift.common.all_hostnames | join(',') }}
--cert={{ openshift_generated_configs_dir }}/master-{{ hostvars[item].openshift.common.hostname }}/master.server.crt
--key={{ openshift_generated_configs_dir }}/master-{{ hostvars[item].openshift.common.hostname }}/master.server.key
- {% if openshift_version | oo_version_gte_3_5_or_1_5(openshift.common.deployment_type) | bool %}
--expire-days={{ openshift_master_cert_expire_days }}
- {% endif %}
--signer-cert={{ openshift_ca_cert }}
--signer-key={{ openshift_ca_key }}
--signer-serial={{ openshift_ca_serial }}
@@ -87,9 +85,7 @@
--signer-serial={{ openshift_ca_serial }}
--user=system:openshift-master
--basename=openshift-master
- {% if openshift_version | oo_version_gte_3_5_or_1_5(openshift.common.deployment_type) | bool %}
--expire-days={{ openshift_master_cert_expire_days }}
- {% endif %}
args:
creates: "{{ openshift_generated_configs_dir }}/master-{{ hostvars[item].openshift.common.hostname }}/openshift-master.kubeconfig"
with_items: "{{ hostvars
diff --git a/roles/openshift_master_cluster/tasks/main.yml b/roles/openshift_master_cluster/tasks/main.yml
index 0543872c9..40705d357 100644
--- a/roles/openshift_master_cluster/tasks/main.yml
+++ b/roles/openshift_master_cluster/tasks/main.yml
@@ -3,10 +3,6 @@
msg: "Not possible on atomic hosts for now"
when: openshift.common.is_containerized | bool
-- fail:
- msg: "Pacemaker HA is unsupported on OpenShift Enterprise 3.2 and Origin 1.2"
- when: openshift.master.cluster_method == "pacemaker" and openshift.common.version_gte_3_2_or_1_2 | bool
-
- name: Test if cluster is already configured
command: pcs status
register: pcs_status
diff --git a/roles/openshift_master_facts/filter_plugins/openshift_master.py b/roles/openshift_master_facts/filter_plugins/openshift_master.py
index 97a5179e0..c827f2d26 100644
--- a/roles/openshift_master_facts/filter_plugins/openshift_master.py
+++ b/roles/openshift_master_facts/filter_plugins/openshift_master.py
@@ -518,29 +518,16 @@ class FilterModule(object):
'admin.key',
'admin.kubeconfig',
'master.kubelet-client.crt',
- 'master.kubelet-client.key']
+ 'master.kubelet-client.key',
+ 'master.proxy-client.crt',
+ 'master.proxy-client.key',
+ 'service-signer.crt',
+ 'service-signer.key']
if bool(include_ca):
certs += ['ca.crt', 'ca.key', 'ca-bundle.crt', 'client-ca-bundle.crt']
if bool(include_keys):
certs += ['serviceaccounts.private.key',
'serviceaccounts.public.key']
- if bool(hostvars['openshift']['common']['version_gte_3_1_or_1_1']):
- certs += ['master.proxy-client.crt',
- 'master.proxy-client.key']
- if not bool(hostvars['openshift']['common']['version_gte_3_2_or_1_2']):
- certs += ['openshift-master.crt',
- 'openshift-master.key',
- 'openshift-master.kubeconfig']
- if bool(hostvars['openshift']['common']['version_gte_3_3_or_1_3']):
- certs += ['service-signer.crt',
- 'service-signer.key']
- if not bool(hostvars['openshift']['common']['version_gte_3_5_or_1_5']):
- certs += ['openshift-registry.crt',
- 'openshift-registry.key',
- 'openshift-registry.kubeconfig',
- 'openshift-router.crt',
- 'openshift-router.key',
- 'openshift-router.kubeconfig']
return certs
@staticmethod
diff --git a/roles/openshift_metrics/handlers/main.yml b/roles/openshift_metrics/handlers/main.yml
index 88b893448..074b72942 100644
--- a/roles/openshift_metrics/handlers/main.yml
+++ b/roles/openshift_metrics/handlers/main.yml
@@ -18,11 +18,7 @@
# wait_for port doesn't provide health information.
command: >
curl --silent --tlsv1.2
- {% if openshift.common.version_gte_3_2_or_1_2 | bool %}
--cacert {{ openshift.common.config_base }}/master/ca-bundle.crt
- {% else %}
- --cacert {{ openshift.common.config_base }}/master/ca.crt
- {% endif %}
{{ openshift.master.api_url }}/healthz/ready
args:
# Disables the following warning:
diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml
index 0c6d8db38..89d154ad7 100644
--- a/roles/openshift_node/defaults/main.yml
+++ b/roles/openshift_node/defaults/main.yml
@@ -85,6 +85,7 @@ oreg_host: "{{ oreg_url.split('/')[0] if (oreg_url is defined and '.' in oreg_ur
oreg_auth_credentials_path: "{{ openshift_node_data_dir }}/.docker"
oreg_auth_credentials_replace: False
l_bind_docker_reg_auth: False
+openshift_docker_alternative_creds: "{{ (openshift_docker_use_system_container | default(False)) or (openshift_use_crio_only | default(False)) }}"
# NOTE
# r_openshift_node_*_default may be defined external to this role.
@@ -115,3 +116,5 @@ openshift_node_config_dir: "{{ openshift_node_config_dir_default }}"
openshift_node_image_config_latest_default: "{{ openshift_image_config_latest | default(False) }}"
openshift_node_image_config_latest: "{{ openshift_node_image_config_latest_default }}"
+
+openshift_node_use_instance_profiles: False
diff --git a/roles/openshift_node/tasks/bootstrap.yml b/roles/openshift_node/tasks/bootstrap.yml
index 8cf41ab4c..b8be50f6c 100644
--- a/roles/openshift_node/tasks/bootstrap.yml
+++ b/roles/openshift_node/tasks/bootstrap.yml
@@ -34,6 +34,7 @@
- name: include aws sysconfig credentials
include: aws.yml
static: yes
+ when: not (openshift_node_use_instance_profiles | default(False))
#- name: update the ExecStart to have bootstrap
# lineinfile:
diff --git a/roles/openshift_node/tasks/config.yml b/roles/openshift_node/tasks/config.yml
index c08f43118..2fea33454 100644
--- a/roles/openshift_node/tasks/config.yml
+++ b/roles/openshift_node/tasks/config.yml
@@ -49,6 +49,7 @@
- name: include aws provider credentials
include: aws.yml
static: yes
+ when: not (openshift_node_use_instance_profiles | default(False))
# Necessary because when you're on a node that's also a master the master will be
# restarted after the node restarts docker and it will take up to 60 seconds for
diff --git a/roles/openshift_node/tasks/registry_auth.yml b/roles/openshift_node/tasks/registry_auth.yml
index 5e5e4f94a..f5428867a 100644
--- a/roles/openshift_node/tasks/registry_auth.yml
+++ b/roles/openshift_node/tasks/registry_auth.yml
@@ -8,6 +8,7 @@
- name: Create credentials for registry auth
command: "docker --config={{ oreg_auth_credentials_path }} login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} {{ oreg_host }}"
when:
+ - not (openshift_docker_alternative_creds | default(False))
- oreg_auth_user is defined
- (not node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
register: node_oreg_auth_credentials_create
@@ -17,6 +18,24 @@
notify:
- restart node
+# docker_creds is a custom module from lib_utils
+# 'docker login' requires a docker.service running on the local host; this is an
+# alternative implementation for non-docker hosts. This implementation does not
+# check the registry to determine whether or not the credentials will work.
+- name: Create credentials for registry auth (alternative)
+ docker_creds:
+ path: "{{ oreg_auth_credentials_path }}"
+ registry: "{{ oreg_host }}"
+ username: "{{ oreg_auth_user }}"
+ password: "{{ oreg_auth_password }}"
+ when:
+ - openshift_docker_alternative_creds | bool
+ - oreg_auth_user is defined
+ - (not node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
+ register: node_oreg_auth_credentials_create
+ notify:
+ - restart node
+
# Container images may need the registry credentials
- name: Setup ro mount of /root/.docker for containerized hosts
set_fact:
diff --git a/roles/openshift_node/templates/node.yaml.v1.j2 b/roles/openshift_node/templates/node.yaml.v1.j2
index 718d35dca..d452cc45c 100644
--- a/roles/openshift_node/templates/node.yaml.v1.j2
+++ b/roles/openshift_node/templates/node.yaml.v1.j2
@@ -29,13 +29,11 @@ kubeletArguments: {{ openshift.node.kubelet_args | default(None) | to_padded_yam
runtime-request-timeout:
- 10m
{% endif %}
-{% if openshift.common.version_gte_3_3_or_1_3 | bool %}
masterClientConnectionOverrides:
acceptContentTypes: application/vnd.kubernetes.protobuf,application/json
contentType: application/vnd.kubernetes.protobuf
burst: 200
qps: 100
-{% endif %}
masterKubeConfig: system:node:{{ openshift.common.hostname }}.kubeconfig
{% if openshift_node_use_openshift_sdn | bool %}
networkPluginName: {{ openshift_node_sdn_network_plugin_name }}
diff --git a/roles/openshift_node_certificates/tasks/main.yml b/roles/openshift_node_certificates/tasks/main.yml
index 1a775178d..97f1fbbdd 100644
--- a/roles/openshift_node_certificates/tasks/main.yml
+++ b/roles/openshift_node_certificates/tasks/main.yml
@@ -66,9 +66,7 @@
--signer-key={{ openshift_ca_key }}
--signer-serial={{ openshift_ca_serial }}
--user=system:node:{{ hostvars[item].openshift.common.hostname }}
- {% if openshift_version | oo_version_gte_3_5_or_1_5(openshift.common.deployment_type) | bool %}
--expire-days={{ openshift_node_cert_expire_days }}
- {% endif %}
args:
creates: "{{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname }}"
with_items: "{{ hostvars
@@ -82,9 +80,7 @@
{{ hostvars[openshift_ca_host].openshift.common.client_binary }} adm ca create-server-cert
--cert={{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname }}/server.crt
--key={{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname }}/server.key
- {% if openshift_version | oo_version_gte_3_5_or_1_5(openshift.common.deployment_type) | bool %}
--expire-days={{ openshift_node_cert_expire_days }}
- {% endif %}
--overwrite=true
--hostnames={{ hostvars[item].openshift.common.hostname }},{{ hostvars[item].openshift.common.public_hostname }},{{ hostvars[item].openshift.common.ip }},{{ hostvars[item].openshift.common.public_ip }}
--signer-cert={{ openshift_ca_cert }}
diff --git a/roles/openshift_node_upgrade/defaults/main.yml b/roles/openshift_node_upgrade/defaults/main.yml
index 10b4c6977..1da434e6f 100644
--- a/roles/openshift_node_upgrade/defaults/main.yml
+++ b/roles/openshift_node_upgrade/defaults/main.yml
@@ -12,3 +12,4 @@ oreg_host: "{{ oreg_url.split('/')[0] if (oreg_url is defined and '.' in oreg_ur
oreg_auth_credentials_path: "{{ openshift_node_data_dir }}/.docker"
oreg_auth_credentials_replace: False
l_bind_docker_reg_auth: False
+openshift_docker_alternative_creds: "{{ (openshift_docker_use_system_container | default(False)) or (openshift_use_crio_only | default(False)) }}"
diff --git a/roles/openshift_node_upgrade/tasks/main.yml b/roles/openshift_node_upgrade/tasks/main.yml
index 6bcf3072d..66c1fcc38 100644
--- a/roles/openshift_node_upgrade/tasks/main.yml
+++ b/roles/openshift_node_upgrade/tasks/main.yml
@@ -69,8 +69,6 @@
file:
path: "/etc/systemd/system/docker.service.d/docker-sdn-ovs.conf"
state: absent
- when: (deployment_type == 'openshift-enterprise' and openshift_release | version_compare('3.4', '>='))
- or (deployment_type == 'origin' and openshift_release | version_compare('1.4', '>='))
- include: containerized_node_upgrade.yml
when: openshift.common.is_containerized | bool
@@ -98,6 +96,12 @@
failed_when: false
when: not openshift.common.is_containerized | bool
+# https://bugzilla.redhat.com/show_bug.cgi?id=1513054
+- name: Clean up dockershim data
+ file:
+ path: "/var/lib/dockershim/sandbox/"
+ state: absent
+
- name: Upgrade openvswitch
package:
name: openvswitch
diff --git a/roles/openshift_node_upgrade/tasks/registry_auth.yml b/roles/openshift_node_upgrade/tasks/registry_auth.yml
index 5e5e4f94a..f5428867a 100644
--- a/roles/openshift_node_upgrade/tasks/registry_auth.yml
+++ b/roles/openshift_node_upgrade/tasks/registry_auth.yml
@@ -8,6 +8,7 @@
- name: Create credentials for registry auth
command: "docker --config={{ oreg_auth_credentials_path }} login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} {{ oreg_host }}"
when:
+ - not (openshift_docker_alternative_creds | default(False))
- oreg_auth_user is defined
- (not node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
register: node_oreg_auth_credentials_create
@@ -17,6 +18,24 @@
notify:
- restart node
+# docker_creds is a custom module from lib_utils
+# 'docker login' requires a docker.service running on the local host; this is
+# an alternative implementation for non-docker hosts. This implementation does not
+# check the registry to determine whether or not the credentials will work.
+- name: Create credentials for registry auth (alternative)
+ docker_creds:
+ path: "{{ oreg_auth_credentials_path }}"
+ registry: "{{ oreg_host }}"
+ username: "{{ oreg_auth_user }}"
+ password: "{{ oreg_auth_password }}"
+ when:
+ - openshift_docker_alternative_creds | bool
+ - oreg_auth_user is defined
+ - (not node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
+ register: node_oreg_auth_credentials_create
+ notify:
+ - restart node
+
# Container images may need the registry credentials
- name: Setup ro mount of /root/.docker for containerized hosts
set_fact:
diff --git a/roles/openshift_storage_glusterfs/README.md b/roles/openshift_storage_glusterfs/README.md
index abe411f67..03c157313 100644
--- a/roles/openshift_storage_glusterfs/README.md
+++ b/roles/openshift_storage_glusterfs/README.md
@@ -84,6 +84,20 @@ GlusterFS cluster into a new or existing OpenShift cluster:
| openshift_storage_glusterfs_storageclass | True | Automatically create a StorageClass for each GlusterFS cluster
| openshift_storage_glusterfs_image | 'gluster/gluster-centos'| Container image to use for GlusterFS pods, enterprise default is 'rhgs3/rhgs-server-rhel7'
| openshift_storage_glusterfs_version | 'latest' | Container image version to use for GlusterFS pods
+| openshift_storage_glusterfs_block_deploy | True | Deploy glusterblock provisioner service
+| openshift_storage_glusterfs_block_image | 'gluster/glusterblock-provisioner'| Container image to use for glusterblock-provisioner pod, enterprise default is 'rhgs3/rhgs-gluster-block-prov-rhel7'
+| openshift_storage_glusterfs_block_version | 'latest' | Container image version to use for glusterblock-provisioner pod
+| openshift_storage_glusterfs_block_max_host_vol | 15 | Max number of GlusterFS volumes to host glusterblock volumes
+| openshift_storage_glusterfs_s3_deploy | True | Deploy gluster-s3 service
+| openshift_storage_glusterfs_s3_image | 'gluster/gluster-object'| Container image to use for gluster-s3 pod, enterprise default is 'rhgs3/rhgs-gluster-s3-server-rhel7'
+| openshift_storage_glusterfs_s3_version | 'latest' | Container image version to use for gluster-s3 pod
+| openshift_storage_glusterfs_s3_account | Undefined | S3 account name for the S3 service, required for S3 service deployment
+| openshift_storage_glusterfs_s3_user | Undefined | S3 user name for the S3 service, required for S3 service deployment
+| openshift_storage_glusterfs_s3_password | Undefined | S3 user password for the S3 service, required for S3 service deployment
+| openshift_storage_glusterfs_s3_pvc | Dynamic | Name of the GlusterFS-backed PVC which will be used for S3 object data storage, generated from the cluster name and S3 account by default
+| openshift_storage_glusterfs_s3_pvc_size | "2Gi" | Size, in Gi, of the GlusterFS-backed PVC which will be used for S3 object data storage
+| openshift_storage_glusterfs_s3_meta_pvc | Dynamic | Name of the GlusterFS-backed PVC which will be used for S3 object metadata storage, generated from the cluster name and S3 account by default
+| openshift_storage_glusterfs_s3_meta_pvc_size | "1Gi" | Size, in Gi, of the GlusterFS-backed PVC which will be used for S3 object metadata storage
| openshift_storage_glusterfs_wipe | False | Destroy any existing GlusterFS resources and wipe storage devices. **WARNING: THIS WILL DESTROY ANY DATA ON THOSE DEVICES.**
| openshift_storage_glusterfs_heketi_is_native | True | heketi should be containerized
| openshift_storage_glusterfs_heketi_cli | 'heketi-cli' | Command/Path to invoke the heketi-cli tool **NOTE:** Change this only for **non-native heketi** if heketi-cli is not in the global `$PATH` of the machine running openshift-ansible
@@ -99,6 +113,7 @@ GlusterFS cluster into a new or existing OpenShift cluster:
| openshift_storage_glusterfs_heketi_ssh_user | 'root' | SSH user for external GlusterFS nodes via native heketi
| openshift_storage_glusterfs_heketi_ssh_sudo | False | Whether to sudo (if non-root user) for SSH to external GlusterFS nodes via native heketi
| openshift_storage_glusterfs_heketi_ssh_keyfile | Undefined | Path to a private key file for use with SSH connections to external GlusterFS nodes via native heketi **NOTE:** This must be an absolute path
+| openshift_storage_glusterfs_heketi_fstab | '/var/lib/heketi/fstab' | When heketi is native, sets the path to the fstab file on the GlusterFS nodes to update on LVM volume mounts; changes to '/etc/fstab' when the heketi executor is 'ssh' **NOTE:** This should not need to be changed
| openshift_storage_glusterfs_heketi_wipe | False | Destroy any existing heketi resources, defaults to the value of `openshift_storage_glusterfs_wipe`
Each role variable also has a corresponding variable to optionally configure a
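As a rough illustration of the new variables, a hypothetical group_vars fragment enabling both new services (the account, user, and password values are placeholders and must be supplied for the S3 service to deploy):

```yaml
# Hypothetical group_vars sketch; the three S3 credentials are required
# for gluster-s3 deployment, everything else falls back to defaults.
openshift_storage_glusterfs_block_deploy: True
openshift_storage_glusterfs_s3_deploy: True
openshift_storage_glusterfs_s3_account: testvolume
openshift_storage_glusterfs_s3_user: adminuser
openshift_storage_glusterfs_s3_password: itsmine
openshift_storage_glusterfs_s3_pvc_size: 2Gi
openshift_storage_glusterfs_s3_meta_pvc_size: 1Gi
```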
diff --git a/roles/openshift_storage_glusterfs/defaults/main.yml b/roles/openshift_storage_glusterfs/defaults/main.yml
index 148549887..c3db36d37 100644
--- a/roles/openshift_storage_glusterfs/defaults/main.yml
+++ b/roles/openshift_storage_glusterfs/defaults/main.yml
@@ -7,6 +7,20 @@ openshift_storage_glusterfs_use_default_selector: False
openshift_storage_glusterfs_storageclass: True
openshift_storage_glusterfs_image: "{{ 'rhgs3/rhgs-server-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'gluster/gluster-centos' | quote }}"
openshift_storage_glusterfs_version: 'latest'
+openshift_storage_glusterfs_block_deploy: True
+openshift_storage_glusterfs_block_image: "{{ 'rhgs3/rhgs-gluster-block-prov-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'gluster/glusterblock-provisioner' | quote }}"
+openshift_storage_glusterfs_block_version: 'latest'
+openshift_storage_glusterfs_block_max_host_vol: 15
+openshift_storage_glusterfs_s3_deploy: True
+openshift_storage_glusterfs_s3_image: "{{ 'rhgs3/rhgs-gluster-s3-server-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'gluster/gluster-object' | quote }}"
+openshift_storage_glusterfs_s3_version: 'latest'
+openshift_storage_glusterfs_s3_account: "{{ omit }}"
+openshift_storage_glusterfs_s3_user: "{{ omit }}"
+openshift_storage_glusterfs_s3_password: "{{ omit }}"
+openshift_storage_glusterfs_s3_pvc: "gluster-s3-{{ openshift_storage_glusterfs_name }}-{{ openshift_storage_glusterfs_s3_account }}-claim"
+openshift_storage_glusterfs_s3_pvc_size: "2Gi"
+openshift_storage_glusterfs_s3_meta_pvc: "gluster-s3-{{ openshift_storage_glusterfs_name }}-{{ openshift_storage_glusterfs_s3_account }}-meta-claim"
+openshift_storage_glusterfs_s3_meta_pvc_size: "1Gi"
openshift_storage_glusterfs_wipe: False
openshift_storage_glusterfs_heketi_is_native: "{{ openshift_storage_glusterfs_is_native }}"
openshift_storage_glusterfs_heketi_is_missing: True
@@ -25,6 +39,7 @@ openshift_storage_glusterfs_heketi_ssh_port: 22
openshift_storage_glusterfs_heketi_ssh_user: 'root'
openshift_storage_glusterfs_heketi_ssh_sudo: False
openshift_storage_glusterfs_heketi_ssh_keyfile: "{{ omit }}"
+openshift_storage_glusterfs_heketi_fstab: "{{ '/var/lib/heketi/fstab' | quote if openshift_storage_glusterfs_heketi_executor == 'kubernetes' else '/etc/fstab' | quote }}"
openshift_storage_glusterfs_namespace: "{{ 'glusterfs' | quote if openshift_storage_glusterfs_is_native or openshift_storage_glusterfs_heketi_is_native else 'default' | quote }}"
openshift_storage_glusterfs_registry_timeout: "{{ openshift_storage_glusterfs_timeout }}"
@@ -36,6 +51,20 @@ openshift_storage_glusterfs_registry_use_default_selector: "{{ openshift_storage
openshift_storage_glusterfs_registry_storageclass: False
openshift_storage_glusterfs_registry_image: "{{ openshift_storage_glusterfs_image }}"
openshift_storage_glusterfs_registry_version: "{{ openshift_storage_glusterfs_version }}"
+openshift_storage_glusterfs_registry_block_deploy: "{{ openshift_storage_glusterfs_block_deploy }}"
+openshift_storage_glusterfs_registry_block_image: "{{ openshift_storage_glusterfs_block_image }}"
+openshift_storage_glusterfs_registry_block_version: "{{ openshift_storage_glusterfs_block_version }}"
+openshift_storage_glusterfs_registry_block_max_host_vol: "{{ openshift_storage_glusterfs_block_max_host_vol }}"
+openshift_storage_glusterfs_registry_s3_deploy: "{{ openshift_storage_glusterfs_s3_deploy }}"
+openshift_storage_glusterfs_registry_s3_image: "{{ openshift_storage_glusterfs_s3_image }}"
+openshift_storage_glusterfs_registry_s3_version: "{{ openshift_storage_glusterfs_s3_version }}"
+openshift_storage_glusterfs_registry_s3_account: "{{ openshift_storage_glusterfs_s3_account }}"
+openshift_storage_glusterfs_registry_s3_user: "{{ openshift_storage_glusterfs_s3_user }}"
+openshift_storage_glusterfs_registry_s3_password: "{{ openshift_storage_glusterfs_s3_password }}"
+openshift_storage_glusterfs_registry_s3_pvc: "gluster-s3-{{ openshift_storage_glusterfs_registry_name }}-{{ openshift_storage_glusterfs_registry_s3_account }}-claim"
+openshift_storage_glusterfs_registry_s3_pvc_size: "{{ openshift_storage_glusterfs_s3_pvc_size }}"
+openshift_storage_glusterfs_registry_s3_meta_pvc: "gluster-s3-{{ openshift_storage_glusterfs_registry_name }}-{{ openshift_storage_glusterfs_registry_s3_account }}-meta-claim"
+openshift_storage_glusterfs_registry_s3_meta_pvc_size: "{{ openshift_storage_glusterfs_s3_meta_pvc_size }}"
openshift_storage_glusterfs_registry_wipe: "{{ openshift_storage_glusterfs_wipe }}"
openshift_storage_glusterfs_registry_heketi_is_native: "{{ openshift_storage_glusterfs_registry_is_native }}"
openshift_storage_glusterfs_registry_heketi_is_missing: "{{ openshift_storage_glusterfs_heketi_is_missing }}"
@@ -54,6 +83,8 @@ openshift_storage_glusterfs_registry_heketi_ssh_port: "{{ openshift_storage_glus
openshift_storage_glusterfs_registry_heketi_ssh_user: "{{ openshift_storage_glusterfs_heketi_ssh_user }}"
openshift_storage_glusterfs_registry_heketi_ssh_sudo: "{{ openshift_storage_glusterfs_heketi_ssh_sudo }}"
openshift_storage_glusterfs_registry_heketi_ssh_keyfile: "{{ openshift_storage_glusterfs_heketi_ssh_keyfile | default(omit) }}"
+openshift_storage_glusterfs_registry_heketi_fstab: "{{ '/var/lib/heketi/fstab' | quote if openshift_storage_glusterfs_registry_heketi_executor == 'kubernetes' else '/etc/fstab' | quote }}"
+
r_openshift_storage_glusterfs_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"
r_openshift_storage_glusterfs_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"
r_openshift_storage_glusterfs_os_firewall_deny: []
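The two new fstab defaults resolve per executor: with the 'kubernetes' executor heketi records bricks in /var/lib/heketi/fstab inside the GlusterFS pods, while the 'ssh' executor (external GlusterFS nodes) switches to /etc/fstab. A hypothetical sketch of the external case:

```yaml
# Hypothetical group_vars for external GlusterFS nodes managed over SSH;
# with the executor set to 'ssh' the conditional above resolves
# openshift_storage_glusterfs_heketi_fstab to '/etc/fstab'.
openshift_storage_glusterfs_is_native: False
openshift_storage_glusterfs_heketi_executor: ssh
openshift_storage_glusterfs_heketi_ssh_user: root
openshift_storage_glusterfs_heketi_ssh_keyfile: /root/.ssh/id_rsa
```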
diff --git a/roles/openshift_storage_glusterfs/files/v3.7/deploy-heketi-template.yml b/roles/openshift_storage_glusterfs/files/v3.7/deploy-heketi-template.yml
index 7b705c2d4..34af652c2 100644
--- a/roles/openshift_storage_glusterfs/files/v3.7/deploy-heketi-template.yml
+++ b/roles/openshift_storage_glusterfs/files/v3.7/deploy-heketi-template.yml
@@ -73,13 +73,11 @@ objects:
- name: HEKETI_EXECUTOR
value: ${HEKETI_EXECUTOR}
- name: HEKETI_FSTAB
- value: /var/lib/heketi/fstab
+ value: ${HEKETI_FSTAB}
- name: HEKETI_SNAPSHOT_LIMIT
value: '14'
- name: HEKETI_KUBE_GLUSTER_DAEMONSET
value: '1'
- - name: HEKETI_KUBE_NAMESPACE
- value: ${HEKETI_KUBE_NAMESPACE}
ports:
- containerPort: 8080
volumeMounts:
@@ -115,10 +113,10 @@ parameters:
displayName: heketi executor type
description: Set the executor type, kubernetes or ssh
value: kubernetes
-- name: HEKETI_KUBE_NAMESPACE
- displayName: Namespace
- description: Set the namespace where the GlusterFS pods reside
- value: default
+- name: HEKETI_FSTAB
+ displayName: heketi fstab path
+  description: Set the path to the fstab file that heketi populates with the bricks it creates
+ value: /var/lib/heketi/fstab
- name: HEKETI_ROUTE
displayName: heketi route name
description: Set the hostname for the route URL
diff --git a/roles/openshift_storage_glusterfs/files/v3.7/gluster-s3-pvcs-template.yml b/roles/openshift_storage_glusterfs/files/v3.7/gluster-s3-pvcs-template.yml
new file mode 100644
index 000000000..064b51473
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.7/gluster-s3-pvcs-template.yml
@@ -0,0 +1,67 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: gluster-s3-pvcs
+ labels:
+ glusterfs: s3-pvcs-template
+ gluster-s3: pvcs-template
+ annotations:
+ description: Gluster S3 service template
+ tags: glusterfs,heketi,gluster-s3
+objects:
+- kind: PersistentVolumeClaim
+ apiVersion: v1
+ metadata:
+ name: "${PVC}"
+ labels:
+ glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-storage
+ gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-pvc
+ annotations:
+ volume.beta.kubernetes.io/storage-class: "glusterfs-${CLUSTER_NAME}"
+ spec:
+ accessModes:
+ - ReadWriteMany
+ resources:
+ requests:
+ storage: "${PVC_SIZE}"
+- kind: PersistentVolumeClaim
+ apiVersion: v1
+ metadata:
+ name: "${META_PVC}"
+ labels:
+ glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-storage
+ gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-meta-pvc
+ annotations:
+ volume.beta.kubernetes.io/storage-class: "glusterfs-${CLUSTER_NAME}"
+ spec:
+ accessModes:
+ - ReadWriteMany
+ resources:
+ requests:
+ storage: "${META_PVC_SIZE}"
+parameters:
+- name: S3_ACCOUNT
+ displayName: S3 Account Name
+ description: S3 storage account which will provide storage on GlusterFS volumes
+ required: true
+- name: PVC
+ displayName: Primary GlusterFS-backed PVC
+ description: GlusterFS-backed PVC for object storage
+ required: true
+- name: PVC_SIZE
+ displayName: Primary GlusterFS-backed PVC capacity
+ description: Capacity for GlusterFS-backed PVC for object storage
+ value: 2Gi
+- name: META_PVC
+ displayName: Metadata GlusterFS-backed PVC
+ description: GlusterFS-backed PVC for object storage metadata
+ required: true
+- name: META_PVC_SIZE
+ displayName: Metadata GlusterFS-backed PVC capacity
+ description: Capacity for GlusterFS-backed PVC for object storage metadata
+ value: 1Gi
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances
+ value: storage
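Processed with hypothetical parameters (S3_ACCOUNT=testvolume, PVC=gluster-s3-storage-testvolume-claim, other values left at their defaults), the first object above renders roughly as:

```yaml
# Sketch of the rendered primary PVC; all names derive from the
# placeholder parameter values named in the lead-in.
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: gluster-s3-storage-testvolume-claim
  labels:
    glusterfs: s3-storage-testvolume-storage
    gluster-s3: storage-testvolume-pvc
  annotations:
    volume.beta.kubernetes.io/storage-class: glusterfs-storage
spec:
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      storage: 2Gi
```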
diff --git a/roles/openshift_storage_glusterfs/files/v3.7/gluster-s3-template.yml b/roles/openshift_storage_glusterfs/files/v3.7/gluster-s3-template.yml
new file mode 100644
index 000000000..896a1b226
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.7/gluster-s3-template.yml
@@ -0,0 +1,136 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: gluster-s3
+ labels:
+ glusterfs: s3-template
+ gluster-s3: template
+ annotations:
+ description: Gluster S3 service template
+ tags: glusterfs,heketi,gluster-s3
+objects:
+- kind: Service
+ apiVersion: v1
+ metadata:
+ name: gluster-s3-${CLUSTER_NAME}-${S3_ACCOUNT}-service
+ labels:
+ glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-service
+ gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-service
+ spec:
+ ports:
+ - protocol: TCP
+ port: 8080
+ targetPort: 8080
+ selector:
+ glusterfs: s3-pod
+ type: ClusterIP
+ sessionAffinity: None
+ status:
+ loadBalancer: {}
+- kind: Route
+ apiVersion: v1
+ metadata:
+ name: gluster-s3-${CLUSTER_NAME}-${S3_ACCOUNT}-route
+ labels:
+ glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-route
+ gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-route
+ spec:
+ to:
+ kind: Service
+ name: gluster-s3-${CLUSTER_NAME}-${S3_ACCOUNT}-service
+- kind: DeploymentConfig
+ apiVersion: v1
+ metadata:
+ name: gluster-s3-${CLUSTER_NAME}-${S3_ACCOUNT}-dc
+ labels:
+ glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-dc
+ gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-dc
+ annotations:
+ openshift.io/scc: privileged
+ description: Defines how to deploy gluster s3 object storage
+ spec:
+ replicas: 1
+ selector:
+ glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-pod
+ template:
+ metadata:
+ name: gluster-${CLUSTER_NAME}-${S3_ACCOUNT}-s3
+ labels:
+ glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-pod
+ gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-pod
+ spec:
+ containers:
+ - name: gluster-s3
+ image: ${IMAGE_NAME}:${IMAGE_VERSION}
+ imagePullPolicy: IfNotPresent
+ ports:
+ - name: gluster
+ containerPort: 8080
+ protocol: TCP
+ env:
+ - name: S3_ACCOUNT
+ value: "${S3_ACCOUNT}"
+ - name: S3_USER
+ value: "${S3_USER}"
+ - name: S3_PASSWORD
+ value: "${S3_PASSWORD}"
+ resources: {}
+ volumeMounts:
+ - name: gluster-vol1
+ mountPath: "/mnt/gluster-object/${S3_ACCOUNT}"
+ - name: gluster-vol2
+ mountPath: "/mnt/gluster-object/gsmetadata"
+ - name: glusterfs-cgroup
+ readOnly: true
+ mountPath: "/sys/fs/cgroup"
+ terminationMessagePath: "/dev/termination-log"
+ securityContext:
+ privileged: true
+ volumes:
+ - name: glusterfs-cgroup
+ hostPath:
+ path: "/sys/fs/cgroup"
+ - name: gluster-vol1
+ persistentVolumeClaim:
+ claimName: ${PVC}
+ - name: gluster-vol2
+ persistentVolumeClaim:
+ claimName: ${META_PVC}
+ restartPolicy: Always
+ terminationGracePeriodSeconds: 30
+ dnsPolicy: ClusterFirst
+ serviceAccountName: default
+ serviceAccount: default
+ securityContext: {}
+parameters:
+- name: IMAGE_NAME
+  displayName: gluster-s3 container image name
+ required: True
+- name: IMAGE_VERSION
+  displayName: gluster-s3 container image version
+ required: True
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances
+ value: storage
+- name: S3_ACCOUNT
+ displayName: S3 Account Name
+ description: S3 storage account which will provide storage on GlusterFS volumes
+ required: true
+- name: S3_USER
+ displayName: S3 User
+ description: S3 user who can access the S3 storage account
+ required: true
+- name: S3_PASSWORD
+ displayName: S3 User Password
+ description: Password for the S3 user
+ required: true
+- name: PVC
+ displayName: Primary GlusterFS-backed PVC
+ description: GlusterFS-backed PVC for object storage
+ value: gluster-s3-claim
+- name: META_PVC
+ displayName: Metadata GlusterFS-backed PVC
+ description: GlusterFS-backed PVC for object storage metadata
+ value: gluster-s3-meta-claim
diff --git a/roles/openshift_storage_glusterfs/files/v3.7/glusterblock-provisioner.yml b/roles/openshift_storage_glusterfs/files/v3.7/glusterblock-provisioner.yml
new file mode 100644
index 000000000..2cc69644c
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.7/glusterblock-provisioner.yml
@@ -0,0 +1,104 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: glusterblock
+ labels:
+ glusterfs: block-template
+ glusterblock: template
+ annotations:
+ description: glusterblock provisioner template
+ tags: glusterfs
+objects:
+- kind: ClusterRole
+ apiVersion: v1
+ metadata:
+ name: glusterblock-provisioner-runner
+ labels:
+ glusterfs: block-provisioner-runner-clusterrole
+ glusterblock: provisioner-runner-clusterrole
+ rules:
+ - apiGroups: [""]
+ resources: ["persistentvolumes"]
+ verbs: ["get", "list", "watch", "create", "delete"]
+ - apiGroups: [""]
+ resources: ["persistentvolumeclaims"]
+ verbs: ["get", "list", "watch", "update"]
+ - apiGroups: ["storage.k8s.io"]
+ resources: ["storageclasses"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: [""]
+ resources: ["events"]
+ verbs: ["list", "watch", "create", "update", "patch"]
+ - apiGroups: [""]
+ resources: ["services"]
+ verbs: ["get"]
+ - apiGroups: [""]
+ resources: ["secrets"]
+ verbs: ["get", "create", "delete"]
+ - apiGroups: [""]
+ resources: ["routes"]
+ verbs: ["get", "list"]
+- apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: glusterblock-${CLUSTER_NAME}-provisioner
+ labels:
+ glusterfs: block-${CLUSTER_NAME}-provisioner-sa
+ glusterblock: ${CLUSTER_NAME}-provisioner-sa
+- apiVersion: v1
+ kind: ClusterRoleBinding
+ metadata:
+ name: glusterblock-${CLUSTER_NAME}-provisioner
+ roleRef:
+ name: glusterblock-provisioner-runner
+ subjects:
+ - kind: ServiceAccount
+ name: glusterblock-${CLUSTER_NAME}-provisioner
+ namespace: ${NAMESPACE}
+- kind: DeploymentConfig
+ apiVersion: v1
+ metadata:
+ name: glusterblock-${CLUSTER_NAME}-provisioner-dc
+ labels:
+ glusterfs: block-${CLUSTER_NAME}-provisioner-dc
+ glusterblock: ${CLUSTER_NAME}-provisioner-dc
+ annotations:
+ description: Defines how to deploy the glusterblock provisioner pod.
+ spec:
+ replicas: 1
+ selector:
+ glusterfs: block-${CLUSTER_NAME}-provisioner-pod
+ triggers:
+ - type: ConfigChange
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ name: glusterblock-provisioner
+ labels:
+ glusterfs: block-${CLUSTER_NAME}-provisioner-pod
+ spec:
+ serviceAccountName: glusterblock-${CLUSTER_NAME}-provisioner
+ containers:
+ - name: glusterblock-provisioner
+ image: ${IMAGE_NAME}:${IMAGE_VERSION}
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: PROVISIONER_NAME
+ value: gluster.org/glusterblock
+parameters:
+- name: IMAGE_NAME
+ displayName: glusterblock provisioner container image name
+ required: True
+- name: IMAGE_VERSION
+ displayName: glusterblock provisioner container image version
+ required: True
+- name: NAMESPACE
+ displayName: glusterblock provisioner namespace
+ description: The namespace in which these resources are being created
+ required: True
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances
+ value: storage
diff --git a/roles/openshift_storage_glusterfs/files/v3.7/glusterfs-template.yml b/roles/openshift_storage_glusterfs/files/v3.7/glusterfs-template.yml
index 8c5e1ded3..09850a2c2 100644
--- a/roles/openshift_storage_glusterfs/files/v3.7/glusterfs-template.yml
+++ b/roles/openshift_storage_glusterfs/files/v3.7/glusterfs-template.yml
@@ -35,6 +35,15 @@ objects:
- name: glusterfs
image: ${IMAGE_NAME}:${IMAGE_VERSION}
imagePullPolicy: IfNotPresent
+ env:
+ - name: GB_GLFS_LRU_COUNT
+ value: "${GB_GLFS_LRU_COUNT}"
+ - name: TCMU_LOGDIR
+ value: "${TCMU_LOGDIR}"
+ resources:
+ requests:
+ memory: 100Mi
+ cpu: 100m
volumeMounts:
- name: glusterfs-heketi
mountPath: "/var/lib/heketi"
@@ -83,7 +92,6 @@ objects:
periodSeconds: 25
successThreshold: 1
failureThreshold: 15
- resources: {}
terminationMessagePath: "/dev/termination-log"
volumes:
- name: glusterfs-heketi
@@ -134,3 +142,13 @@ parameters:
displayName: GlusterFS cluster name
description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances
value: storage
+- name: GB_GLFS_LRU_COUNT
+ displayName: Maximum number of block hosting volumes
+  description: Sets the maximum number of block-hosting volumes.
+ value: "15"
+ required: true
+- name: TCMU_LOGDIR
+  displayName: tcmu-runner log directory
+  description: Sets the tcmu-runner log directory.
+ value: "/var/log/glusterfs/gluster-block"
+ required: true
diff --git a/roles/openshift_storage_glusterfs/files/v3.7/heketi-template.yml b/roles/openshift_storage_glusterfs/files/v3.7/heketi-template.yml
index 61b6a8c13..28cdb2982 100644
--- a/roles/openshift_storage_glusterfs/files/v3.7/heketi-template.yml
+++ b/roles/openshift_storage_glusterfs/files/v3.7/heketi-template.yml
@@ -15,6 +15,7 @@ objects:
name: heketi-${CLUSTER_NAME}
labels:
glusterfs: heketi-${CLUSTER_NAME}-service
+ heketi: ${CLUSTER_NAME}-service
annotations:
description: Exposes Heketi service
spec:
@@ -30,6 +31,7 @@ objects:
name: ${HEKETI_ROUTE}
labels:
glusterfs: heketi-${CLUSTER_NAME}-route
+ heketi: ${CLUSTER_NAME}-route
spec:
to:
kind: Service
@@ -40,6 +42,7 @@ objects:
name: heketi-${CLUSTER_NAME}
labels:
glusterfs: heketi-${CLUSTER_NAME}-dc
+ heketi: ${CLUSTER_NAME}-dc
annotations:
description: Defines how to deploy Heketi
spec:
@@ -55,6 +58,7 @@ objects:
name: heketi-${CLUSTER_NAME}
labels:
glusterfs: heketi-${CLUSTER_NAME}-pod
+ heketi: ${CLUSTER_NAME}-pod
spec:
serviceAccountName: heketi-${CLUSTER_NAME}-service-account
containers:
@@ -69,13 +73,11 @@ objects:
- name: HEKETI_EXECUTOR
value: ${HEKETI_EXECUTOR}
- name: HEKETI_FSTAB
- value: /var/lib/heketi/fstab
+ value: ${HEKETI_FSTAB}
- name: HEKETI_SNAPSHOT_LIMIT
value: '14'
- name: HEKETI_KUBE_GLUSTER_DAEMONSET
value: '1'
- - name: HEKETI_KUBE_NAMESPACE
- value: ${HEKETI_KUBE_NAMESPACE}
ports:
- containerPort: 8080
volumeMounts:
@@ -114,10 +116,10 @@ parameters:
displayName: heketi executor type
description: Set the executor type, kubernetes or ssh
value: kubernetes
-- name: HEKETI_KUBE_NAMESPACE
- displayName: Namespace
- description: Set the namespace where the GlusterFS pods reside
- value: default
+- name: HEKETI_FSTAB
+ displayName: heketi fstab path
+  description: Set the path to the fstab file that heketi populates with the bricks it creates
+ value: /var/lib/heketi/fstab
- name: HEKETI_ROUTE
displayName: heketi route name
description: Set the hostname for the route URL
diff --git a/roles/openshift_storage_glusterfs/tasks/gluster_s3_deploy.yml b/roles/openshift_storage_glusterfs/tasks/gluster_s3_deploy.yml
new file mode 100644
index 000000000..1664ecc1e
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/tasks/gluster_s3_deploy.yml
@@ -0,0 +1,113 @@
+---
+- name: Delete pre-existing gluster-s3 resources
+ oc_obj:
+ namespace: "{{ glusterfs_namespace }}"
+ kind: "{{ item.kind }}"
+ name: "{{ item.name | default(omit) }}"
+ selector: "{{ item.selector | default(omit) }}"
+ state: absent
+ with_items:
+ - kind: "all,svc,deploy,secret,sc,pvc"
+ selector: "gluster-s3"
+ failed_when: False
+ when: glusterfs_wipe
+
+- name: Wait for gluster-s3 pods to terminate
+ oc_obj:
+ namespace: "{{ glusterfs_namespace }}"
+ kind: pod
+ state: list
+ selector: "glusterfs=s3-{{ glusterfs_name }}-provisioner-pod"
+ register: gluster_s3_pod
+ until: "gluster_s3_pod.results.results[0]['items'] | count == 0"
+ delay: 10
+ retries: "{{ (glusterfs_timeout | int / 10) | int }}"
+ when: glusterfs_wipe
+
+- name: Copy gluster-s3 PVCs template file
+ copy:
+ src: "{{ openshift.common.examples_content_version }}/{{ item }}"
+ dest: "{{ mktemp.stdout }}/{{ item }}"
+ with_items:
+ - "gluster-s3-pvcs-template.yml"
+
+- name: Create gluster-s3 PVCs template
+ oc_obj:
+ namespace: "{{ glusterfs_namespace }}"
+ kind: template
+ name: "gluster-s3-pvcs"
+ state: present
+ files:
+ - "{{ mktemp.stdout }}/gluster-s3-pvcs-template.yml"
+
+- name: Create gluster-s3 PVCs
+ oc_process:
+ namespace: "{{ glusterfs_namespace }}"
+ template_name: "gluster-s3-pvcs"
+ create: True
+ params:
+ S3_ACCOUNT: "{{ glusterfs_s3_account }}"
+ PVC: "{{ glusterfs_s3_pvc }}"
+ PVC_SIZE: "{{ glusterfs_s3_pvc_size }}"
+ META_PVC: "{{ glusterfs_s3_meta_pvc }}"
+ META_PVC_SIZE: "{{ glusterfs_s3_meta_pvc_size }}"
+ CLUSTER_NAME: "{{ glusterfs_name }}"
+
+- name: Wait for gluster-s3 PVCs
+ oc_obj:
+ namespace: "{{ glusterfs_namespace }}"
+ kind: pvc
+ state: list
+ selector: "glusterfs=s3-{{ glusterfs_name }}-{{ glusterfs_s3_account }}-storage"
+ register: gluster_s3_pvcs
+ until:
+ - "gluster_s3_pvcs.results.results[0]['items'] | count > 0"
+  # Each PVC's 'Bound' condition must be True
+ - "gluster_s3_pvcs.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Bound'}) | map('bool') | select | list | count == 2"
+ delay: 10
+ retries: "{{ (glusterfs_timeout | int / 10) | int }}"
+
+- name: Copy gluster-s3 template file
+ copy:
+ src: "{{ openshift.common.examples_content_version }}/{{ item }}"
+ dest: "{{ mktemp.stdout }}/{{ item }}"
+ with_items:
+ - "gluster-s3-template.yml"
+
+- name: Create gluster-s3 template
+ oc_obj:
+ namespace: "{{ glusterfs_namespace }}"
+ kind: template
+ name: "gluster-s3"
+ state: present
+ files:
+ - "{{ mktemp.stdout }}/gluster-s3-template.yml"
+
+- name: Deploy gluster-s3 service
+ oc_process:
+ namespace: "{{ glusterfs_namespace }}"
+ template_name: "gluster-s3"
+ create: True
+ params:
+ IMAGE_NAME: "{{ glusterfs_s3_image }}"
+ IMAGE_VERSION: "{{ glusterfs_s3_version }}"
+ S3_ACCOUNT: "{{ glusterfs_s3_account }}"
+ S3_USER: "{{ glusterfs_s3_user }}"
+ S3_PASSWORD: "{{ glusterfs_s3_password }}"
+ PVC: "{{ glusterfs_s3_pvc }}"
+ META_PVC: "{{ glusterfs_s3_meta_pvc }}"
+ CLUSTER_NAME: "{{ glusterfs_name }}"
+
+- name: Wait for gluster-s3 pod
+ oc_obj:
+ namespace: "{{ glusterfs_namespace }}"
+ kind: pod
+ state: list
+ selector: "glusterfs=s3-{{ glusterfs_name }}-{{ glusterfs_s3_account }}-pod"
+ register: gluster_s3_pod
+ until:
+ - "gluster_s3_pod.results.results[0]['items'] | count > 0"
+ # Pod's 'Ready' status must be True
+ - "gluster_s3_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1"
+ delay: 10
+ retries: "{{ (glusterfs_timeout | int / 10) | int }}"
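Once the pod reports Ready, the S3 endpoint should answer over the route created by the gluster-s3 template. A hypothetical smoke test (the route hostname is a placeholder; in a real play it would come from an oc_obj route lookup):

```yaml
# Hypothetical follow-up task; accepts any response that proves the
# endpoint is serving, including auth failures.
- name: Check gluster-s3 endpoint responds
  uri:
    url: "http://gluster-s3-storage-testvolume-route.apps.example.com/"
    status_code: [200, 403, 404]
```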
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterblock_deploy.yml b/roles/openshift_storage_glusterfs/tasks/glusterblock_deploy.yml
new file mode 100644
index 000000000..bba1de654
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/tasks/glusterblock_deploy.yml
@@ -0,0 +1,66 @@
+---
+- name: Delete pre-existing glusterblock provisioner resources
+ oc_obj:
+ namespace: "{{ glusterfs_namespace }}"
+ kind: "{{ item.kind }}"
+ name: "{{ item.name | default(omit) }}"
+ selector: "{{ item.selector | default(omit) }}"
+ state: absent
+ with_items:
+ - kind: "all,deploy,sa,clusterrole,clusterrolebinding"
+ selector: "glusterblock"
+ failed_when: False
+ when: glusterfs_wipe
+
+- name: Wait for glusterblock provisioner pods to terminate
+ oc_obj:
+ namespace: "{{ glusterfs_namespace }}"
+ kind: pod
+ state: list
+ selector: "glusterfs=block-{{ glusterfs_name }}-provisioner-pod"
+ register: glusterblock_pod
+ until: "glusterblock_pod.results.results[0]['items'] | count == 0"
+ delay: 10
+ retries: "{{ (glusterfs_timeout | int / 10) | int }}"
+ when: glusterfs_wipe
+
+- name: Copy initial glusterblock provisioner resource file
+ copy:
+ src: "{{ openshift.common.examples_content_version }}/{{ item }}"
+ dest: "{{ mktemp.stdout }}/{{ item }}"
+ with_items:
+ - "glusterblock-template.yml"
+
+- name: Create glusterblock provisioner template
+ oc_obj:
+ namespace: "{{ glusterfs_namespace }}"
+ kind: template
+ name: "glusterblock"
+ state: present
+ files:
+ - "{{ mktemp.stdout }}/glusterblock-template.yml"
+
+- name: Deploy glusterblock provisioner
+ oc_process:
+ namespace: "{{ glusterfs_namespace }}"
+ template_name: "glusterblock"
+ create: True
+ params:
+ IMAGE_NAME: "{{ glusterfs_block_image }}"
+ IMAGE_VERSION: "{{ glusterfs_block_version }}"
+ NAMESPACE: "{{ glusterfs_namespace }}"
+ CLUSTER_NAME: "{{ glusterfs_name }}"
+
+- name: Wait for glusterblock provisioner pod
+ oc_obj:
+ namespace: "{{ glusterfs_namespace }}"
+ kind: pod
+ state: list
+ selector: "glusterfs=block-{{ glusterfs_name }}-provisioner-pod"
+ register: glusterblock_pod
+ until:
+ - "glusterblock_pod.results.results[0]['items'] | count > 0"
+ # Pod's 'Ready' status must be True
+ - "glusterblock_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1"
+ delay: 10
+ retries: "{{ (glusterfs_timeout | int / 10) | int }}"
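With the provisioner Ready, block-backed volumes are requested through a StorageClass naming the provisioner configured via PROVISIONER_NAME above. A hypothetical sketch, not created by this role; the parameter names follow the upstream gluster-block external provisioner and are assumptions here:

```yaml
# Hypothetical StorageClass; resturl, restuser, and secret references
# are placeholders for a real heketi endpoint.
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: glusterfs-storage-block
provisioner: gluster.org/glusterblock
parameters:
  resturl: http://heketi-storage.example.com
  restuser: admin
  restsecretname: heketi-storage-admin-secret
  restsecretnamespace: glusterfs
```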
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
index 51724f979..2a678af57 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
@@ -259,51 +259,59 @@
- glusterfs_heketi_is_native
- glusterfs_heketi_is_missing
-- name: Create heketi secret
- oc_secret:
- namespace: "{{ glusterfs_namespace }}"
- state: present
- name: "heketi-{{ glusterfs_name }}-admin-secret"
- type: "kubernetes.io/glusterfs"
- force: True
- contents:
- - path: key
- data: "{{ glusterfs_heketi_admin_key }}"
- when:
- - glusterfs_storageclass
- - glusterfs_heketi_admin_key is defined
-
-- name: Get heketi route
- oc_obj:
- namespace: "{{ glusterfs_namespace }}"
- kind: route
- state: list
- name: "heketi-{{ glusterfs_name }}"
- register: heketi_route
- when:
- - glusterfs_storageclass
- - glusterfs_heketi_is_native
-
-- name: Determine StorageClass heketi URL
+- name: Disable gluster-s3 deployment if required credentials are undefined
set_fact:
- glusterfs_heketi_route: "{{ heketi_route.results.results[0]['spec']['host'] }}"
+ glusterfs_s3_deploy: False
when:
- - glusterfs_storageclass
- - glusterfs_heketi_is_native
-
-- name: Generate GlusterFS StorageClass file
- template:
- src: "{{ openshift.common.examples_content_version }}/glusterfs-storageclass.yml.j2"
- dest: "{{ mktemp.stdout }}/glusterfs-storageclass.yml"
+ - "glusterfs_s3_account is not defined or glusterfs_s3_user is not defined or glusterfs_s3_password is not defined"
+
+- block:
+ - name: Create heketi secret
+ oc_secret:
+ namespace: "{{ glusterfs_namespace }}"
+ state: present
+ name: "heketi-{{ glusterfs_name }}-admin-secret"
+ type: "kubernetes.io/glusterfs"
+ force: True
+ contents:
+ - path: key
+ data: "{{ glusterfs_heketi_admin_key }}"
+ when:
+ - glusterfs_heketi_admin_key is defined
+
+ - name: Get heketi route
+ oc_obj:
+ namespace: "{{ glusterfs_namespace }}"
+ kind: route
+ state: list
+ name: "heketi-{{ glusterfs_name }}"
+ register: heketi_route
+ when:
+ - glusterfs_heketi_is_native
+
+ - name: Determine StorageClass heketi URL
+ set_fact:
+ glusterfs_heketi_route: "{{ heketi_route.results.results[0]['spec']['host'] }}"
+ when:
+ - glusterfs_heketi_is_native
+
+ - name: Generate GlusterFS StorageClass file
+ template:
+ src: "{{ openshift.common.examples_content_version }}/glusterfs-storageclass.yml.j2"
+ dest: "{{ mktemp.stdout }}/glusterfs-storageclass.yml"
+
+ - name: Create GlusterFS StorageClass
+ oc_obj:
+ state: present
+ kind: storageclass
+ name: "glusterfs-{{ glusterfs_name }}"
+ files:
+ - "{{ mktemp.stdout }}/glusterfs-storageclass.yml"
when:
- - glusterfs_storageclass
+ - glusterfs_storageclass or glusterfs_s3_deploy
-- name: Create GlusterFS StorageClass
- oc_obj:
- state: present
- kind: storageclass
- name: "glusterfs-{{ glusterfs_name }}"
- files:
- - "{{ mktemp.stdout }}/glusterfs-storageclass.yml"
- when:
- - glusterfs_storageclass
+- include: glusterblock_deploy.yml
+ when: glusterfs_block_deploy
+
+- include: gluster_s3_deploy.yml
+ when: glusterfs_s3_deploy
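The net effect of the restructure, condensed (a comment-only sketch; task names abbreviated):

```yaml
# 1. glusterfs_s3_deploy is forced to False when any of the S3
#    account/user/password variables is undefined.
# 2. block: heketi secret -> route lookup -> heketi URL -> StorageClass
#    when: glusterfs_storageclass or glusterfs_s3_deploy
#    (the S3 PVCs bind through that StorageClass, so deploying
#    gluster-s3 implies StorageClass creation)
# 3. include: glusterblock_deploy.yml   when: glusterfs_block_deploy
# 4. include: gluster_s3_deploy.yml     when: glusterfs_s3_deploy
```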
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
index 012c722ff..e2d740f35 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
@@ -9,6 +9,20 @@
glusterfs_storageclass: "{{ openshift_storage_glusterfs_storageclass }}"
glusterfs_image: "{{ openshift_storage_glusterfs_image }}"
glusterfs_version: "{{ openshift_storage_glusterfs_version }}"
+ glusterfs_block_deploy: "{{ openshift_storage_glusterfs_block_deploy | bool }}"
+ glusterfs_block_image: "{{ openshift_storage_glusterfs_block_image }}"
+ glusterfs_block_version: "{{ openshift_storage_glusterfs_block_version }}"
+ glusterfs_block_max_host_vol: "{{ openshift_storage_glusterfs_block_max_host_vol }}"
+ glusterfs_s3_deploy: "{{ openshift_storage_glusterfs_s3_deploy | bool }}"
+ glusterfs_s3_image: "{{ openshift_storage_glusterfs_s3_image }}"
+ glusterfs_s3_version: "{{ openshift_storage_glusterfs_s3_version }}"
+ glusterfs_s3_account: "{{ openshift_storage_glusterfs_s3_account }}"
+ glusterfs_s3_user: "{{ openshift_storage_glusterfs_s3_user }}"
+ glusterfs_s3_password: "{{ openshift_storage_glusterfs_s3_password }}"
+ glusterfs_s3_pvc: "{{ openshift_storage_glusterfs_s3_pvc }}"
+ glusterfs_s3_pvc_size: "{{ openshift_storage_glusterfs_s3_pvc_size }}"
+ glusterfs_s3_meta_pvc: "{{ openshift_storage_glusterfs_s3_meta_pvc }}"
+ glusterfs_s3_meta_pvc_size: "{{ openshift_storage_glusterfs_s3_meta_pvc_size }}"
glusterfs_wipe: "{{ openshift_storage_glusterfs_wipe | bool }}"
glusterfs_heketi_is_native: "{{ openshift_storage_glusterfs_heketi_is_native | bool }}"
glusterfs_heketi_is_missing: "{{ openshift_storage_glusterfs_heketi_is_missing | bool }}"
@@ -27,6 +41,7 @@
glusterfs_heketi_ssh_user: "{{ openshift_storage_glusterfs_heketi_ssh_user }}"
glusterfs_heketi_ssh_sudo: "{{ openshift_storage_glusterfs_heketi_ssh_sudo | bool }}"
glusterfs_heketi_ssh_keyfile: "{{ openshift_storage_glusterfs_heketi_ssh_keyfile }}"
+ glusterfs_heketi_fstab: "{{ openshift_storage_glusterfs_heketi_fstab }}"
glusterfs_nodes: "{{ groups.glusterfs }}"
- include: glusterfs_common.yml
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml
index 932d06038..f98d4c62f 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml
@@ -87,6 +87,7 @@
IMAGE_VERSION: "{{ glusterfs_version }}"
NODE_LABELS: "{{ glusterfs_nodeselector }}"
CLUSTER_NAME: "{{ glusterfs_name }}"
+ GB_GLFS_LRU_COUNT: "{{ glusterfs_block_max_host_vol }}"
- name: Wait for GlusterFS pods
oc_obj:
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
index 1bcab8e49..baac52179 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
@@ -9,6 +9,20 @@
glusterfs_storageclass: "{{ openshift_storage_glusterfs_registry_storageclass }}"
glusterfs_image: "{{ openshift_storage_glusterfs_registry_image }}"
glusterfs_version: "{{ openshift_storage_glusterfs_registry_version }}"
+ glusterfs_block_deploy: "{{ openshift_storage_glusterfs_registry_block_deploy | bool }}"
+ glusterfs_block_image: "{{ openshift_storage_glusterfs_registry_block_image }}"
+ glusterfs_block_version: "{{ openshift_storage_glusterfs_registry_block_version }}"
+ glusterfs_block_max_host_vol: "{{ openshift_storage_glusterfs_registry_block_max_host_vol }}"
+ glusterfs_s3_deploy: "{{ openshift_storage_glusterfs_registry_s3_deploy | bool }}"
+ glusterfs_s3_image: "{{ openshift_storage_glusterfs_registry_s3_image }}"
+ glusterfs_s3_version: "{{ openshift_storage_glusterfs_registry_s3_version }}"
+ glusterfs_s3_account: "{{ openshift_storage_glusterfs_registry_s3_account }}"
+ glusterfs_s3_user: "{{ openshift_storage_glusterfs_registry_s3_user }}"
+ glusterfs_s3_password: "{{ openshift_storage_glusterfs_registry_s3_password }}"
+ glusterfs_s3_pvc: "{{ openshift_storage_glusterfs_registry_s3_pvc }}"
+ glusterfs_s3_pvc_size: "{{ openshift_storage_glusterfs_registry_s3_pvc_size }}"
+ glusterfs_s3_meta_pvc: "{{ openshift_storage_glusterfs_registry_s3_meta_pvc }}"
+ glusterfs_s3_meta_pvc_size: "{{ openshift_storage_glusterfs_registry_s3_meta_pvc_size }}"
glusterfs_wipe: "{{ openshift_storage_glusterfs_registry_wipe | bool }}"
glusterfs_heketi_is_native: "{{ openshift_storage_glusterfs_registry_heketi_is_native | bool }}"
glusterfs_heketi_is_missing: "{{ openshift_storage_glusterfs_registry_heketi_is_missing | bool }}"
@@ -27,6 +41,7 @@
glusterfs_heketi_ssh_user: "{{ openshift_storage_glusterfs_registry_heketi_ssh_user }}"
glusterfs_heketi_ssh_sudo: "{{ openshift_storage_glusterfs_registry_heketi_ssh_sudo | bool }}"
glusterfs_heketi_ssh_keyfile: "{{ openshift_storage_glusterfs_registry_heketi_ssh_keyfile }}"
+ glusterfs_heketi_fstab: "{{ openshift_storage_glusterfs_registry_heketi_fstab }}"
glusterfs_nodes: "{{ groups.glusterfs_registry | default(groups.glusterfs) }}"
- include: glusterfs_common.yml
diff --git a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml
index 73396c9af..935d3b689 100644
--- a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml
+++ b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml
@@ -27,7 +27,7 @@
HEKETI_USER_KEY: "{{ glusterfs_heketi_user_key }}"
HEKETI_ADMIN_KEY: "{{ glusterfs_heketi_admin_key }}"
HEKETI_EXECUTOR: "{{ glusterfs_heketi_executor }}"
- HEKETI_KUBE_NAMESPACE: "{{ glusterfs_namespace }}"
+ HEKETI_FSTAB: "{{ glusterfs_heketi_fstab }}"
CLUSTER_NAME: "{{ glusterfs_name }}"
- name: Set heketi Deployed fact
diff --git a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
index 54a6dd7c3..d23bd42b9 100644
--- a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
+++ b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
@@ -107,7 +107,7 @@
HEKETI_USER_KEY: "{{ glusterfs_heketi_user_key }}"
HEKETI_ADMIN_KEY: "{{ glusterfs_heketi_admin_key }}"
HEKETI_EXECUTOR: "{{ glusterfs_heketi_executor }}"
- HEKETI_KUBE_NAMESPACE: "{{ glusterfs_namespace }}"
+ HEKETI_FSTAB: "{{ glusterfs_heketi_fstab }}"
CLUSTER_NAME: "{{ glusterfs_name }}"
- name: Wait for heketi pod
diff --git a/roles/template_service_broker/files/openshift-ansible-catalog-console.js b/roles/template_service_broker/files/openshift-ansible-catalog-console.js
deleted file mode 100644
index 622afb6bd..000000000
--- a/roles/template_service_broker/files/openshift-ansible-catalog-console.js
+++ /dev/null
@@ -1 +0,0 @@
-window.OPENSHIFT_CONSTANTS.TEMPLATE_SERVICE_BROKER_ENABLED = true;
diff --git a/roles/template_service_broker/tasks/install.yml b/roles/template_service_broker/tasks/install.yml
index 0db9e642a..99a58baff 100644
--- a/roles/template_service_broker/tasks/install.yml
+++ b/roles/template_service_broker/tasks/install.yml
@@ -52,11 +52,6 @@
shell: >
{{ openshift.common.client_binary }} process -f "{{ mktemp.stdout }}/{{ __tsb_rbac_file }}" | {{ openshift.common.client_binary }} auth reconcile -f -
-- name: copy tech preview extension file for service console UI
- copy:
- src: openshift-ansible-catalog-console.js
- dest: /etc/origin/master/openshift-ansible-catalog-console.js
-
# Check that the TSB is running
- name: Verify that TSB is running
command: >