From 3e33c9640d2b4b2d15a21483c34bb48845fce7bf Mon Sep 17 00:00:00 2001
From: Saravanakumar <sarumuga@redhat.com>
Date: Thu, 29 Jun 2017 17:58:56 +0530
Subject: rename docker-gluster-swift to docker-gluster-s3 and the top-level
 directory gluster-object to gluster-s3object

Update the README and test scripts to reflect the name change.
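
For reference, the renamed image is pulled and run as shown in the updated
README:

    $ docker pull gluster/gluster-s3
    $ docker run -d --privileged -v /sys/fs/cgroup/:/sys/fs/cgroup/:ro -p 8080:8080 -v /mnt/gluster-object:/mnt/gluster-object gluster/gluster-s3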

Signed-off-by: Saravanakumar <sarumuga@redhat.com>
---
 README.md                                          |  10 +-
 .../CentOS/docker-gluster-swift/Dockerfile         |  82 ----------------
 .../CentOS/docker-gluster-swift/README.md          | 109 ---------------------
 .../etc/swift/account-server.conf                  |  39 --------
 .../etc/swift/container-server.conf                |  39 --------
 .../CentOS/docker-gluster-swift/etc/swift/fs.conf  |  24 -----
 .../etc/swift/object-expirer.conf                  |  61 ------------
 .../etc/swift/object-server.conf                   |  55 -----------
 .../etc/swift/proxy-server.conf                    |  99 -------------------
 .../docker-gluster-swift/etc/swift/swift.conf      |  85 ----------------
 .../etc/sysconfig/swift-volumes                    |   2 -
 .../CentOS/docker-gluster-swift/memcached.service  |   9 --
 .../docker-gluster-swift/swift-account.service     |  10 --
 .../docker-gluster-swift/swift-container.service   |  10 --
 .../swift-gen-builders.service                     |  11 ---
 .../docker-gluster-swift/swift-object.service      |  10 --
 .../docker-gluster-swift/swift-proxy.service       |  10 --
 .../docker-gluster-swift/update_gluster_vol.sh     |  13 ---
 .../CentOS/docker-gluster-s3/Dockerfile            |  82 ++++++++++++++++
 .../CentOS/docker-gluster-s3/README.md             | 109 +++++++++++++++++++++
 .../etc/swift/account-server.conf                  |  39 ++++++++
 .../etc/swift/container-server.conf                |  39 ++++++++
 .../CentOS/docker-gluster-s3/etc/swift/fs.conf     |  24 +++++
 .../etc/swift/object-expirer.conf                  |  61 ++++++++++++
 .../docker-gluster-s3/etc/swift/object-server.conf |  55 +++++++++++
 .../docker-gluster-s3/etc/swift/proxy-server.conf  |  99 +++++++++++++++++++
 .../CentOS/docker-gluster-s3/etc/swift/swift.conf  |  85 ++++++++++++++++
 .../docker-gluster-s3/etc/sysconfig/swift-volumes  |   2 +
 .../CentOS/docker-gluster-s3/memcached.service     |   9 ++
 .../CentOS/docker-gluster-s3/swift-account.service |  10 ++
 .../docker-gluster-s3/swift-container.service      |  10 ++
 .../docker-gluster-s3/swift-gen-builders.service   |  11 +++
 .../CentOS/docker-gluster-s3/swift-object.service  |  10 ++
 .../CentOS/docker-gluster-s3/swift-proxy.service   |  10 ++
 .../CentOS/docker-gluster-s3/update_gluster_vol.sh |  13 +++
 tests/shelltest/test_gluster_container_basic.sh    |   8 +-
 36 files changed, 677 insertions(+), 677 deletions(-)
 delete mode 100644 gluster-object/CentOS/docker-gluster-swift/Dockerfile
 delete mode 100644 gluster-object/CentOS/docker-gluster-swift/README.md
 delete mode 100644 gluster-object/CentOS/docker-gluster-swift/etc/swift/account-server.conf
 delete mode 100644 gluster-object/CentOS/docker-gluster-swift/etc/swift/container-server.conf
 delete mode 100644 gluster-object/CentOS/docker-gluster-swift/etc/swift/fs.conf
 delete mode 100644 gluster-object/CentOS/docker-gluster-swift/etc/swift/object-expirer.conf
 delete mode 100644 gluster-object/CentOS/docker-gluster-swift/etc/swift/object-server.conf
 delete mode 100644 gluster-object/CentOS/docker-gluster-swift/etc/swift/proxy-server.conf
 delete mode 100644 gluster-object/CentOS/docker-gluster-swift/etc/swift/swift.conf
 delete mode 100644 gluster-object/CentOS/docker-gluster-swift/etc/sysconfig/swift-volumes
 delete mode 100644 gluster-object/CentOS/docker-gluster-swift/memcached.service
 delete mode 100644 gluster-object/CentOS/docker-gluster-swift/swift-account.service
 delete mode 100644 gluster-object/CentOS/docker-gluster-swift/swift-container.service
 delete mode 100644 gluster-object/CentOS/docker-gluster-swift/swift-gen-builders.service
 delete mode 100644 gluster-object/CentOS/docker-gluster-swift/swift-object.service
 delete mode 100644 gluster-object/CentOS/docker-gluster-swift/swift-proxy.service
 delete mode 100644 gluster-object/CentOS/docker-gluster-swift/update_gluster_vol.sh
 create mode 100644 gluster-s3object/CentOS/docker-gluster-s3/Dockerfile
 create mode 100644 gluster-s3object/CentOS/docker-gluster-s3/README.md
 create mode 100644 gluster-s3object/CentOS/docker-gluster-s3/etc/swift/account-server.conf
 create mode 100644 gluster-s3object/CentOS/docker-gluster-s3/etc/swift/container-server.conf
 create mode 100644 gluster-s3object/CentOS/docker-gluster-s3/etc/swift/fs.conf
 create mode 100644 gluster-s3object/CentOS/docker-gluster-s3/etc/swift/object-expirer.conf
 create mode 100644 gluster-s3object/CentOS/docker-gluster-s3/etc/swift/object-server.conf
 create mode 100644 gluster-s3object/CentOS/docker-gluster-s3/etc/swift/proxy-server.conf
 create mode 100644 gluster-s3object/CentOS/docker-gluster-s3/etc/swift/swift.conf
 create mode 100644 gluster-s3object/CentOS/docker-gluster-s3/etc/sysconfig/swift-volumes
 create mode 100644 gluster-s3object/CentOS/docker-gluster-s3/memcached.service
 create mode 100644 gluster-s3object/CentOS/docker-gluster-s3/swift-account.service
 create mode 100644 gluster-s3object/CentOS/docker-gluster-s3/swift-container.service
 create mode 100644 gluster-s3object/CentOS/docker-gluster-s3/swift-gen-builders.service
 create mode 100644 gluster-s3object/CentOS/docker-gluster-s3/swift-object.service
 create mode 100644 gluster-s3object/CentOS/docker-gluster-s3/swift-proxy.service
 create mode 100644 gluster-s3object/CentOS/docker-gluster-s3/update_gluster_vol.sh

diff --git a/README.md b/README.md
index 68eee5c..57f5043 100644
--- a/README.md
+++ b/README.md
@@ -119,12 +119,12 @@ Additional Ref# https://goo.gl/3031Mm
 
 ## Gluster Object Docker container:
 
-### To pull gluster-object:
+### To pull gluster-s3:
 ~~~
-$ docker pull gluster/gluster-object
+$ docker pull gluster/gluster-s3
 ~~~
 
-### To run gluster-object container:
+### To run gluster-s3 container:
 
 On the host machine, mount one or more gluster volumes under the directory
 `/mnt/gluster-object` with mountpoint name being same as that of the volume.
@@ -143,10 +143,10 @@ GLUSTER_VOLUMES='tv1'
 Where tv1 is the volume name.
 
 ~~~
-$ docker run -d --privileged  -v /sys/fs/cgroup/:/sys/fs/cgroup/:ro -p 8080:8080 -v /mnt/gluster-object:/mnt/gluster-object  gluster/gluster-object
+$ docker run -d --privileged  -v /sys/fs/cgroup/:/sys/fs/cgroup/:ro -p 8080:8080 -v /mnt/gluster-object:/mnt/gluster-object  gluster/gluster-s3
 ~~~
 
-Now, We can get/put objects into the gluster volume, using the gluster-object Docker container.
+Now, we can get/put objects into the gluster volume using the gluster-s3 Docker container.
 Refer this link[1] for testing.
 
 [1] https://github.com/gluster/gluster-swift/blob/master/doc/markdown/quick_start_guide.md#using_swift
diff --git a/gluster-object/CentOS/docker-gluster-swift/Dockerfile b/gluster-object/CentOS/docker-gluster-swift/Dockerfile
deleted file mode 100644
index 9ec1e2f..0000000
--- a/gluster-object/CentOS/docker-gluster-swift/Dockerfile
+++ /dev/null
@@ -1,82 +0,0 @@
-FROM centos:7
-MAINTAINER Prashanth Pai <ppai@redhat.com>
-
-# centos-release-openstack-kilo package resides in the extras repo.
-# All subsequent actual packages come from the CentOS Cloud SIG repo:
-# http://mirror.centos.org/centos/7/cloud/x86_64/
-
-# Install PACO servers and S3 middleware.
-# Install gluster-swift dependencies. To be removed when RPMs become available.
-# Clean downloaded packages and index
-
-LABEL architecture="x86_64" \
-      name="gluster/gluster-swift" \
-      version="kilo" \
-      vendor="Red Hat, Inc" \
-      summary="This image has a running gluster-swift service ( centos 7 + gluster-swift)" \
-      io.k8s.display-name="gluster-swift based on centos 7" \
-      io.k8s.description="gluster-swift image is based on centos image which enables files and directories created on GlusterFS to be accessed as objects via the Swift and S3 API." \
-      description="gluster-swift image is based on centos image which enables files and directories created on GlusterFS to be accessed as objects via the Swift and S3 API." \
-      io.openshift.tags="gluster,glusterfs,gluster-swift"
-
-RUN yum -v --setopt=tsflags=nodocs -y update && \
-    yum -v --setopt=tsflags=nodocs -y install \
-        centos-release-openstack-kilo \
-        epel-release && \
-    yum -v --setopt=tsflags=nodocs -y install \
-        openstack-swift openstack-swift-{proxy,account,container,object,plugin-swift3} \
-        git memcached python-prettytable && \
-    yum -y install systemd && \
-        (cd /lib/systemd/system/sysinit.target.wants/; for i in *; do [ $i == systemd-tmpfiles-setup.service ] || rm -f $i; done); \
-        rm -f /lib/systemd/system/multi-user.target.wants/*;\
-        rm -f /etc/systemd/system/*.wants/*;\
-        rm -f /lib/systemd/system/local-fs.target.wants/*; \
-        rm -f /lib/systemd/system/sockets.target.wants/*udev*; \
-        rm -f /lib/systemd/system/sockets.target.wants/*initctl*; \
-        rm -f /lib/systemd/system/basic.target.wants/*;\
-        rm -f /lib/systemd/system/anaconda.target.wants/* && \
-    yum -y clean all
-
-# Install gluster-swift from source.
-# TODO: When gluster-swift is shipped as RPM, just use that.
-RUN git clone git://review.gluster.org/gluster-swift /tmp/gluster-swift && \
-    cd /tmp/gluster-swift && \
-    python setup.py install && \
-    cd - && \
-    rm -rf /tmp/gluster-swift
-
-# Gluster volumes will be mounted *under* this directory.
-VOLUME /mnt/gluster-object
-
-# Copy systemd scripts
-COPY swift-gen-builders.service /lib/systemd/system/
-COPY swift-proxy.service /lib/systemd/system/
-COPY swift-account.service /lib/systemd/system/
-COPY swift-container.service /lib/systemd/system/
-COPY swift-object.service /lib/systemd/system/
-
-# Replace openstack swift conf files with local gluster-swift ones
-COPY etc/swift/* /etc/swift/
-
-# To update volume name used by swift-gen-builders service
-COPY update_gluster_vol.sh /usr/local/bin/update_gluster_vol.sh
-RUN chmod +x /usr/local/bin/update_gluster_vol.sh
-
-# volumes to be exposed as object storage is present in swift-volumes file
-COPY etc/sysconfig/swift-volumes /etc/sysconfig/swift-volumes
-
-# The proxy server listens on port 8080
-EXPOSE 8080
-
-RUN echo 'root:password' | chpasswd
-VOLUME [ "/sys/fs/cgroup" ]
-
-RUN systemctl enable swift-gen-builders.service;\
-systemctl enable memcached.service;\
-systemctl enable swift-proxy.service;\
-systemctl enable swift-account.service;\
-systemctl enable swift-container.service;\
-systemctl enable swift-object.service;
-
-ENTRYPOINT ["/usr/local/bin/update_gluster_vol.sh"]
-CMD ["/usr/sbin/init"]
diff --git a/gluster-object/CentOS/docker-gluster-swift/README.md b/gluster-object/CentOS/docker-gluster-swift/README.md
deleted file mode 100644
index 41e966d..0000000
--- a/gluster-object/CentOS/docker-gluster-swift/README.md
+++ /dev/null
@@ -1,109 +0,0 @@
-
-# docker-gluster-swift
-docker-gluster-swift is to provide object interface for a Gluster volume.
-
-Let us see how to run gluster-swift inside a docker container.
-
-## Building
-
-```bash
-# docker build --rm --tag gluster-swift .
-```
-
-## Running
-
-On the host machine, mount one or more gluster volumes under the directory
-`/mnt/gluster-object` with mountpoint name being same as that of the volume.
-
-For example, if you have two gluster volumes named `test` and `test2`, they
-should be mounted at `/mnt/gluster-object/test` and `/mnt/gluster-object/test2`
-respectively. This directory on the host machine containing all the individual
-glusterfs mounts is then bind-mounted inside the container. This avoids having
-to bind mount individual gluster volumes.
-
-The same needs to be updated in etc/sysconfig/swift-volumes.
-For example(in swift-volumes):
-GLUSTER_VOLUMES='tv1'
-
-Where tv1 is the volume name.
-
-**Example:**
-
-```bash
-# docker run -d --privileged  -v /sys/fs/cgroup/:/sys/fs/cgroup/:ro -p 8080:8080 -v /mnt/gluster-object:/mnt/gluster-object    -e GLUSTER_VOLUMES="tv1" gluster-swift
-```
-
-If you have selinux set to enforced on the host machine, refer to the
-Troubleshooting section below before running the container.
-
-**Note:**
-
-~~~
--d : Runs the container in the background.
--p : Publishes the container's port to the host port. They need not be the same.
-     If host port is omitted, a random port will be mapped. So you can run
-     multiple instances of the container, each serving on a different port on
-     the same host machine.
--v : Bind mount a host path inside the container.
--e : Set and pass environment variable. In our case, provide a list of volumes
-     to be exported over object inerface by setting GLUSTER_VOLUMES environment
-     variable.
-~~~
-
-### Custom deployment
-
-You can provide your own configuration files and ring files and have the
-swift processes running inside container use those. This can be done by
-placing your conf files and ring files in a directory on your host machine
-and then bind-mounting it inside the container at `/etc/swift`.
-
-**Example:**
-
-Assuming you have conf files and ring files present at `/tmp/swift` on the
-machine, you can spawn the container as follows:
-
-```bash
-# docker run -d -p 8080:8080 -v /tmp/swift:/etc/swift -v /mnt/gluster-object:/mnt/gluster-object prashanthpai/gluster-swift:dev
-```
-
-If the host machine has SELinux set to enforced:
-
-```bash
-# chcon -Rt svirt_sandbox_file_t /tmp/swift
-```
-
-### Troubleshooting
-
-**SELinux**
-
-When a volume is bind mounted inside the container, you'll need blessings of
-SELinux on the host machine. Otherwise, the application inside the container
-won't be able to access the volume. Example:
-
-```bash
-[root@f24 ~]# docker exec -i -t nostalgic_goodall /bin/bash
-[root@042abf4acc4d /]# ls /mnt/gluster-object/
-ls: cannot open directory /mnt/gluster-object/: Permission denied
-```
-
-Ideally, running this command on host machine should work:
-
-```bash
-# chcon -Rt svirt_sandbox_file_t /mnt/gluster-object
-```
-
-However, glusterfs does not support setting of SELinux contexts [yet][1].
-You can always set SELinux to permissive on host machine by running
-`setenforce 0` or run container in privileged mode (`--privileged=true`).
-I don't like either. A better workaround would be to mount the glusterfs
-volumes on host machine as shown in following example:
-
-[1]: https://bugzilla.redhat.com/show_bug.cgi?id=1252627
-
-```bash
-mount -t glusterfs -o selinux,context="system_u:object_r:svirt_sandbox_file_t:s0" `hostname`:test /mnt/gluster-object/test
-```
-
-### TODO
-
-* Install gluster-swift from RPMs. (Currently installed from source)
diff --git a/gluster-object/CentOS/docker-gluster-swift/etc/swift/account-server.conf b/gluster-object/CentOS/docker-gluster-swift/etc/swift/account-server.conf
deleted file mode 100644
index 726f9c7..0000000
--- a/gluster-object/CentOS/docker-gluster-swift/etc/swift/account-server.conf
+++ /dev/null
@@ -1,39 +0,0 @@
-[DEFAULT]
-#
-# Default gluster mount point to be used for object store,can be changed by
-# setting the following value in {account,container,object}-server.conf files.
-# It is recommended to keep this value same for all the three services but can
-# be kept different if environment demands.
-devices = /mnt/gluster-object
-#
-# Once you are confident that your startup processes will always have your
-# gluster volumes properly mounted *before* the account-server workers start,
-# you can *consider* setting this value to "false" to reduce the per-request
-# overhead it can incur.
-mount_check = false
-bind_port = 6012
-#
-# Override swift's default behaviour for fallocate.
-disable_fallocate = true
-#
-# One or two workers should be sufficient for almost any installation of
-# Gluster.
-workers = 1
-
-[pipeline:main]
-pipeline = account-server
-
-[app:account-server]
-use = egg:gluster_swift#account
-user = root
-log_facility = LOG_LOCAL2
-log_level = WARN
-# The following parameter is used by object-expirer and needs to be same
-# across all conf files!
-auto_create_account_prefix = gs
-#
-# After ensuring things are running in a stable manner, you can turn off
-# normal request logging for the account server to unclutter the log
-# files. Warnings and errors will still be logged.
-log_requests = off
-
diff --git a/gluster-object/CentOS/docker-gluster-swift/etc/swift/container-server.conf b/gluster-object/CentOS/docker-gluster-swift/etc/swift/container-server.conf
deleted file mode 100644
index e8f67e3..0000000
--- a/gluster-object/CentOS/docker-gluster-swift/etc/swift/container-server.conf
+++ /dev/null
@@ -1,39 +0,0 @@
-[DEFAULT]
-#
-# Default gluster mount point to be used for object store,can be changed by
-# setting the following value in {account,container,object}-server.conf files.
-# It is recommended to keep this value same for all the three services but can
-# be kept different if environment demands.
-devices = /mnt/gluster-object
-#
-# Once you are confident that your startup processes will always have your
-# gluster volumes properly mounted *before* the container-server workers
-# start, you can *consider* setting this value to "false" to reduce the
-# per-request overhead it can incur.
-mount_check = false
-bind_port = 6011
-#
-# Override swift's default behaviour for fallocate.
-disable_fallocate = true
-#
-# One or two workers should be sufficient for almost any installation of
-# Gluster.
-workers = 1
-
-[pipeline:main]
-pipeline = container-server
-
-[app:container-server]
-use = egg:gluster_swift#container
-user = root
-log_facility = LOG_LOCAL2
-log_level = WARN
-# The following parameters is used by object-expirer and needs to be same
-# across all conf files!
-auto_create_account_prefix = gs
-#
-# After ensuring things are running in a stable manner, you can turn off
-# normal request logging for the container server to unclutter the log
-# files. Warnings and errors will still be logged.
-log_requests = off
-
diff --git a/gluster-object/CentOS/docker-gluster-swift/etc/swift/fs.conf b/gluster-object/CentOS/docker-gluster-swift/etc/swift/fs.conf
deleted file mode 100644
index 31a5e6f..0000000
--- a/gluster-object/CentOS/docker-gluster-swift/etc/swift/fs.conf
+++ /dev/null
@@ -1,24 +0,0 @@
-[DEFAULT]
-#
-# IP address of a node in the GlusterFS server cluster hosting the
-# volumes to be served via Swift API.
-mount_ip = localhost
-
-# Performance optimization parameter. When turned off, the filesystem will
-# see a reduced number of stat calls, resulting in substantially faster
-# response time for GET and HEAD container requests on containers with large
-# numbers of objects, at the expense of an accurate count of combined bytes
-# used by all objects in the container. For most installations "off" works
-# fine.
-accurate_size_in_listing = off
-
-# In older versions of gluster-swift, metadata stored as xattrs of dirs/files
-# were serialized using PICKLE format. The PICKLE format is vulnerable to
-# exploits in deployments where a user has access to backend filesystem over
-# FUSE/SMB. Deserializing pickled metadata can result in malicious code being
-# executed if an attacker has stored malicious code as xattr from filesystem
-# interface. Although, new metadata is always serialized using JSON format,
-# existing metadata already stored in PICKLE format are loaded by default.
-# You can turn this option to 'off' once you have migrated all your metadata
-# from PICKLE format to JSON format using gluster-swift-migrate-metadata tool.
-read_pickled_metadata = on
diff --git a/gluster-object/CentOS/docker-gluster-swift/etc/swift/object-expirer.conf b/gluster-object/CentOS/docker-gluster-swift/etc/swift/object-expirer.conf
deleted file mode 100644
index 8be8626..0000000
--- a/gluster-object/CentOS/docker-gluster-swift/etc/swift/object-expirer.conf
+++ /dev/null
@@ -1,61 +0,0 @@
-[DEFAULT]
-user = root
-# Default gluster mount point to be used for object store,can be changed by
-# setting the following value in {account,container,object}-server.conf files.
-devices = /mnt/gluster-object
-
-[object-expirer]
-user = root
-log_facility = LOG_LOCAL2
-log_level = INFO
-
-# The following parameters are used by object-expirer and needs to be same
-# across all conf files!
-auto_create_account_prefix = gs
-expiring_objects_account_name = expiring
-
-# The expirer will re-attempt expiring if the source object is not available
-# up to reclaim_age seconds before it gives up and deletes the entry in the
-# queue. In gluster-swift, you'd almost always want to set this to zero.
-reclaim_age = 0
-
-# Do not retry DELETEs on getting 404. Hence default is set to 1.
-request_tries = 1
-
-# The swift-object-expirer daemon will run every 'interval' number of seconds
-# interval = 300
-
-# Emit a log line report of the progress so far every 'report_interval'
-# number of seconds.
-# report_interval = 300
-
-# concurrency is the level of concurrency to use to do the work, this value
-# must be set to at least 1
-# concurrency = 1
-
-# processes is how many parts to divide the work into, one part per process
-# that will be doing the work
-# processes set 0 means that a single process will be doing all the work
-# processes can also be specified on the command line and will override the
-# config value
-# processes = 0
-
-# process is which of the parts a particular process will work on
-# process can also be specified on the command line and will overide the config
-# value
-# process is "zero based", if you want to use 3 processes, you should run
-# processes with process set to 0, 1, and 2
-# process = 0
-
-
-[pipeline:main]
-pipeline = catch_errors cache proxy-server
-
-[app:proxy-server]
-use = egg:gluster_swift#proxy
-
-[filter:cache]
-use = egg:swift#memcache
-
-[filter:catch_errors]
-use = egg:swift#catch_errors
diff --git a/gluster-object/CentOS/docker-gluster-swift/etc/swift/object-server.conf b/gluster-object/CentOS/docker-gluster-swift/etc/swift/object-server.conf
deleted file mode 100644
index 2c7df2e..0000000
--- a/gluster-object/CentOS/docker-gluster-swift/etc/swift/object-server.conf
+++ /dev/null
@@ -1,55 +0,0 @@
-[DEFAULT]
-#
-# Default gluster mount point to be used for object store,can be changed by
-# setting the following value in {account,container,object}-server.conf files.
-# It is recommended to keep this value same for all the three services but can
-# be kept different if environment demands.
-devices = /mnt/gluster-object
-#
-# Once you are confident that your startup processes will always have your
-# gluster volumes properly mounted *before* the object-server workers start,
-# you can *consider* setting this value to "false" to reduce the per-request
-# overhead it can incur.
-mount_check = false
-bind_port = 6010
-#
-# Maximum number of clients one worker can process simultaneously (it will
-# actually accept N + 1). Setting this to one (1) will only handle one request
-# at a time, without accepting another request concurrently. By increasing the
-# number of workers to a much higher value, one can prevent slow file system
-# operations for one request from starving other requests.
-max_clients = 1024
-#
-# If not doing the above, setting this value initially to match the number of
-# CPUs is a good starting point for determining the right value.
-workers = 1
-# Override swift's default behaviour for fallocate.
-disable_fallocate = true
-
-[pipeline:main]
-pipeline = object-server
-
-[app:object-server]
-use = egg:gluster_swift#object
-user = root
-log_facility = LOG_LOCAL2
-log_level = WARN
-# The following parameters are used by object-expirer and needs to be same
-# across all conf files!
-auto_create_account_prefix = gs
-expiring_objects_account_name = expiring
-#
-# For performance, after ensuring things are running in a stable manner, you
-# can turn off normal request logging for the object server to reduce the
-# per-request overhead and unclutter the log files. Warnings and errors will
-# still be logged.
-log_requests = off
-#
-# Adjust this value to match the stripe width of the underlying storage array
-# (not the stripe element size). This will provide a reasonable starting point
-# for tuning this value.
-disk_chunk_size = 65536
-#
-# Adjust this value match whatever is set for the disk_chunk_size initially.
-# This will provide a reasonable starting point for tuning this value.
-network_chunk_size = 65536
diff --git a/gluster-object/CentOS/docker-gluster-swift/etc/swift/proxy-server.conf b/gluster-object/CentOS/docker-gluster-swift/etc/swift/proxy-server.conf
deleted file mode 100644
index 979b735..0000000
--- a/gluster-object/CentOS/docker-gluster-swift/etc/swift/proxy-server.conf
+++ /dev/null
@@ -1,99 +0,0 @@
-[DEFAULT]
-bind_port = 8080
-user = root
-# Consider using 1 worker per CPU
-workers = 1
-
-[pipeline:main]
-pipeline = catch_errors gatekeeper healthcheck proxy-logging cache bulk ratelimit swift3 staticweb slo dlo proxy-logging proxy-server
-
-[app:proxy-server]
-use = egg:gluster_swift#proxy
-log_facility = LOG_LOCAL1
-log_level = WARN
-# The API allows for account creation and deletion, but since Gluster/Swift
-# automounts a Gluster volume for a given account, there is no way to create
-# or delete an account. So leave this off.
-allow_account_management = false
-account_autocreate = true
-# The following parameters are used by object-expirer and needs to be same
-# across all conf files!
-auto_create_account_prefix = gs
-expiring_objects_account_name = expiring
-# Ensure the proxy server uses fast-POSTs since we don't need to make a copy
-# of the entire object given that all metadata is stored in the object
-# extended attributes (no .meta file used after creation) and no container
-# sync feature to present.
-object_post_as_copy = false
-# Only need to recheck the account exists once a day
-recheck_account_existence = 86400
-# May want to consider bumping this up if containers are created and destroyed
-# infrequently.
-recheck_container_existence = 60
-# Timeout clients that don't read or write to the proxy server after 5
-# seconds.
-client_timeout = 5
-# Give more time to connect to the object, container or account servers in
-# cases of high load.
-conn_timeout = 5
-# For high load situations, once connected to an object, container or account
-# server, allow for delays communicating with them.
-node_timeout = 60
-# May want to consider bumping up this value to 1 - 4 MB depending on how much
-# traffic is for multi-megabyte or gigabyte requests; perhaps matching the
-# stripe width (not stripe element size) of your storage volume is a good
-# starting point. See below for sizing information.
-object_chunk_size = 65536
-# If you do decide to increase the object_chunk_size, then consider lowering
-# this value to one. Up to "put_queue_length" object_chunk_size'd buffers can
-# be queued to the object server for processing. Given one proxy server worker
-# can handle up to 1,024 connections, by default, it will consume 10 * 65,536
-# * 1,024 bytes of memory in the worse case (default values). Be sure the
-# amount of memory available on the system can accommodate increased values
-# for object_chunk_size.
-put_queue_depth = 10
-
-[filter:catch_errors]
-use = egg:swift#catch_errors
-
-[filter:proxy-logging]
-use = egg:swift#proxy_logging
-access_log_level = WARN
-
-[filter:healthcheck]
-use = egg:swift#healthcheck
-
-[filter:cache]
-use = egg:swift#memcache
-# Update this line to contain a comma separated list of memcache servers
-# shared by all nodes running the proxy-server service.
-memcache_servers = localhost:11211
-
-[filter:gatekeeper]
-use = egg:swift#gatekeeper
-
-[filter:ratelimit]
-use = egg:swift#ratelimit
-
-[filter:bulk]
-use = egg:swift#bulk
-
-[filter:staticweb]
-use = egg:swift#staticweb
-
-[filter:slo]
-use = egg:swift#slo
-
-[filter:dlo]
-use = egg:swift#dlo
-
-[filter:tempauth]
-use = egg:swift#tempauth
-user_admin_admin = admin .admin .reseller_admin
-user_test_tester = testing .admin
-user_test2_tester2 = testing2 .admin
-user_test_tester3 = testing3
-user_test5_tester5 = testing5 service
-
-[filter:swift3]
-use = egg:swift3#swift3
diff --git a/gluster-object/CentOS/docker-gluster-swift/etc/swift/swift.conf b/gluster-object/CentOS/docker-gluster-swift/etc/swift/swift.conf
deleted file mode 100644
index f64ba5a..0000000
--- a/gluster-object/CentOS/docker-gluster-swift/etc/swift/swift.conf
+++ /dev/null
@@ -1,85 +0,0 @@
-[DEFAULT]
-
-
-[swift-hash]
-# random unique string that can never change (DO NOT LOSE)
-swift_hash_path_suffix = gluster
-
-
-# The swift-constraints section sets the basic constraints on data
-# saved in the swift cluster.
-
-[swift-constraints]
-
-# max_file_size is the largest "normal" object that can be saved in
-# the cluster. This is also the limit on the size of each segment of
-# a "large" object when using the large object manifest support.
-# This value is set in bytes. Setting it to lower than 1MiB will cause
-# some tests to fail.
-# Default is 1 TiB = 2**30*1024
-max_file_size = 1099511627776
-
-
-# max_meta_name_length is the max number of bytes in the utf8 encoding
-# of the name portion of a metadata header.
-
-#max_meta_name_length = 128
-
-
-# max_meta_value_length is the max number of bytes in the utf8 encoding
-# of a metadata value
-
-#max_meta_value_length = 256
-
-
-# max_meta_count is the max number of metadata keys that can be stored
-# on a single account, container, or object
-
-#max_meta_count = 90
-
-
-# max_meta_overall_size is the max number of bytes in the utf8 encoding
-# of the metadata (keys + values)
-
-#max_meta_overall_size = 4096
-
-
-# max_object_name_length is the max number of bytes in the utf8 encoding of an
-# object name: Gluster FS can handle much longer file names, but the length
-# between the slashes of the URL is handled below. Remember that most web
-# clients can't handle anything greater than 2048, and those that do are
-# rather clumsy.
-
-max_object_name_length = 2048
-
-# max_object_name_component_length (GlusterFS) is the max number of bytes in
-# the utf8 encoding of an object name component (the part between the
-# slashes); this is a limit imposed by the underlying file system (for XFS it
-# is 255 bytes).
-
-max_object_name_component_length = 255
-
-# container_listing_limit is the default (and max) number of items
-# returned for a container listing request
-
-#container_listing_limit = 10000
-
-
-# account_listing_limit is the default (and max) number of items returned
-# for an account listing request
-
-#account_listing_limit = 10000
-
-
-# max_account_name_length is the max number of bytes in the utf8 encoding of
-# an account name: Gluster FS Filename limit (XFS limit?), must be the same
-# size as max_object_name_component_length above.
-
-max_account_name_length = 255
-
-
-# max_container_name_length is the max number of bytes in the utf8 encoding
-# of a container name: Gluster FS Filename limit (XFS limit?), must be the same
-# size as max_object_name_component_length above.
-
-max_container_name_length = 255
diff --git a/gluster-object/CentOS/docker-gluster-swift/etc/sysconfig/swift-volumes b/gluster-object/CentOS/docker-gluster-swift/etc/sysconfig/swift-volumes
deleted file mode 100644
index 8b49f07..0000000
--- a/gluster-object/CentOS/docker-gluster-swift/etc/sysconfig/swift-volumes
+++ /dev/null
@@ -1,2 +0,0 @@
-# Set Gluster volumes to be used by gluster-object service
-GLUSTER_VOLUMES="tv1"
diff --git a/gluster-object/CentOS/docker-gluster-swift/memcached.service b/gluster-object/CentOS/docker-gluster-swift/memcached.service
deleted file mode 100644
index 7aae000..0000000
--- a/gluster-object/CentOS/docker-gluster-swift/memcached.service
+++ /dev/null
@@ -1,9 +0,0 @@
-[Unit]
-Description=Memcached Service
-
-[Service]
-ExecStart=/usr/bin/memcached -u root
-Restart=on-abort
-
-[Install]
-WantedBy=multi-user.target
diff --git a/gluster-object/CentOS/docker-gluster-swift/swift-account.service b/gluster-object/CentOS/docker-gluster-swift/swift-account.service
deleted file mode 100644
index 2b74cfd..0000000
--- a/gluster-object/CentOS/docker-gluster-swift/swift-account.service
+++ /dev/null
@@ -1,10 +0,0 @@
-[Unit]
-Description=Swift Account Service
-After=swift-proxy.service
-
-[Service]
-ExecStart=/usr/bin/python /usr/bin/swift-account-server /etc/swift/account-server.conf
-Restart=on-abort
-
-[Install]
-WantedBy=multi-user.target
diff --git a/gluster-object/CentOS/docker-gluster-swift/swift-container.service b/gluster-object/CentOS/docker-gluster-swift/swift-container.service
deleted file mode 100644
index 6d83db4..0000000
--- a/gluster-object/CentOS/docker-gluster-swift/swift-container.service
+++ /dev/null
@@ -1,10 +0,0 @@
-[Unit]
-Description=Swift Container Service
-After=swift-account.service
-
-[Service]
-ExecStart=/usr/bin/python /usr/bin/swift-container-server /etc/swift/container-server.conf
-Restart=on-abort
-
-[Install]
-WantedBy=multi-user.target
diff --git a/gluster-object/CentOS/docker-gluster-swift/swift-gen-builders.service b/gluster-object/CentOS/docker-gluster-swift/swift-gen-builders.service
deleted file mode 100644
index ab30a7c..0000000
--- a/gluster-object/CentOS/docker-gluster-swift/swift-gen-builders.service
+++ /dev/null
@@ -1,11 +0,0 @@
-[Unit]
-Description=Swift Gen Builders
-Before=memcached.service
-
-[Service]
-Type=oneshot
-EnvironmentFile=-/etc/sysconfig/swift-volumes
-ExecStart=/usr/bin/gluster-swift-gen-builders $GLUSTER_VOLUMES
-
-[Install]
-WantedBy=multi-user.target
diff --git a/gluster-object/CentOS/docker-gluster-swift/swift-object.service b/gluster-object/CentOS/docker-gluster-swift/swift-object.service
deleted file mode 100644
index 502759d..0000000
--- a/gluster-object/CentOS/docker-gluster-swift/swift-object.service
+++ /dev/null
@@ -1,10 +0,0 @@
-[Unit]
-Description=Swift Object Service
-After=swift-container.service
-
-[Service]
-ExecStart=/usr/bin/python /usr/bin/swift-object-server /etc/swift/object-server.conf
-Restart=on-abort
-
-[Install]
-WantedBy=multi-user.target
diff --git a/gluster-object/CentOS/docker-gluster-swift/swift-proxy.service b/gluster-object/CentOS/docker-gluster-swift/swift-proxy.service
deleted file mode 100644
index 8421bf6..0000000
--- a/gluster-object/CentOS/docker-gluster-swift/swift-proxy.service
+++ /dev/null
@@ -1,10 +0,0 @@
-[Unit]
-Description=Swift Proxy Service
-After=memcached.service
-
-[Service]
-ExecStart=/usr/bin/python /usr/bin/swift-proxy-server /etc/swift/proxy-server.conf
-Restart=on-abort
-
-[Install]
-WantedBy=multi-user.target
diff --git a/gluster-object/CentOS/docker-gluster-swift/update_gluster_vol.sh b/gluster-object/CentOS/docker-gluster-swift/update_gluster_vol.sh
deleted file mode 100644
index dfb891d..0000000
--- a/gluster-object/CentOS/docker-gluster-swift/update_gluster_vol.sh
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/bin/bash
-
-# To update gluster volume name in swift-volumes, used by swift-gen-builders.service
-if [ -z "$GLUSTER_VOLUMES" ]; then
-        echo "You need to set GLUSTER_VOLUMES env variable"
-        exit 1
-else
-        echo "GLUSTER_VOLUMES env variable is set. Update in swift-volumes"
-        sed -i.bak '/^GLUSTER_VOLUMES=/s/=.*/='\""$GLUSTER_VOLUMES"\"'/' /etc/sysconfig/swift-volumes
-fi
-
-# Hand off to CMD
-exec "$@"
diff --git a/gluster-s3object/CentOS/docker-gluster-s3/Dockerfile b/gluster-s3object/CentOS/docker-gluster-s3/Dockerfile
new file mode 100644
index 0000000..9ec1e2f
--- /dev/null
+++ b/gluster-s3object/CentOS/docker-gluster-s3/Dockerfile
@@ -0,0 +1,82 @@
+FROM centos:7
+MAINTAINER Prashanth Pai <ppai@redhat.com>
+
+# centos-release-openstack-kilo package resides in the extras repo.
+# All subsequent actual packages come from the CentOS Cloud SIG repo:
+# http://mirror.centos.org/centos/7/cloud/x86_64/
+
+# Install PACO servers and S3 middleware.
+# Install gluster-swift dependencies. To be removed when RPMs become available.
+# Clean downloaded packages and index
+
+LABEL architecture="x86_64" \
+      name="gluster/gluster-swift" \
+      version="kilo" \
+      vendor="Red Hat, Inc" \
+      summary="This image has a running gluster-swift service ( centos 7 + gluster-swift)" \
+      io.k8s.display-name="gluster-swift based on centos 7" \
+      io.k8s.description="gluster-swift image is based on centos image which enables files and directories created on GlusterFS to be accessed as objects via the Swift and S3 API." \
+      description="gluster-swift image is based on centos image which enables files and directories created on GlusterFS to be accessed as objects via the Swift and S3 API." \
+      io.openshift.tags="gluster,glusterfs,gluster-swift"
+
+RUN yum -v --setopt=tsflags=nodocs -y update && \
+    yum -v --setopt=tsflags=nodocs -y install \
+        centos-release-openstack-kilo \
+        epel-release && \
+    yum -v --setopt=tsflags=nodocs -y install \
+        openstack-swift openstack-swift-{proxy,account,container,object,plugin-swift3} \
+        git memcached python-prettytable && \
+    yum -y install systemd && \
+        (cd /lib/systemd/system/sysinit.target.wants/; for i in *; do [ $i == systemd-tmpfiles-setup.service ] || rm -f $i; done); \
+        rm -f /lib/systemd/system/multi-user.target.wants/*;\
+        rm -f /etc/systemd/system/*.wants/*;\
+        rm -f /lib/systemd/system/local-fs.target.wants/*; \
+        rm -f /lib/systemd/system/sockets.target.wants/*udev*; \
+        rm -f /lib/systemd/system/sockets.target.wants/*initctl*; \
+        rm -f /lib/systemd/system/basic.target.wants/*;\
+        rm -f /lib/systemd/system/anaconda.target.wants/* && \
+    yum -y clean all
+
+# Install gluster-swift from source.
+# TODO: When gluster-swift is shipped as RPM, just use that.
+RUN git clone git://review.gluster.org/gluster-swift /tmp/gluster-swift && \
+    cd /tmp/gluster-swift && \
+    python setup.py install && \
+    cd - && \
+    rm -rf /tmp/gluster-swift
+
+# Gluster volumes will be mounted *under* this directory.
+VOLUME /mnt/gluster-object
+
+# Copy systemd scripts
+COPY swift-gen-builders.service /lib/systemd/system/
+COPY swift-proxy.service /lib/systemd/system/
+COPY swift-account.service /lib/systemd/system/
+COPY swift-container.service /lib/systemd/system/
+COPY swift-object.service /lib/systemd/system/
+
+# Replace openstack swift conf files with local gluster-swift ones
+COPY etc/swift/* /etc/swift/
+
+# To update volume name used by swift-gen-builders service
+COPY update_gluster_vol.sh /usr/local/bin/update_gluster_vol.sh
+RUN chmod +x /usr/local/bin/update_gluster_vol.sh
+
+# Volumes to be exposed as object storage are listed in the swift-volumes file
+COPY etc/sysconfig/swift-volumes /etc/sysconfig/swift-volumes
+
+# The proxy server listens on port 8080
+EXPOSE 8080
+
+RUN echo 'root:password' | chpasswd
+VOLUME [ "/sys/fs/cgroup" ]
+
+RUN systemctl enable swift-gen-builders.service;\
+systemctl enable memcached.service;\
+systemctl enable swift-proxy.service;\
+systemctl enable swift-account.service;\
+systemctl enable swift-container.service;\
+systemctl enable swift-object.service;
+
+ENTRYPOINT ["/usr/local/bin/update_gluster_vol.sh"]
+CMD ["/usr/sbin/init"]
diff --git a/gluster-s3object/CentOS/docker-gluster-s3/README.md b/gluster-s3object/CentOS/docker-gluster-s3/README.md
new file mode 100644
index 0000000..baa6d28
--- /dev/null
+++ b/gluster-s3object/CentOS/docker-gluster-s3/README.md
@@ -0,0 +1,109 @@
+
+# docker-gluster-s3
+docker-gluster-s3 provides an object storage interface for a Gluster volume.
+
+Let us see how to run gluster-s3 inside a Docker container.
+
+## Building
+
+```bash
+# docker build --rm --tag gluster-s3 .
+```
+
+## Running
+
+On the host machine, mount one or more gluster volumes under the directory
+`/mnt/gluster-object` with mountpoint name being same as that of the volume.
+
+For example, if you have two gluster volumes named `test` and `test2`, they
+should be mounted at `/mnt/gluster-object/test` and `/mnt/gluster-object/test2`
+respectively. This directory on the host machine containing all the individual
+glusterfs mounts is then bind-mounted inside the container. This avoids having
+to bind mount individual gluster volumes.
+
+The volume names also need to be updated in etc/sysconfig/swift-volumes.
+For example (in swift-volumes):
+GLUSTER_VOLUMES='tv1'
+
+Where tv1 is the volume name.
+
+**Example:**
+
+```bash
+# docker run -d --privileged  -v /sys/fs/cgroup/:/sys/fs/cgroup/:ro -p 8080:8080 -v /mnt/gluster-object:/mnt/gluster-object -e GLUSTER_VOLUMES="tv1" gluster-s3
+```
+
+If you have SELinux set to enforcing on the host machine, refer to the
+Troubleshooting section below before running the container.
+
+**Note:**
+
+~~~
+-d : Runs the container in the background.
+-p : Publishes the container's port to the host port. They need not be the same.
+     If host port is omitted, a random port will be mapped. So you can run
+     multiple instances of the container, each serving on a different port on
+     the same host machine.
+-v : Bind mounts a host path inside the container.
+-e : Sets and passes an environment variable. In our case, provide the list of
+     volumes to be exported over the object interface by setting the
+     GLUSTER_VOLUMES environment variable.
+~~~
+
+### Custom deployment
+
+You can provide your own configuration files and ring files and have the
+swift processes running inside the container use those. This can be done by
+placing your conf files and ring files in a directory on your host machine
+and then bind-mounting it inside the container at `/etc/swift`.
+
+**Example:**
+
+Assuming you have conf files and ring files present at `/tmp/swift` on the
+machine, you can spawn the container as follows:
+
+```bash
+# docker run -d -p 8080:8080 -v /tmp/swift:/etc/swift -v /mnt/gluster-object:/mnt/gluster-object gluster-s3
+```
+
+If the host machine has SELinux set to enforcing:
+
+```bash
+# chcon -Rt svirt_sandbox_file_t /tmp/swift
+```
+
+### Troubleshooting
+
+**SELinux**
+
+When a volume is bind-mounted inside the container, SELinux on the host machine
+must allow the access. Otherwise, the application inside the container
+won't be able to access the volume. Example:
+
+```bash
+[root@f24 ~]# docker exec -i -t nostalgic_goodall /bin/bash
+[root@042abf4acc4d /]# ls /mnt/gluster-object/
+ls: cannot open directory /mnt/gluster-object/: Permission denied
+```
+
+Ideally, running this command on host machine should work:
+
+```bash
+# chcon -Rt svirt_sandbox_file_t /mnt/gluster-object
+```
+
+However, glusterfs does not support setting of SELinux contexts [yet][1].
+You can always set SELinux to permissive on the host machine by running
+`setenforce 0` or run the container in privileged mode (`--privileged=true`).
+I don't like either. A better workaround would be to mount the glusterfs
+volumes on the host machine as shown in the following example:
+
+[1]: https://bugzilla.redhat.com/show_bug.cgi?id=1252627
+
+```bash
+mount -t glusterfs -o selinux,context="system_u:object_r:svirt_sandbox_file_t:s0" `hostname`:test /mnt/gluster-object/test
+```
+
+### TODO
+
+* Install gluster-s3 from RPMs. (Currently installed from source)
diff --git a/gluster-s3object/CentOS/docker-gluster-s3/etc/swift/account-server.conf b/gluster-s3object/CentOS/docker-gluster-s3/etc/swift/account-server.conf
new file mode 100644
index 0000000..726f9c7
--- /dev/null
+++ b/gluster-s3object/CentOS/docker-gluster-s3/etc/swift/account-server.conf
@@ -0,0 +1,39 @@
+[DEFAULT]
+#
+# Default gluster mount point to be used for object store, can be changed by
+# setting the following value in {account,container,object}-server.conf files.
+# It is recommended to keep this value same for all the three services but can
+# be kept different if environment demands.
+devices = /mnt/gluster-object
+#
+# Once you are confident that your startup processes will always have your
+# gluster volumes properly mounted *before* the account-server workers start,
+# you can *consider* setting this value to "false" to reduce the per-request
+# overhead it can incur.
+mount_check = false
+bind_port = 6012
+#
+# Override swift's default behaviour for fallocate.
+disable_fallocate = true
+#
+# One or two workers should be sufficient for almost any installation of
+# Gluster.
+workers = 1
+
+[pipeline:main]
+pipeline = account-server
+
+[app:account-server]
+use = egg:gluster_swift#account
+user = root
+log_facility = LOG_LOCAL2
+log_level = WARN
+# The following parameter is used by object-expirer and needs to be the same
+# across all conf files!
+auto_create_account_prefix = gs
+#
+# After ensuring things are running in a stable manner, you can turn off
+# normal request logging for the account server to unclutter the log
+# files. Warnings and errors will still be logged.
+log_requests = off
+
diff --git a/gluster-s3object/CentOS/docker-gluster-s3/etc/swift/container-server.conf b/gluster-s3object/CentOS/docker-gluster-s3/etc/swift/container-server.conf
new file mode 100644
index 0000000..e8f67e3
--- /dev/null
+++ b/gluster-s3object/CentOS/docker-gluster-s3/etc/swift/container-server.conf
@@ -0,0 +1,39 @@
+[DEFAULT]
+#
+# Default gluster mount point to be used for object store, can be changed by
+# setting the following value in {account,container,object}-server.conf files.
+# It is recommended to keep this value same for all the three services but can
+# be kept different if environment demands.
+devices = /mnt/gluster-object
+#
+# Once you are confident that your startup processes will always have your
+# gluster volumes properly mounted *before* the container-server workers
+# start, you can *consider* setting this value to "false" to reduce the
+# per-request overhead it can incur.
+mount_check = false
+bind_port = 6011
+#
+# Override swift's default behaviour for fallocate.
+disable_fallocate = true
+#
+# One or two workers should be sufficient for almost any installation of
+# Gluster.
+workers = 1
+
+[pipeline:main]
+pipeline = container-server
+
+[app:container-server]
+use = egg:gluster_swift#container
+user = root
+log_facility = LOG_LOCAL2
+log_level = WARN
+# The following parameter is used by object-expirer and needs to be the same
+# across all conf files!
+auto_create_account_prefix = gs
+#
+# After ensuring things are running in a stable manner, you can turn off
+# normal request logging for the container server to unclutter the log
+# files. Warnings and errors will still be logged.
+log_requests = off
+
diff --git a/gluster-s3object/CentOS/docker-gluster-s3/etc/swift/fs.conf b/gluster-s3object/CentOS/docker-gluster-s3/etc/swift/fs.conf
new file mode 100644
index 0000000..31a5e6f
--- /dev/null
+++ b/gluster-s3object/CentOS/docker-gluster-s3/etc/swift/fs.conf
@@ -0,0 +1,24 @@
+[DEFAULT]
+#
+# IP address of a node in the GlusterFS server cluster hosting the
+# volumes to be served via Swift API.
+mount_ip = localhost
+
+# Performance optimization parameter. When turned off, the filesystem will
+# see a reduced number of stat calls, resulting in substantially faster
+# response time for GET and HEAD container requests on containers with large
+# numbers of objects, at the expense of an accurate count of combined bytes
+# used by all objects in the container. For most installations "off" works
+# fine.
+accurate_size_in_listing = off
+
+# In older versions of gluster-swift, metadata stored as xattrs of dirs/files
+# were serialized using PICKLE format. The PICKLE format is vulnerable to
+# exploits in deployments where a user has access to backend filesystem over
+# FUSE/SMB. Deserializing pickled metadata can result in malicious code being
+# executed if an attacker has stored malicious code as xattr from filesystem
+# interface. Although, new metadata is always serialized using JSON format,
+# existing metadata already stored in PICKLE format are loaded by default.
+# You can turn this option to 'off' once you have migrated all your metadata
+# from PICKLE format to JSON format using gluster-swift-migrate-metadata tool.
+read_pickled_metadata = on
diff --git a/gluster-s3object/CentOS/docker-gluster-s3/etc/swift/object-expirer.conf b/gluster-s3object/CentOS/docker-gluster-s3/etc/swift/object-expirer.conf
new file mode 100644
index 0000000..8be8626
--- /dev/null
+++ b/gluster-s3object/CentOS/docker-gluster-s3/etc/swift/object-expirer.conf
@@ -0,0 +1,61 @@
+[DEFAULT]
+user = root
+# Default gluster mount point to be used for object store, can be changed by
+# setting the following value in {account,container,object}-server.conf files.
+devices = /mnt/gluster-object
+
+[object-expirer]
+user = root
+log_facility = LOG_LOCAL2
+log_level = INFO
+
+# The following parameters are used by object-expirer and need to be the same
+# across all conf files!
+auto_create_account_prefix = gs
+expiring_objects_account_name = expiring
+
+# The expirer will re-attempt expiring if the source object is not available
+# up to reclaim_age seconds before it gives up and deletes the entry in the
+# queue. In gluster-swift, you'd almost always want to set this to zero.
+reclaim_age = 0
+
+# Do not retry DELETEs on getting 404. Hence default is set to 1.
+request_tries = 1
+
+# The swift-object-expirer daemon will run every 'interval' number of seconds
+# interval = 300
+
+# Emit a log line report of the progress so far every 'report_interval'
+# number of seconds.
+# report_interval = 300
+
+# concurrency is the level of concurrency to use to do the work, this value
+# must be set to at least 1
+# concurrency = 1
+
+# processes is how many parts to divide the work into, one part per process
+# that will be doing the work
+# processes set 0 means that a single process will be doing all the work
+# processes can also be specified on the command line and will override the
+# config value
+# processes = 0
+
+# process is which of the parts a particular process will work on
+# process can also be specified on the command line and will override the config
+# value
+# process is "zero based", if you want to use 3 processes, you should run
+# processes with process set to 0, 1, and 2
+# process = 0
+
+
+[pipeline:main]
+pipeline = catch_errors cache proxy-server
+
+[app:proxy-server]
+use = egg:gluster_swift#proxy
+
+[filter:cache]
+use = egg:swift#memcache
+
+[filter:catch_errors]
+use = egg:swift#catch_errors
diff --git a/gluster-s3object/CentOS/docker-gluster-s3/etc/swift/object-server.conf b/gluster-s3object/CentOS/docker-gluster-s3/etc/swift/object-server.conf
new file mode 100644
index 0000000..2c7df2e
--- /dev/null
+++ b/gluster-s3object/CentOS/docker-gluster-s3/etc/swift/object-server.conf
@@ -0,0 +1,55 @@
+[DEFAULT]
+#
+# Default gluster mount point to be used for object store, can be changed by
+# setting the following value in {account,container,object}-server.conf files.
+# It is recommended to keep this value same for all the three services but can
+# be kept different if environment demands.
+devices = /mnt/gluster-object
+#
+# Once you are confident that your startup processes will always have your
+# gluster volumes properly mounted *before* the object-server workers start,
+# you can *consider* setting this value to "false" to reduce the per-request
+# overhead it can incur.
+mount_check = false
+bind_port = 6010
+#
+# Maximum number of clients one worker can process simultaneously (it will
+# actually accept N + 1). Setting this to one (1) will only handle one request
+# at a time, without accepting another request concurrently. By increasing the
+# number of workers to a much higher value, one can prevent slow file system
+# operations for one request from starving other requests.
+max_clients = 1024
+#
+# If not doing the above, setting this value initially to match the number of
+# CPUs is a good starting point for determining the right value.
+workers = 1
+# Override swift's default behaviour for fallocate.
+disable_fallocate = true
+
+[pipeline:main]
+pipeline = object-server
+
+[app:object-server]
+use = egg:gluster_swift#object
+user = root
+log_facility = LOG_LOCAL2
+log_level = WARN
+# The following parameters are used by object-expirer and need to be the same
+# across all conf files!
+auto_create_account_prefix = gs
+expiring_objects_account_name = expiring
+#
+# For performance, after ensuring things are running in a stable manner, you
+# can turn off normal request logging for the object server to reduce the
+# per-request overhead and unclutter the log files. Warnings and errors will
+# still be logged.
+log_requests = off
+#
+# Adjust this value to match the stripe width of the underlying storage array
+# (not the stripe element size). This will provide a reasonable starting point
+# for tuning this value.
+disk_chunk_size = 65536
+#
+# Adjust this value to match whatever is set for the disk_chunk_size initially.
+# This will provide a reasonable starting point for tuning this value.
+network_chunk_size = 65536
diff --git a/gluster-s3object/CentOS/docker-gluster-s3/etc/swift/proxy-server.conf b/gluster-s3object/CentOS/docker-gluster-s3/etc/swift/proxy-server.conf
new file mode 100644
index 0000000..979b735
--- /dev/null
+++ b/gluster-s3object/CentOS/docker-gluster-s3/etc/swift/proxy-server.conf
@@ -0,0 +1,99 @@
+[DEFAULT]
+bind_port = 8080
+user = root
+# Consider using 1 worker per CPU
+workers = 1
+
+[pipeline:main]
+pipeline = catch_errors gatekeeper healthcheck proxy-logging cache bulk ratelimit swift3 staticweb slo dlo proxy-logging proxy-server
+
+[app:proxy-server]
+use = egg:gluster_swift#proxy
+log_facility = LOG_LOCAL1
+log_level = WARN
+# The API allows for account creation and deletion, but since Gluster/Swift
+# automounts a Gluster volume for a given account, there is no way to create
+# or delete an account. So leave this off.
+allow_account_management = false
+account_autocreate = true
+# The following parameters are used by object-expirer and need to be the same
+# across all conf files!
+auto_create_account_prefix = gs
+expiring_objects_account_name = expiring
+# Ensure the proxy server uses fast-POSTs since we don't need to make a copy
+# of the entire object given that all metadata is stored in the object
+# extended attributes (no .meta file used after creation) and no container
+# sync feature to present.
+object_post_as_copy = false
+# Only need to recheck the account exists once a day
+recheck_account_existence = 86400
+# May want to consider bumping this up if containers are created and destroyed
+# infrequently.
+recheck_container_existence = 60
+# Timeout clients that don't read or write to the proxy server after 5
+# seconds.
+client_timeout = 5
+# Give more time to connect to the object, container or account servers in
+# cases of high load.
+conn_timeout = 5
+# For high load situations, once connected to an object, container or account
+# server, allow for delays communicating with them.
+node_timeout = 60
+# May want to consider bumping up this value to 1 - 4 MB depending on how much
+# traffic is for multi-megabyte or gigabyte requests; perhaps matching the
+# stripe width (not stripe element size) of your storage volume is a good
+# starting point. See below for sizing information.
+object_chunk_size = 65536
+# If you do decide to increase the object_chunk_size, then consider lowering
+# this value to one. Up to "put_queue_length" object_chunk_size'd buffers can
+# be queued to the object server for processing. Given one proxy server worker
+# can handle up to 1,024 connections, by default, it will consume 10 * 65,536
+# * 1,024 bytes of memory in the worst case (default values). Be sure the
+# amount of memory available on the system can accommodate increased values
+# for object_chunk_size.
+put_queue_depth = 10
+
+[filter:catch_errors]
+use = egg:swift#catch_errors
+
+[filter:proxy-logging]
+use = egg:swift#proxy_logging
+access_log_level = WARN
+
+[filter:healthcheck]
+use = egg:swift#healthcheck
+
+[filter:cache]
+use = egg:swift#memcache
+# Update this line to contain a comma separated list of memcache servers
+# shared by all nodes running the proxy-server service.
+memcache_servers = localhost:11211
+
+[filter:gatekeeper]
+use = egg:swift#gatekeeper
+
+[filter:ratelimit]
+use = egg:swift#ratelimit
+
+[filter:bulk]
+use = egg:swift#bulk
+
+[filter:staticweb]
+use = egg:swift#staticweb
+
+[filter:slo]
+use = egg:swift#slo
+
+[filter:dlo]
+use = egg:swift#dlo
+
+[filter:tempauth]
+use = egg:swift#tempauth
+user_admin_admin = admin .admin .reseller_admin
+user_test_tester = testing .admin
+user_test2_tester2 = testing2 .admin
+user_test_tester3 = testing3
+user_test5_tester5 = testing5 service
+
+[filter:swift3]
+use = egg:swift3#swift3
diff --git a/gluster-s3object/CentOS/docker-gluster-s3/etc/swift/swift.conf b/gluster-s3object/CentOS/docker-gluster-s3/etc/swift/swift.conf
new file mode 100644
index 0000000..f64ba5a
--- /dev/null
+++ b/gluster-s3object/CentOS/docker-gluster-s3/etc/swift/swift.conf
@@ -0,0 +1,85 @@
+[DEFAULT]
+
+
+[swift-hash]
+# random unique string that can never change (DO NOT LOSE)
+swift_hash_path_suffix = gluster
+
+
+# The swift-constraints section sets the basic constraints on data
+# saved in the swift cluster.
+
+[swift-constraints]
+
+# max_file_size is the largest "normal" object that can be saved in
+# the cluster. This is also the limit on the size of each segment of
+# a "large" object when using the large object manifest support.
+# This value is set in bytes. Setting it lower than 1 MiB will cause
+# some tests to fail.
+# Default is 1 TiB = 2**30 * 1024 bytes
+max_file_size = 1099511627776
+
+
+# max_meta_name_length is the max number of bytes in the utf8 encoding
+# of the name portion of a metadata header.
+
+#max_meta_name_length = 128
+
+
+# max_meta_value_length is the max number of bytes in the utf8 encoding
+# of a metadata value
+
+#max_meta_value_length = 256
+
+
+# max_meta_count is the max number of metadata keys that can be stored
+# on a single account, container, or object
+
+#max_meta_count = 90
+
+
+# max_meta_overall_size is the max number of bytes in the utf8 encoding
+# of the metadata (keys + values)
+
+#max_meta_overall_size = 4096
+
+
+# max_object_name_length is the max number of bytes in the utf8 encoding of an
+# object name: Gluster FS can handle much longer file names, but the length
+# between the slashes of the URL is handled below. Remember that most web
+# clients can't handle anything greater than 2048, and those that do are
+# rather clumsy.
+
+max_object_name_length = 2048
+
+# max_object_name_component_length (GlusterFS) is the max number of bytes in
+# the utf8 encoding of an object name component (the part between the
+# slashes); this is a limit imposed by the underlying file system (for XFS it
+# is 255 bytes).
+
+max_object_name_component_length = 255
+
+# container_listing_limit is the default (and max) number of items
+# returned for a container listing request
+
+#container_listing_limit = 10000
+
+
+# account_listing_limit is the default (and max) number of items returned
+# for an account listing request
+
+#account_listing_limit = 10000
+
+
+# max_account_name_length is the max number of bytes in the utf8 encoding of
+# an account name: this is the Gluster FS filename limit (XFS limit) and must
+# be the same size as max_object_name_component_length above.
+
+max_account_name_length = 255
+
+
+# max_container_name_length is the max number of bytes in the utf8 encoding
+# of a container name: this is the Gluster FS filename limit (XFS limit) and
+# must be the same size as max_object_name_component_length above.
+
+max_container_name_length = 255
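
For reference, the max_file_size value above follows from the "1 TiB =
2**30 * 1024 bytes" comment; a one-line check (illustrative only):

    python -c 'print(2**30 * 1024)'   # 1099511627776, matching max_file_size
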
diff --git a/gluster-s3object/CentOS/docker-gluster-s3/etc/sysconfig/swift-volumes b/gluster-s3object/CentOS/docker-gluster-s3/etc/sysconfig/swift-volumes
new file mode 100644
index 0000000..8b49f07
--- /dev/null
+++ b/gluster-s3object/CentOS/docker-gluster-s3/etc/sysconfig/swift-volumes
@@ -0,0 +1,2 @@
+# Set Gluster volumes to be used by the gluster-s3 service
+GLUSTER_VOLUMES="tv1"
diff --git a/gluster-s3object/CentOS/docker-gluster-s3/memcached.service b/gluster-s3object/CentOS/docker-gluster-s3/memcached.service
new file mode 100644
index 0000000..7aae000
--- /dev/null
+++ b/gluster-s3object/CentOS/docker-gluster-s3/memcached.service
@@ -0,0 +1,9 @@
+[Unit]
+Description=Memcached Service
+
+[Service]
+ExecStart=/usr/bin/memcached -u root
+Restart=on-abort
+
+[Install]
+WantedBy=multi-user.target
diff --git a/gluster-s3object/CentOS/docker-gluster-s3/swift-account.service b/gluster-s3object/CentOS/docker-gluster-s3/swift-account.service
new file mode 100644
index 0000000..2b74cfd
--- /dev/null
+++ b/gluster-s3object/CentOS/docker-gluster-s3/swift-account.service
@@ -0,0 +1,10 @@
+[Unit]
+Description=Swift Account Service
+After=swift-proxy.service
+
+[Service]
+ExecStart=/usr/bin/python /usr/bin/swift-account-server /etc/swift/account-server.conf
+Restart=on-abort
+
+[Install]
+WantedBy=multi-user.target
diff --git a/gluster-s3object/CentOS/docker-gluster-s3/swift-container.service b/gluster-s3object/CentOS/docker-gluster-s3/swift-container.service
new file mode 100644
index 0000000..6d83db4
--- /dev/null
+++ b/gluster-s3object/CentOS/docker-gluster-s3/swift-container.service
@@ -0,0 +1,10 @@
+[Unit]
+Description=Swift Container Service
+After=swift-account.service
+
+[Service]
+ExecStart=/usr/bin/python /usr/bin/swift-container-server /etc/swift/container-server.conf
+Restart=on-abort
+
+[Install]
+WantedBy=multi-user.target
diff --git a/gluster-s3object/CentOS/docker-gluster-s3/swift-gen-builders.service b/gluster-s3object/CentOS/docker-gluster-s3/swift-gen-builders.service
new file mode 100644
index 0000000..ab30a7c
--- /dev/null
+++ b/gluster-s3object/CentOS/docker-gluster-s3/swift-gen-builders.service
@@ -0,0 +1,11 @@
+[Unit]
+Description=Swift Gen Builders
+Before=memcached.service
+
+[Service]
+Type=oneshot
+EnvironmentFile=-/etc/sysconfig/swift-volumes
+ExecStart=/usr/bin/gluster-swift-gen-builders $GLUSTER_VOLUMES
+
+[Install]
+WantedBy=multi-user.target
diff --git a/gluster-s3object/CentOS/docker-gluster-s3/swift-object.service b/gluster-s3object/CentOS/docker-gluster-s3/swift-object.service
new file mode 100644
index 0000000..502759d
--- /dev/null
+++ b/gluster-s3object/CentOS/docker-gluster-s3/swift-object.service
@@ -0,0 +1,10 @@
+[Unit]
+Description=Swift Object Service
+After=swift-container.service
+
+[Service]
+ExecStart=/usr/bin/python /usr/bin/swift-object-server /etc/swift/object-server.conf
+Restart=on-abort
+
+[Install]
+WantedBy=multi-user.target
diff --git a/gluster-s3object/CentOS/docker-gluster-s3/swift-proxy.service b/gluster-s3object/CentOS/docker-gluster-s3/swift-proxy.service
new file mode 100644
index 0000000..8421bf6
--- /dev/null
+++ b/gluster-s3object/CentOS/docker-gluster-s3/swift-proxy.service
@@ -0,0 +1,10 @@
+[Unit]
+Description=Swift Proxy Service
+After=memcached.service
+
+[Service]
+ExecStart=/usr/bin/python /usr/bin/swift-proxy-server /etc/swift/proxy-server.conf
+Restart=on-abort
+
+[Install]
+WantedBy=multi-user.target
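
Taken together, the unit files above give the start-up chain
swift-gen-builders -> memcached -> swift-proxy -> swift-account ->
swift-container -> swift-object, with swift-gen-builders reading
GLUSTER_VOLUMES from /etc/sysconfig/swift-volumes to build the ring files.
Inside the image the units would typically be enabled together; a minimal
sketch (the exact enable step is an assumption, not shown in this patch):

    systemctl enable swift-gen-builders.service memcached.service \
        swift-proxy.service swift-account.service \
        swift-container.service swift-object.service
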
diff --git a/gluster-s3object/CentOS/docker-gluster-s3/update_gluster_vol.sh b/gluster-s3object/CentOS/docker-gluster-s3/update_gluster_vol.sh
new file mode 100644
index 0000000..dfb891d
--- /dev/null
+++ b/gluster-s3object/CentOS/docker-gluster-s3/update_gluster_vol.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+
+# Update the gluster volume name in swift-volumes, which is used by swift-gen-builders.service
+if [ -z "$GLUSTER_VOLUMES" ]; then
+        echo "You need to set the GLUSTER_VOLUMES env variable"
+        exit 1
+else
+        echo "GLUSTER_VOLUMES env variable is set. Updating swift-volumes"
+        sed -i.bak '/^GLUSTER_VOLUMES=/s/=.*/='\""$GLUSTER_VOLUMES"\"'/' /etc/sysconfig/swift-volumes
+fi
+
+# Hand off to CMD
+exec "$@"
diff --git a/tests/shelltest/test_gluster_container_basic.sh b/tests/shelltest/test_gluster_container_basic.sh
index 8d139ca..452e34d 100755
--- a/tests/shelltest/test_gluster_container_basic.sh
+++ b/tests/shelltest/test_gluster_container_basic.sh
@@ -35,12 +35,12 @@ testit "test shellcheck ${BASE_DIR}/CentOS/gluster-setup.sh" \
         test_shellcheck ${BASE_DIR}/CentOS/gluster-setup.sh \
         || ((failed++))
 
-testit "test script syntax ${BASE_DIR}/gluster-object/CentOS/docker-gluster-swift/update_gluster_vol.sh" \
-        test_syntax ${BASE_DIR}/gluster-object/CentOS/docker-gluster-swift/update_gluster_vol.sh \
+testit "test script syntax ${BASE_DIR}/gluster-s3object/CentOS/docker-gluster-s3/update_gluster_vol.sh" \
+        test_syntax ${BASE_DIR}/gluster-s3object/CentOS/docker-gluster-s3/update_gluster_vol.sh \
         || ((failed++))
 
-testit "test shellcheck ${BASE_DIR}/gluster-object/CentOS/docker-gluster-swift/update_gluster_vol.sh" \
-        test_shellcheck ${BASE_DIR}/gluster-object/CentOS/docker-gluster-swift/update_gluster_vol.sh \
+testit "test shellcheck ${BASE_DIR}/gluster-s3object/CentOS/docker-gluster-s3/update_gluster_vol.sh" \
+        test_shellcheck ${BASE_DIR}/gluster-s3object/CentOS/docker-gluster-s3/update_gluster_vol.sh \
         || ((failed++))
 
 
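The renamed path can also be exercised manually with the same two checks the
test helpers wrap (assuming test_syntax runs "bash -n" and test_shellcheck
runs shellcheck, and taking the repository root as the working directory):

    bash -n gluster-s3object/CentOS/docker-gluster-s3/update_gluster_vol.sh
    shellcheck gluster-s3object/CentOS/docker-gluster-s3/update_gluster_vol.sh
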
-- 
cgit v1.2.3