path: root/gluster-object/CentOS/docker-gluster-swift/etc
author    Saravanakumar <sarumuga@redhat.com>    2017-06-29 17:58:56 +0530
committer Saravanakumar <sarumuga@redhat.com>    2017-06-29 18:03:40 +0530
commit    3e33c9640d2b4b2d15a21483c34bb48845fce7bf (patch)
tree      740a0189b03506e48447b993d88a1149253e3ea4 /gluster-object/CentOS/docker-gluster-swift/etc
parent    8e29c3a6f749837e694a6cb031b8740935a62dc3 (diff)
rename docker-gluster-swift to docker-gluster-s3 and the top-level
directory gluster-object to gluster-s3object

Update README and test scripts to reflect the name change.

Signed-off-by: Saravanakumar <sarumuga@redhat.com>
Diffstat (limited to 'gluster-object/CentOS/docker-gluster-swift/etc')
-rw-r--r--  gluster-object/CentOS/docker-gluster-swift/etc/swift/account-server.conf    39
-rw-r--r--  gluster-object/CentOS/docker-gluster-swift/etc/swift/container-server.conf  39
-rw-r--r--  gluster-object/CentOS/docker-gluster-swift/etc/swift/fs.conf                24
-rw-r--r--  gluster-object/CentOS/docker-gluster-swift/etc/swift/object-expirer.conf    61
-rw-r--r--  gluster-object/CentOS/docker-gluster-swift/etc/swift/object-server.conf     55
-rw-r--r--  gluster-object/CentOS/docker-gluster-swift/etc/swift/proxy-server.conf      99
-rw-r--r--  gluster-object/CentOS/docker-gluster-swift/etc/swift/swift.conf             85
-rw-r--r--  gluster-object/CentOS/docker-gluster-swift/etc/sysconfig/swift-volumes       2
8 files changed, 0 insertions, 404 deletions
diff --git a/gluster-object/CentOS/docker-gluster-swift/etc/swift/account-server.conf b/gluster-object/CentOS/docker-gluster-swift/etc/swift/account-server.conf
deleted file mode 100644
index 726f9c7..0000000
--- a/gluster-object/CentOS/docker-gluster-swift/etc/swift/account-server.conf
+++ /dev/null
@@ -1,39 +0,0 @@
-[DEFAULT]
-#
-# Default gluster mount point to be used for the object store; it can be
-# changed by setting the following value in the
-# {account,container,object}-server.conf files. It is recommended to keep
-# this value the same for all three services, but it can differ if the
-# environment demands.
-devices = /mnt/gluster-object
-#
-# Once you are confident that your startup processes will always have your
-# gluster volumes properly mounted *before* the account-server workers start,
-# you can *consider* setting this value to "false" to reduce the per-request
-# overhead it can incur.
-mount_check = false
-bind_port = 6012
-#
-# Override swift's default behaviour for fallocate.
-disable_fallocate = true
-#
-# One or two workers should be sufficient for almost any installation of
-# Gluster.
-workers = 1
-
-[pipeline:main]
-pipeline = account-server
-
-[app:account-server]
-use = egg:gluster_swift#account
-user = root
-log_facility = LOG_LOCAL2
-log_level = WARN
-# The following parameter is used by object-expirer and needs to be the same
-# across all conf files!
-auto_create_account_prefix = gs
-#
-# After ensuring things are running in a stable manner, you can turn off
-# normal request logging for the account server to unclutter the log
-# files. Warnings and errors will still be logged.
-log_requests = off
-
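Because mount_check is disabled in the config above, the account server trusts that the Gluster volumes are already mounted under /mnt/gluster-object before its workers start. A minimal pre-start check, assuming a volume named "tv1" (the name used in etc/sysconfig/swift-volumes at the end of this diff), might look like this:

    # Verify the volume is mounted before starting the account server;
    # with mount_check = false the workers will not check this themselves.
    mountpoint -q /mnt/gluster-object/tv1 || {
        echo "tv1 is not mounted under /mnt/gluster-object" >&2
        exit 1
    }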
diff --git a/gluster-object/CentOS/docker-gluster-swift/etc/swift/container-server.conf b/gluster-object/CentOS/docker-gluster-swift/etc/swift/container-server.conf
deleted file mode 100644
index e8f67e3..0000000
--- a/gluster-object/CentOS/docker-gluster-swift/etc/swift/container-server.conf
+++ /dev/null
@@ -1,39 +0,0 @@
-[DEFAULT]
-#
-# Default gluster mount point to be used for the object store; it can be
-# changed by setting the following value in the
-# {account,container,object}-server.conf files. It is recommended to keep
-# this value the same for all three services, but it can differ if the
-# environment demands.
-devices = /mnt/gluster-object
-#
-# Once you are confident that your startup processes will always have your
-# gluster volumes properly mounted *before* the container-server workers
-# start, you can *consider* setting this value to "false" to reduce the
-# per-request overhead it can incur.
-mount_check = false
-bind_port = 6011
-#
-# Override swift's default behaviour for fallocate.
-disable_fallocate = true
-#
-# One or two workers should be sufficient for almost any installation of
-# Gluster.
-workers = 1
-
-[pipeline:main]
-pipeline = container-server
-
-[app:container-server]
-use = egg:gluster_swift#container
-user = root
-log_facility = LOG_LOCAL2
-log_level = WARN
-# The following parameter is used by object-expirer and needs to be the same
-# across all conf files!
-auto_create_account_prefix = gs
-#
-# After ensuring things are running in a stable manner, you can turn off
-# normal request logging for the container server to unclutter the log
-# files. Warnings and errors will still be logged.
-log_requests = off
-
diff --git a/gluster-object/CentOS/docker-gluster-swift/etc/swift/fs.conf b/gluster-object/CentOS/docker-gluster-swift/etc/swift/fs.conf
deleted file mode 100644
index 31a5e6f..0000000
--- a/gluster-object/CentOS/docker-gluster-swift/etc/swift/fs.conf
+++ /dev/null
@@ -1,24 +0,0 @@
-[DEFAULT]
-#
-# IP address of a node in the GlusterFS server cluster hosting the
-# volumes to be served via Swift API.
-mount_ip = localhost
-
-# Performance optimization parameter. When turned off, the filesystem will
-# see a reduced number of stat calls, resulting in substantially faster
-# response time for GET and HEAD container requests on containers with large
-# numbers of objects, at the expense of an accurate count of combined bytes
-# used by all objects in the container. For most installations "off" works
-# fine.
-accurate_size_in_listing = off
-
-# In older versions of gluster-swift, metadata stored as xattrs of dirs/files
-# was serialized using the PICKLE format. The PICKLE format is vulnerable to
-# exploits in deployments where a user has access to the backend filesystem
-# over FUSE/SMB: deserializing pickled metadata can result in malicious code
-# being executed if an attacker has stored malicious code as an xattr via the
-# filesystem interface. Although new metadata is always serialized using the
-# JSON format, existing metadata already stored in PICKLE format is loaded by
-# default. You can set this option to 'off' once you have migrated all your
-# metadata from PICKLE format to JSON format using the
-# gluster-swift-migrate-metadata tool.
-read_pickled_metadata = on
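Once all existing metadata has been rewritten as JSON, reading of pickled metadata can be switched off. A hedged sketch of that migration, assuming the volume is mounted at /mnt/gluster-object/tv1; the exact command-line interface of gluster-swift-migrate-metadata may differ from what is shown here:

    # Convert PICKLE-serialized xattr metadata to JSON (hypothetical
    # invocation), then stop accepting pickled metadata on reads.
    gluster-swift-migrate-metadata /mnt/gluster-object/tv1
    sed -i 's/^read_pickled_metadata = on$/read_pickled_metadata = off/' /etc/swift/fs.conf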
diff --git a/gluster-object/CentOS/docker-gluster-swift/etc/swift/object-expirer.conf b/gluster-object/CentOS/docker-gluster-swift/etc/swift/object-expirer.conf
deleted file mode 100644
index 8be8626..0000000
--- a/gluster-object/CentOS/docker-gluster-swift/etc/swift/object-expirer.conf
+++ /dev/null
@@ -1,61 +0,0 @@
-[DEFAULT]
-user = root
-# Default gluster mount point to be used for the object store; it can be
-# changed by setting the following value in the
-# {account,container,object}-server.conf files.
-devices = /mnt/gluster-object
-
-[object-expirer]
-user = root
-log_facility = LOG_LOCAL2
-log_level = INFO
-
-# The following parameters are used by object-expirer and need to be the same
-# across all conf files!
-auto_create_account_prefix = gs
-expiring_objects_account_name = expiring
-
-# The expirer will keep re-attempting expiration when the source object is
-# not available, for up to reclaim_age seconds, before it gives up and
-# deletes the entry in the queue. In gluster-swift, you'd almost always want
-# to set this to zero.
-reclaim_age = 0
-
-# Do not retry DELETEs on getting a 404; hence the default is set to 1.
-request_tries = 1
-
-# The swift-object-expirer daemon will run every 'interval' number of seconds
-# interval = 300
-
-# Emit a log line report of the progress so far every 'report_interval'
-# number of seconds.
-# report_interval = 300
-
-# concurrency is the level of concurrency to use to do the work; this value
-# must be set to at least 1
-# concurrency = 1
-
-# processes is how many parts to divide the work into, one part per process
-# that will be doing the work. Setting processes to 0 means that a single
-# process will do all the work. processes can also be specified on the
-# command line and will override the config value.
-# processes = 0
-
-# process is which of the parts a particular process will work on. process
-# can also be specified on the command line and will override the config
-# value.
-# process is "zero based": if you want to use 3 processes, you should run
-# three of them, with process set to 0, 1, and 2.
-# process = 0
-
-
-[pipeline:main]
-pipeline = catch_errors cache proxy-server
-
-[app:proxy-server]
-use = egg:gluster_swift#proxy
-
-[filter:cache]
-use = egg:swift#memcache
-
-[filter:catch_errors]
-use = egg:swift#catch_errors
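The processes/process split described in the comments above divides the expiration work among cooperating daemons. As a hedged sketch (the comments state these values can be given on the command line and will override the config), three expirers covering all parts could be started like this:

    # One expirer per part; process is zero-based, so 0, 1 and 2
    # together cover all three parts of the work.
    swift-object-expirer /etc/swift/object-expirer.conf processes=3 process=0 &
    swift-object-expirer /etc/swift/object-expirer.conf processes=3 process=1 &
    swift-object-expirer /etc/swift/object-expirer.conf processes=3 process=2 &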
diff --git a/gluster-object/CentOS/docker-gluster-swift/etc/swift/object-server.conf b/gluster-object/CentOS/docker-gluster-swift/etc/swift/object-server.conf
deleted file mode 100644
index 2c7df2e..0000000
--- a/gluster-object/CentOS/docker-gluster-swift/etc/swift/object-server.conf
+++ /dev/null
@@ -1,55 +0,0 @@
-[DEFAULT]
-#
-# Default gluster mount point to be used for the object store; it can be
-# changed by setting the following value in the
-# {account,container,object}-server.conf files. It is recommended to keep
-# this value the same for all three services, but it can differ if the
-# environment demands.
-devices = /mnt/gluster-object
-#
-# Once you are confident that your startup processes will always have your
-# gluster volumes properly mounted *before* the object-server workers start,
-# you can *consider* setting this value to "false" to reduce the per-request
-# overhead it can incur.
-mount_check = false
-bind_port = 6010
-#
-# Maximum number of clients one worker can process simultaneously (it will
-# actually accept N + 1). Setting this to one (1) makes a worker handle only
-# one request at a time, without accepting another request concurrently. By
-# increasing the number of workers to a much higher value, one can prevent
-# slow file system operations for one request from starving other requests.
-max_clients = 1024
-#
-# If not doing the above, setting this value initially to match the number of
-# CPUs is a good starting point for determining the right value.
-workers = 1
-# Override swift's default behaviour for fallocate.
-disable_fallocate = true
-
-[pipeline:main]
-pipeline = object-server
-
-[app:object-server]
-use = egg:gluster_swift#object
-user = root
-log_facility = LOG_LOCAL2
-log_level = WARN
-# The following parameters are used by object-expirer and need to be the same
-# across all conf files!
-auto_create_account_prefix = gs
-expiring_objects_account_name = expiring
-#
-# For performance, after ensuring things are running in a stable manner, you
-# can turn off normal request logging for the object server to reduce the
-# per-request overhead and unclutter the log files. Warnings and errors will
-# still be logged.
-log_requests = off
-#
-# Adjust this value to match the stripe width of the underlying storage array
-# (not the stripe element size). This will provide a reasonable starting point
-# for tuning this value.
-disk_chunk_size = 65536
-#
-# Adjust this value to match whatever is set for disk_chunk_size initially.
-# This will provide a reasonable starting point for tuning this value.
-network_chunk_size = 65536
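As a worked example of the stripe-width guidance above (illustrative numbers only, not a recommendation): a RAID volume with 8 data disks and a 64 KiB stripe element has a stripe width of 8 * 64 KiB = 512 KiB, which would suggest starting values of:

    # stripe width = 8 * 64 KiB = 524288 bytes
    disk_chunk_size = 524288
    network_chunk_size = 524288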
diff --git a/gluster-object/CentOS/docker-gluster-swift/etc/swift/proxy-server.conf b/gluster-object/CentOS/docker-gluster-swift/etc/swift/proxy-server.conf
deleted file mode 100644
index 979b735..0000000
--- a/gluster-object/CentOS/docker-gluster-swift/etc/swift/proxy-server.conf
+++ /dev/null
@@ -1,99 +0,0 @@
-[DEFAULT]
-bind_port = 8080
-user = root
-# Consider using 1 worker per CPU
-workers = 1
-
-[pipeline:main]
-pipeline = catch_errors gatekeeper healthcheck proxy-logging cache bulk ratelimit swift3 staticweb slo dlo proxy-logging proxy-server
-
-[app:proxy-server]
-use = egg:gluster_swift#proxy
-log_facility = LOG_LOCAL1
-log_level = WARN
-# The API allows for account creation and deletion, but since Gluster/Swift
-# automounts a Gluster volume for a given account, there is no way to create
-# or delete an account. So leave this off.
-allow_account_management = false
-account_autocreate = true
-# The following parameters are used by object-expirer and needs to be same
-# across all conf files!
-auto_create_account_prefix = gs
-expiring_objects_account_name = expiring
-# Ensure the proxy server uses fast-POSTs, since we don't need to make a copy
-# of the entire object: all metadata is stored in the object's extended
-# attributes (no .meta file is used after creation) and there is no container
-# sync feature present.
-object_post_as_copy = false
-# Only need to recheck that the account exists once a day
-recheck_account_existence = 86400
-# May want to consider bumping this up if containers are created and destroyed
-# infrequently.
-recheck_container_existence = 60
-# Timeout clients that don't read or write to the proxy server after 5
-# seconds.
-client_timeout = 5
-# Give more time to connect to the object, container or account servers in
-# cases of high load.
-conn_timeout = 5
-# For high load situations, once connected to an object, container or account
-# server, allow for delays communicating with them.
-node_timeout = 60
-# May want to consider bumping this value up to 1 - 4 MB depending on how
-# much of the traffic consists of multi-megabyte or gigabyte requests;
-# perhaps matching the stripe width (not stripe element size) of your storage
-# volume is a good starting point. See below for sizing information.
-object_chunk_size = 65536
-# If you do decide to increase the object_chunk_size, then consider lowering
-# this value to one. Up to "put_queue_depth" object_chunk_size'd buffers can
-# be queued to the object server for processing. Given one proxy server worker
-# can handle up to 1,024 connections, by default, it will consume 10 * 65,536
-# * 1,024 bytes of memory in the worst case (default values). Be sure the
-# amount of memory available on the system can accommodate increased values
-# for object_chunk_size.
-put_queue_depth = 10
-
-[filter:catch_errors]
-use = egg:swift#catch_errors
-
-[filter:proxy-logging]
-use = egg:swift#proxy_logging
-access_log_level = WARN
-
-[filter:healthcheck]
-use = egg:swift#healthcheck
-
-[filter:cache]
-use = egg:swift#memcache
-# Update this line to contain a comma-separated list of memcache servers
-# shared by all nodes running the proxy-server service.
-memcache_servers = localhost:11211
-
-[filter:gatekeeper]
-use = egg:swift#gatekeeper
-
-[filter:ratelimit]
-use = egg:swift#ratelimit
-
-[filter:bulk]
-use = egg:swift#bulk
-
-[filter:staticweb]
-use = egg:swift#staticweb
-
-[filter:slo]
-use = egg:swift#slo
-
-[filter:dlo]
-use = egg:swift#dlo
-
-[filter:tempauth]
-use = egg:swift#tempauth
-user_admin_admin = admin .admin .reseller_admin
-user_test_tester = testing .admin
-user_test2_tester2 = testing2 .admin
-user_test_tester3 = testing3
-user_test5_tester5 = testing5 service
-
-[filter:swift3]
-use = egg:swift3#swift3
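The put_queue_depth comment above implies a worst-case buffer footprint per worker of put_queue_depth * object_chunk_size * max_clients = 10 * 65,536 * 1,024 = 671,088,640 bytes (~640 MiB) at the defaults, which is why increasing object_chunk_size should be paired with a lower queue depth. With the healthcheck filter in the pipeline, a quick smoke test of a running proxy (assuming it is up and listening on the bind_port of 8080 configured above) is:

    # The healthcheck middleware answers GET /healthcheck with 200 "OK".
    curl -i http://localhost:8080/healthcheck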
diff --git a/gluster-object/CentOS/docker-gluster-swift/etc/swift/swift.conf b/gluster-object/CentOS/docker-gluster-swift/etc/swift/swift.conf
deleted file mode 100644
index f64ba5a..0000000
--- a/gluster-object/CentOS/docker-gluster-swift/etc/swift/swift.conf
+++ /dev/null
@@ -1,85 +0,0 @@
-[DEFAULT]
-
-
-[swift-hash]
-# random unique string that can never change (DO NOT LOSE)
-swift_hash_path_suffix = gluster
-
-
-# The swift-constraints section sets the basic constraints on data
-# saved in the swift cluster.
-
-[swift-constraints]
-
-# max_file_size is the largest "normal" object that can be saved in
-# the cluster. This is also the limit on the size of each segment of
-# a "large" object when using the large object manifest support.
-# This value is set in bytes. Setting it lower than 1 MiB will cause some
-# tests to fail.
-# Default is 1 TiB = 2**30 * 1024 = 1099511627776 bytes.
-max_file_size = 1099511627776
-
-
-# max_meta_name_length is the max number of bytes in the utf8 encoding
-# of the name portion of a metadata header.
-
-#max_meta_name_length = 128
-
-
-# max_meta_value_length is the max number of bytes in the utf8 encoding
-# of a metadata value
-
-#max_meta_value_length = 256
-
-
-# max_meta_count is the max number of metadata keys that can be stored
-# on a single account, container, or object
-
-#max_meta_count = 90
-
-
-# max_meta_overall_size is the max number of bytes in the utf8 encoding
-# of the metadata (keys + values)
-
-#max_meta_overall_size = 4096
-
-
-# max_object_name_length is the max number of bytes in the utf8 encoding of an
-# object name: Gluster FS can handle much longer file names, but the length
-# between the slashes of the URL is handled below. Remember that most web
-# clients can't handle anything greater than 2048, and those that do are
-# rather clumsy.
-
-max_object_name_length = 2048
-
-# max_object_name_component_length (GlusterFS) is the max number of bytes in
-# the utf8 encoding of an object name component (the part between the
-# slashes); this is a limit imposed by the underlying file system (for XFS it
-# is 255 bytes).
-
-max_object_name_component_length = 255
-
-# container_listing_limit is the default (and max) number of items
-# returned for a container listing request
-
-#container_listing_limit = 10000
-
-
-# account_listing_limit is the default (and max) number of items returned
-# for an account listing request
-
-#account_listing_limit = 10000
-
-
-# max_account_name_length is the max number of bytes in the utf8 encoding of
-# an account name: Gluster FS filename limit (XFS limit?); it must be the
-# same size as max_object_name_component_length above.
-
-max_account_name_length = 255
-
-
-# max_container_name_length is the max number of bytes in the utf8 encoding
-# of a container name: Gluster FS filename limit (XFS limit?); it must be the
-# same size as max_object_name_component_length above.
-
-max_container_name_length = 255
diff --git a/gluster-object/CentOS/docker-gluster-swift/etc/sysconfig/swift-volumes b/gluster-object/CentOS/docker-gluster-swift/etc/sysconfig/swift-volumes
deleted file mode 100644
index 8b49f07..0000000
--- a/gluster-object/CentOS/docker-gluster-swift/etc/sysconfig/swift-volumes
+++ /dev/null
@@ -1,2 +0,0 @@
-# Set Gluster volumes to be used by gluster-object service
-GLUSTER_VOLUMES="tv1"
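A startup script would typically read this file and mount each listed volume under the devices path (/mnt/gluster-object) used by the server configs above. A minimal sketch, assuming the volumes are served from localhost as in fs.conf:

    # Mount every volume named in GLUSTER_VOLUMES before the Swift
    # services start.
    . /etc/sysconfig/swift-volumes
    for vol in $GLUSTER_VOLUMES; do
        mkdir -p "/mnt/gluster-object/$vol"
        mount -t glusterfs "localhost:/$vol" "/mnt/gluster-object/$vol"
    done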