swiftonhpss/etc/proxy-server.conf-gluster

[DEFAULT]
bind_port = 8080
user = root
log_facility = LOG_LOCAL1
# Consider using 1 worker per CPU
workers = 1
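# For example, on a hypothetical 8-core proxy node this might become:
# workers = 8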
[pipeline:main]
pipeline = healthcheck cache tempauth proxy-server
[app:proxy-server]
use = egg:gluster_swift_ufo#proxy
log_facility = LOG_LOCAL1
# The API allows for account creation and deletion, but since Gluster/Swift
# automounts a Gluster volume for a given account, there is no way to create
# or delete an account. So leave this off.
allow_account_management = false
account_autocreate = true
# Only need to recheck the account exists once a day
recheck_account_existence = 86400
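# (24 hours * 60 minutes * 60 seconds = 86400 seconds)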
# May want to consider bumping this up if containers are created and destroyed
# infrequently.
recheck_container_existence = 60
# Timeout clients that don't read or write to the proxy server after 5
# seconds.
client_timeout = 5
# Give more time to connect to the object, container or account servers in
# cases of high load.
conn_timeout = 5
# For high load situations, once connected to an object, container or account
# server, allow for delays communicating with them.
node_timeout = 60
# May want to consider bumping up this value to 1 - 4 MB depending on how much
# of your traffic consists of multi-megabyte or gigabyte requests; perhaps
# matching the stripe width (not the stripe element size) of your storage
# volume is a good starting point. See below for sizing information.
object_chunk_size = 65536
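# A sizing sketch only, assuming a hypothetical volume striped across 4 bricks
# with a 256 KiB stripe element size, giving a 1 MiB stripe width
# (4 * 262,144 = 1,048,576 bytes):
# object_chunk_size = 1048576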
# If you do decide to increase the object_chunk_size, then consider lowering
# this value to one. Up to "put_queue_depth" object_chunk_size'd buffers can
# be queued to the object server for processing. Since one proxy server worker
# can handle up to 1,024 connections by default, it will consume 10 * 65,536
# * 1,024 bytes of memory in the worst case (default values). Be sure the
# amount of memory available on the system can accommodate increased values
# for object_chunk_size.
put_queue_depth = 10
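# A rough worked example of the comment above, using the default values:
# 10 buffers * 65,536 bytes * 1,024 connections = 671,088,640 bytes (~640 MiB)
# per worker in the worst case. If object_chunk_size were raised to 1 MiB and
# put_queue_depth lowered to 1, the worst case would be
# 1 * 1,048,576 * 1,024 = 1 GiB per worker.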
[filter:tempauth]
use = egg:swift#tempauth
# Here you need to add users explicitly. See the OpenStack Swift Deployment
# Guide for more information. The user and user64 directives take the
# following form:
# user_<account>_<username> = <key> [group] [group] [...] [storage_url]
# user64_<account_b64>_<username_b64> = <key> [group] [group] [...] [storage_url]
# Use the user64 form, with the account and/or username base64-encoded, when
# either contains underscores.
#
# NOTE (and WARNING): The account name must match the device name specified
# when building the account, container, and object rings.
#
# E.g.
# user_ufo0_admin = abc123 .admin
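# A user64 sketch, assuming a hypothetical account "test_account" and user
# "tester" (both base64-encoded because the account name contains an
# underscore), with a hypothetical key "testing":
# user64_dGVzdF9hY2NvdW50_dGVzdGVy = testing .admin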
[filter:healthcheck]
use = egg:swift#healthcheck
[filter:cache]
use = egg:swift#memcache
# Update this line to contain a comma-separated list of memcache servers
# shared by all nodes running the proxy-server service.
memcache_servers = localhost:11211
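# For example, in a hypothetical three-node deployment (addresses are
# placeholders, not defaults):
# memcache_servers = 192.168.1.20:11211,192.168.1.21:11211,192.168.1.22:11211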