Cleanup functest and undo old patch
Conf files used by different functests were duplicated. If they needed to be modified (for example, to add new options for object expiration), they had to be modified in several places.

* Added object-expirer configs that were missing.
* Undid http://review.gluster.org/6444, which is no longer required.

Change-Id: I601b0dd8fdb10520f81523d8e292e944b34e28ce
Signed-off-by: Prashanth Pai <ppai@redhat.com>
parent 051e068d1e
commit 637dac947c
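The change consolidates the duplicated functest configuration into test/functional_auth/common_conf/, so each wrapper script installs the shared configs first and then only its auth-specific ones. A minimal sketch of that flow, using only paths and commands that appear in the diff below:

# Shared, auth-agnostic configs (including the new object-expirer.conf and test.conf)
sudo mkdir /etc/swift > /dev/null 2>&1
sudo cp -r test/functional_auth/common_conf/* /etc/swift
# Auth-specific overrides, e.g. tempauth (gswauth, keystone, kerbauth are analogous)
sudo cp -r test/functional_auth/tempauth/conf/* /etc/swift
# Build ring files for the test volumes
sudo_env gluster-swift-gen-builders test test2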
.functests (66 lines changed)
@@ -18,66 +18,8 @@
# This program expects to be run by tox in a virtual python environment
# so that it does not pollute the host development system

sudo_env()
{
    sudo bash -c "PATH=$PATH $*"
}

# Run functional tests with tempauth as auth middleware
bash tools/tempauth_functional_tests.sh

cleanup()
{
    sudo service memcached stop
    sudo_env swift-init main stop
    sudo rm -rf /etc/swift > /dev/null 2>&1
    sudo rm -rf /mnt/gluster-object/test{,2}/* > /dev/null 2>&1
    sudo setfattr -x user.swift.metadata /mnt/gluster-object/test{,2} > /dev/null 2>&1
}

quit()
{
    echo "$1"
    exit 1
}


fail()
{
    cleanup
    quit "$1"
}

### MAIN ###
# This script runs functional tests only with tempauth

# Only run if there is no configuration in the system
if [ -x /etc/swift ] ; then
    quit "/etc/swift exists, cannot run functional tests."
fi

# Check the directories exist
DIRS="/mnt/gluster-object /mnt/gluster-object/test /mnt/gluster-object/test2"
for d in $DIRS ; do
    if [ ! -x $d ] ; then
        quit "$d must exist on an XFS or GlusterFS volume"
    fi
done

export SWIFT_TEST_CONFIG_FILE=/etc/swift/test.conf

# Install the configuration files
sudo mkdir /etc/swift > /dev/null 2>&1
sudo cp -r test/functional_auth/tempauth/conf/* /etc/swift || fail "Unable to copy configuration files to /etc/swift"
sudo_env gluster-swift-gen-builders test test2 || fail "Unable to create ring files"

# Start the services
sudo service memcached start || fail "Unable to start memcached"
sudo_env swift-init main start || fail "Unable to start swift"

mkdir functional_tests_result > /dev/null 2>&1
nosetests -v --exe \
    --with-xunit \
    --xunit-file functional_tests_result/gluster-swift-generic-functional-TC-report.xml \
    --with-html-output \
    --html-out-file functional_tests_result/gluster-swift-generic-functional-result.html \
    test/functional || fail "Functional tests failed"
cleanup
exit 0
# Run functional tests with gswauth as auth middleware
bash tools/gswauth_functional_tests.sh

@@ -37,7 +37,6 @@ _allow_mount_per_server = False
_implicit_dir_objects = False
_container_update_object_count = False
_account_update_container_count = False
_ignore_unsupported_headers = False

if _fs_conf.read(os.path.join(SWIFT_DIR, 'fs.conf')):
    try:

@@ -98,18 +97,6 @@ if _fs_conf.read(os.path.join(SWIFT_DIR, 'fs.conf')):
    except (NoSectionError, NoOptionError):
        pass

    # -- Hidden configuration option --
    # Ignore unsupported headers and allow them in a request without
    # returning a 400-BadRequest. This setting can be set to
    # allow unsupported headers such as X-Delete-At and
    # X-Delete-After even though they will not be used.
    try:
        _ignore_unsupported_headers = \
            _fs_conf.get('DEFAULT',
                         'ignore_unsupported_headers',
                         "no") in TRUE_VALUES
    except (NoSectionError, NoOptionError):
        pass

NAME = 'glusterfs'

@@ -23,7 +23,6 @@ import swift.common.ring as _ring
from gluster.swift.common import Glusterfs, ring

MAX_OBJECT_NAME_COMPONENT_LENGTH = 255
UNSUPPORTED_HEADERS = []


def set_object_name_component_length(len=None):

@@ -56,40 +55,8 @@ def validate_obj_name_component(obj):
    return ''


def validate_headers(req):
    """
    Validate client header requests
    :param req: Http request
    """
    if not Glusterfs._ignore_unsupported_headers:
        for unsupported_header in UNSUPPORTED_HEADERS:
            if unsupported_header in req.headers:
                return '%s headers are not supported' \
                    % ','.join(UNSUPPORTED_HEADERS)
    return ''

# Save the original check object creation
__check_object_creation = swift.common.constraints.check_object_creation
__check_metadata = swift.common.constraints.check_metadata


def gluster_check_metadata(req, target_type, POST=True):
    """
    :param req: HTTP request object
    :param target_type: Value from POST passed to __check_metadata
    :param POST: Only call __check_metadata on POST since Swift only
                 calls check_metadata on POSTs.
    """
    ret = None
    if POST:
        ret = __check_metadata(req, target_type)
    if ret is None:
        bdy = validate_headers(req)
        if bdy:
            ret = HTTPBadRequest(body=bdy,
                                 request=req,
                                 content_type='text/plain')
    return ret


# Define our new one which invokes the original

@@ -119,14 +86,11 @@ def gluster_check_object_creation(req, object_name):
            ret = HTTPBadRequest(body=bdy,
                                 request=req,
                                 content_type='text/plain')
    if ret is None:
        ret = gluster_check_metadata(req, 'object', POST=False)

    return ret

# Replace the original checks with ours
swift.common.constraints.check_object_creation = gluster_check_object_creation
swift.common.constraints.check_metadata = gluster_check_metadata

# Replace the original check mount with ours
swift.common.constraints.check_mount = Glusterfs.mount

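With the unsupported-header check reverted, a PUT or POST carrying X-Delete-At or X-Delete-After is passed through to Swift instead of being rejected with 400. An illustrative manual check, assuming the tempauth endpoint and credentials from the test.conf added in this commit (127.0.0.1:8080, account test, user tester/testing); the container name is made up for the example:

# Get a tempauth token for test:tester
TOKEN=$(curl -si -H 'X-Auth-User: test:tester' -H 'X-Auth-Key: testing' \
        http://127.0.0.1:8080/auth/v1.0 | awk '/X-Auth-Token:/ {print $2}' | tr -d '\r')
# Create a container, then upload an object that expires in 60 seconds
curl -i -X PUT -H "X-Auth-Token: $TOKEN" http://127.0.0.1:8080/v1/AUTH_test/expiry-demo
curl -i -X PUT -H "X-Auth-Token: $TOKEN" -H 'X-Delete-After: 60' \
     --data-binary 'goes away soon' \
     http://127.0.0.1:8080/v1/AUTH_test/expiry-demo/expiring-object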
@@ -66,45 +66,6 @@ class TestFile(Base):
        self.assertTrue(data == file_item.read())
        self.assert_status(200)

    def testInvalidHeadersPUT(self):
        #TODO: Although we now support x-delete-at and x-delete-after,
        #retained this test case as we may add some other header to
        #unsupported list in future
        raise SkipTest()
        file = self.env.container.file(Utils.create_name())
        self.assertRaises(ResponseError,
                          file.write_random,
                          self.env.file_size,
                          hdrs={'X-Delete-At': '9876545321'})
        self.assert_status(400)
        self.assertRaises(ResponseError,
                          file.write_random,
                          self.env.file_size,
                          hdrs={'X-Delete-After': '60'})
        self.assert_status(400)

    def testInvalidHeadersPOST(self):
        #TODO: Although we now support x-delete-at and x-delete-after,
        #retained this test case as we may add some other header to
        #unsupported list in future
        raise SkipTest()
        file = self.env.container.file(Utils.create_name())
        file.write_random(self.env.file_size)
        headers = file.make_headers(cfg={})
        headers.update({ 'X-Delete-At' : '987654321'})
        # Need to call conn.make_request instead of file.sync_metadata
        # because sync_metadata calls make_headers. make_headers()
        # overwrites any headers in file.metadata as 'user' metadata
        # by appending 'X-Object-Meta-' to any of the headers
        # in file.metadata.
        file.conn.make_request('POST', file.path, hdrs=headers, cfg={})
        self.assertEqual(400, file.conn.response.status)

        headers = file.make_headers(cfg={})
        headers.update({ 'X-Delete-After' : '60'})
        file.conn.make_request('POST', file.path, hdrs=headers, cfg={})
        self.assertEqual(400, file.conn.response.status)


class TestFileUTF8(Base2, TestFile):
    set_up = False

@@ -30,3 +30,7 @@ log_level = WARN
# normal request logging for the account server to unclutter the log
# files. Warnings and errors will still be logged.
log_requests = off

# The following parameter is used by object-expirer and needs to be same
# across all conf files!
auto_create_account_prefix = gs

@@ -33,3 +33,7 @@ log_requests = off

#enable object versioning for functional test
allow_versions = on

# The following parameter is used by object-expirer and needs to be same
# across all conf files!
auto_create_account_prefix = gs

test/functional_auth/common_conf/object-expirer.conf (new file, 27 lines)
@@ -0,0 +1,27 @@
#TODO: Add documentation to explain various options
#For now, refer: https://github.com/openstack/swift/blob/master/etc/object-expirer.conf-sample

[DEFAULT]

[object-expirer]
user = root
log_facility = LOG_LOCAL2
log_level = DEBUG
# The following parameters are used by object-expirer and needs to be same
# across all conf files!
auto_create_account_prefix = gs
expiring_objects_account_name = expiring

interval = 30

[pipeline:main]
pipeline = catch_errors cache proxy-server

[app:proxy-server]
use = egg:gluster_swift#proxy

[filter:cache]
use = egg:swift#memcache

[filter:catch_errors]
use = egg:swift#catch_errors

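The new shared object-expirer.conf only works if the expirer and the account/container/object servers agree on the expiring-objects account settings, which is what the repeated "needs to be same across all conf files" comment is about. An illustrative check and start command, assuming the configs are installed under /etc/swift and the stock swift-init entry point from upstream Swift is available:

# Verify the two expirer-related values match everywhere they are set
grep -H -e auto_create_account_prefix -e expiring_objects_account_name /etc/swift/*.conf

# Run the expirer against the shared conf (assumes swift-init knows the
# object-expirer server, as in upstream Swift)
sudo swift-init object-expirer start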
@@ -46,3 +46,8 @@ disk_chunk_size = 65536
# Adjust this value match whatever is set for the disk_chunk_size initially.
# This will provide a reasonable starting point for tuning this value.
network_chunk_size = 65556

# The following parameter is used by object-expirer and needs to be same
# across all conf files!
auto_create_account_prefix = gs
expiring_objects_account_name = expiring

test/functional_auth/common_conf/test.conf (new file, 58 lines)
@@ -0,0 +1,58 @@
[func_test]
# sample config
auth_host = 127.0.0.1
auth_port = 8080
auth_ssl = no
auth_prefix = /auth/
## sample config for Swift with Keystone
#auth_version = 2
#auth_host = localhost
#auth_port = 5000
#auth_ssl = no
#auth_prefix = /v2.0/

# GSWauth internal admin user configuration information
admin_key = gswauthkey
admin_user = .super_admin

# Gluster setup information
devices = /mnt/gluster-object
gsmetadata_volume = gsmetadata

# Primary functional test account (needs admin access to the account)
account = test
username = tester
password = testing

# User on a second account (needs admin access to the account)
account2 = test2
username2 = tester2
password2 = testing2

# User on same account as first, but without admin access
username3 = tester3
password3 = testing3

# Default constraints if not defined here, the test runner will try
# to set them from /etc/swift/swift.conf. If that file isn't found,
# the test runner will skip tests that depend on these values.
# Note that the cluster must have "sane" values for the test suite to pass.
#max_file_size = 5368709122
#max_meta_name_length = 128
#max_meta_value_length = 256
#max_meta_count = 90
#max_meta_overall_size = 4096
#max_object_name_length = 1024
#container_listing_limit = 10000
#account_listing_limit = 10000
#max_account_name_length = 256
#max_container_name_length = 256
normalized_urls = True

collate = C

[unit_test]
fake_syslog = False

[probe_test]
# check_server_timeout = 30

@@ -1,17 +0,0 @@
[DEFAULT]

[object-expirer]
# auto_create_account_prefix = .

[pipeline:main]
pipeline = catch_errors cache proxy-server

[app:proxy-server]
use = egg:swift#proxy

[filter:cache]
use = egg:swift#memcache
memcache_servers = 127.0.0.1:11211

[filter:catch_errors]
use = egg:swift#catch_errors

@@ -1,48 +0,0 @@
[DEFAULT]
devices = /mnt/gluster-object
#
# Once you are confident that your startup processes will always have your
# gluster volumes properly mounted *before* the object-server workers start,
# you can *consider* setting this value to "false" to reduce the per-request
# overhead it can incur.
#
# *** Keep false for Functional Tests ***
mount_check = false
bind_port = 6010
#
# Maximum number of clients one worker can process simultaneously (it will
# actually accept N + 1). Setting this to one (1) will only handle one request
# at a time, without accepting another request concurrently. By increasing the
# number of workers to a much higher value, one can prevent slow file system
# operations for one request from starving other requests.
max_clients = 1024
#
# If not doing the above, setting this value initially to match the number of
# CPUs is a good starting point for determining the right value.
workers = 1
# Override swift's default behaviour for fallocate.
disable_fallocate = true

[pipeline:main]
pipeline = object-server

[app:object-server]
use = egg:gluster_swift#object
user = root
log_facility = LOG_LOCAL2
log_level = WARN
#
# For performance, after ensuring things are running in a stable manner, you
# can turn off normal request logging for the object server to reduce the
# per-request overhead and unclutter the log files. Warnings and errors will
# still be logged.
log_requests = off
#
# Adjust this value to match the stripe width of the underlying storage array
# (not the stripe element size). This will provide a reasonable starting point
# for tuning this value.
disk_chunk_size = 65536
#
# Adjust this value match whatever is set for the disk_chunk_size initially.
# This will provide a reasonable starting point for tuning this value.
network_chunk_size = 65556

@@ -48,6 +48,10 @@ object_chunk_size = 65536
# amount of memory available on the system can accommodate increased values
# for object_chunk_size.
put_queue_depth = 10
# The following parameter is used by object-expirer and needs to be same
# across all conf files!
auto_create_account_prefix = gs
expiring_objects_account_name = expiring

[filter:catch_errors]
use = egg:swift#catch_errors

@@ -58,19 +62,6 @@ use = egg:swift#proxy_logging
[filter:healthcheck]
use = egg:swift#healthcheck

[filter:tempauth]
use = egg:swift#tempauth
user_admin_admin = admin .admin .reseller_admin
user_test_tester = testing .admin
user_test2_tester2 = testing2 .admin
user_test_tester3 = testing3

[filter:gswauth]
use = egg:gluster_swift#gswauth
set log_name = gswauth
super_admin_key = gswauthkey
metadata_volume = gsmetadata

[filter:cache]
use = egg:swift#memcache
# Update this line to contain a comma separated list of memcache servers

@@ -79,3 +70,9 @@ memcache_servers = localhost:11211

[filter:tempurl]
use = egg:swift#tempurl

[filter:gswauth]
use = egg:gluster_swift#gswauth
set log_name = gswauth
super_admin_key = gswauthkey
metadata_volume = gsmetadata

@@ -1,32 +0,0 @@
[DEFAULT]
devices = /mnt/gluster-object
#
# Once you are confident that your startup processes will always have your
# gluster volumes properly mounted *before* the account-server workers start,
# you can *consider* setting this value to "false" to reduce the per-request
# overhead it can incur.
#
# *** Keep false for Functional Tests ***
mount_check = false
bind_port = 6012
#
# Override swift's default behaviour for fallocate.
disable_fallocate = true
#
# One or two workers should be sufficient for almost any installation of
# Gluster.
workers = 1

[pipeline:main]
pipeline = account-server

[app:account-server]
use = egg:gluster_swift#account
user = root
log_facility = LOG_LOCAL2
log_level = WARN
#
# After ensuring things are running in a stable manner, you can turn off
# normal request logging for the account server to unclutter the log
# files. Warnings and errors will still be logged.
log_requests = off

@@ -1,35 +0,0 @@
[DEFAULT]
devices = /mnt/gluster-object
#
# Once you are confident that your startup processes will always have your
# gluster volumes properly mounted *before* the container-server workers
# start, you can *consider* setting this value to "false" to reduce the
# per-request overhead it can incur.
#
# *** Keep false for Functional Tests ***
mount_check = false
bind_port = 6011
#
# Override swift's default behaviour for fallocate.
disable_fallocate = true
#
# One or two workers should be sufficient for almost any installation of
# Gluster.
workers = 1

[pipeline:main]
pipeline = container-server

[app:container-server]
use = egg:gluster_swift#container
user = root
log_facility = LOG_LOCAL2
log_level = WARN
#
# After ensuring things are running in a stable manner, you can turn off
# normal request logging for the container server to unclutter the log
# files. Warnings and errors will still be logged.
log_requests = off

#enable object versioning for functional test
allow_versions = on

@@ -1,19 +0,0 @@
[DEFAULT]
#
# IP address of a node in the GlusterFS server cluster hosting the
# volumes to be served via Swift API.
mount_ip = localhost

# Performance optimization parameter. When turned off, the filesystem will
# see a reduced number of stat calls, resulting in substantially faster
# response time for GET and HEAD container requests on containers with large
# numbers of objects, at the expense of an accurate count of combined bytes
# used by all objects in the container. For most installations "off" works
# fine.
#
# *** Keep on for Functional Tests ***
accurate_size_in_listing = on

# *** Keep on for Functional Tests ***
container_update_object_count = on
account_update_container_count = on

@@ -1,48 +0,0 @@
[DEFAULT]
devices = /mnt/gluster-object
#
# Once you are confident that your startup processes will always have your
# gluster volumes properly mounted *before* the object-server workers start,
# you can *consider* setting this value to "false" to reduce the per-request
# overhead it can incur.
#
# *** Keep false for Functional Tests ***
mount_check = false
bind_port = 6010
#
# Maximum number of clients one worker can process simultaneously (it will
# actually accept N + 1). Setting this to one (1) will only handle one request
# at a time, without accepting another request concurrently. By increasing the
# number of workers to a much higher value, one can prevent slow file system
# operations for one request from starving other requests.
max_clients = 1024
#
# If not doing the above, setting this value initially to match the number of
# CPUs is a good starting point for determining the right value.
workers = 1
# Override swift's default behaviour for fallocate.
disable_fallocate = true

[pipeline:main]
pipeline = object-server

[app:object-server]
use = egg:gluster_swift#object
user = root
log_facility = LOG_LOCAL2
log_level = WARN
#
# For performance, after ensuring things are running in a stable manner, you
# can turn off normal request logging for the object server to reduce the
# per-request overhead and unclutter the log files. Warnings and errors will
# still be logged.
log_requests = off
#
# Adjust this value to match the stripe width of the underlying storage array
# (not the stripe element size). This will provide a reasonable starting point
# for tuning this value.
disk_chunk_size = 65536
#
# Adjust this value match whatever is set for the disk_chunk_size initially.
# This will provide a reasonable starting point for tuning this value.
network_chunk_size = 65556

@@ -49,6 +49,10 @@ object_chunk_size = 65536
# amount of memory available on the system can accommodate increased values
# for object_chunk_size.
put_queue_depth = 10
# The following parameter is used by object-expirer and needs to be same
# across all conf files!
auto_create_account_prefix = gs
expiring_objects_account_name = expiring

[filter:catch_errors]
use = egg:swift#catch_errors

@@ -59,13 +63,14 @@ use = egg:swift#proxy_logging
[filter:healthcheck]
use = egg:swift#healthcheck

[filter:tempauth]
use = egg:swift#tempauth
user_admin_admin = admin .admin .reseller_admin
user_d4dde08c621a4f0fb4cde0ac6a62aa0c_tester = testing .admin
user_test_tester = testing .admin
user_test2_tester2 = testing2 .admin
user_test_tester3 = testing3
[filter:cache]
use = egg:swift#memcache
# Update this line to contain a comma separated list of memcache servers
# shared by all nodes running the proxy-server service.
memcache_servers = localhost:11211

[filter:tempurl]
use = egg:swift#tempurl

[filter:authtoken]
paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory

@@ -87,11 +92,4 @@ operator_roles = admin
is_admin = true
cache = swift.cache

[filter:cache]
use = egg:swift#memcache
# Update this line to contain a comma separated list of memcache servers
# shared by all nodes running the proxy-server service.
memcache_servers = localhost:11211

[filter:tempurl]
use = egg:swift#tempurl

@@ -1,85 +0,0 @@
[DEFAULT]


[swift-hash]
# random unique string that can never change (DO NOT LOSE)
swift_hash_path_suffix = gluster


# The swift-constraints section sets the basic constraints on data
# saved in the swift cluster.

[swift-constraints]

# max_file_size is the largest "normal" object that can be saved in
# the cluster. This is also the limit on the size of each segment of
# a "large" object when using the large object manifest support.
# This value is set in bytes. Setting it to lower than 1MiB will cause
# some tests to fail.
# Default is 1 TiB = 2**30*1024
max_file_size = 1099511627776


# max_meta_name_length is the max number of bytes in the utf8 encoding
# of the name portion of a metadata header.

#max_meta_name_length = 128


# max_meta_value_length is the max number of bytes in the utf8 encoding
# of a metadata value

#max_meta_value_length = 256


# max_meta_count is the max number of metadata keys that can be stored
# on a single account, container, or object

#max_meta_count = 90


# max_meta_overall_size is the max number of bytes in the utf8 encoding
# of the metadata (keys + values)

#max_meta_overall_size = 4096


# max_object_name_length is the max number of bytes in the utf8 encoding of an
# object name: Gluster FS can handle much longer file names, but the length
# between the slashes of the URL is handled below. Remember that most web
# clients can't handle anything greater than 2048, and those that do are
# rather clumsy.

max_object_name_length = 2048

# max_object_name_component_length (GlusterFS) is the max number of bytes in
# the utf8 encoding of an object name component (the part between the
# slashes); this is a limit imposed by the underlying file system (for XFS it
# is 255 bytes).

max_object_name_component_length = 255

# container_listing_limit is the default (and max) number of items
# returned for a container listing request

#container_listing_limit = 10000


# account_listing_limit is the default (and max) number of items returned
# for an account listing request

#account_listing_limit = 10000


# max_account_name_length is the max number of bytes in the utf8 encoding of
# an account name: Gluster FS Filename limit (XFS limit?), must be the same
# size as max_object_name_component_length above.

max_account_name_length = 255


# max_container_name_length is the max number of bytes in the utf8 encoding
# of a container name: Gluster FS Filename limit (XFS limit?), must be the same
# size as max_object_name_component_length above.

max_container_name_length = 255

@@ -1,36 +0,0 @@
[DEFAULT]
#
# Default gluster mount point to be used for object store,can be changed by
# setting the following value in {account,container,object}-server.conf files.
# It is recommended to keep this value same for all the three services but can
# be kept different if environment demands.
devices = /mnt/gluster-object
#
# Once you are confident that your startup processes will always have your
# gluster volumes properly mounted *before* the account-server workers start,
# you can *consider* setting this value to "false" to reduce the per-request
# overhead it can incur.
mount_check = true
bind_port = 6012
#
# Override swift's default behaviour for fallocate.
disable_fallocate = true
#
# One or two workers should be sufficient for almost any installation of
# Gluster.
workers = 1

[pipeline:main]
pipeline = account-server

[app:account-server]
use = egg:gluster_swift#account
user = root
log_facility = LOG_LOCAL2
log_level = WARN
#
# After ensuring things are running in a stable manner, you can turn off
# normal request logging for the account server to unclutter the log
# files. Warnings and errors will still be logged.
log_requests = off

@@ -1,36 +0,0 @@
[DEFAULT]
#
# Default gluster mount point to be used for object store,can be changed by
# setting the following value in {account,container,object}-server.conf files.
# It is recommended to keep this value same for all the three services but can
# be kept different if environment demands.
devices = /mnt/gluster-object
#
# Once you are confident that your startup processes will always have your
# gluster volumes properly mounted *before* the container-server workers
# start, you can *consider* setting this value to "false" to reduce the
# per-request overhead it can incur.
mount_check = true
bind_port = 6011
#
# Override swift's default behaviour for fallocate.
disable_fallocate = true
#
# One or two workers should be sufficient for almost any installation of
# Gluster.
workers = 1

[pipeline:main]
pipeline = container-server

[app:container-server]
use = egg:gluster_swift#container
user = root
log_facility = LOG_LOCAL2
log_level = WARN
#
# After ensuring things are running in a stable manner, you can turn off
# normal request logging for the container server to unclutter the log
# files. Warnings and errors will still be logged.
log_requests = off

@@ -1,13 +0,0 @@
[DEFAULT]
#
# IP address of a node in the GlusterFS server cluster hosting the
# volumes to be served via Swift API.
mount_ip = localhost

# Performance optimization parameter. When turned off, the filesystem will
# see a reduced number of stat calls, resulting in substantially faster
# response time for GET and HEAD container requests on containers with large
# numbers of objects, at the expense of an accurate count of combined bytes
# used by all objects in the container. For most installations "off" works
# fine.
accurate_size_in_listing = off

@@ -1,51 +0,0 @@
[DEFAULT]
#
# Default gluster mount point to be used for object store,can be changed by
# setting the following value in {account,container,object}-server.conf files.
# It is recommended to keep this value same for all the three services but can
# be kept different if environment demands.
devices = /mnt/gluster-object
#
# Once you are confident that your startup processes will always have your
# gluster volumes properly mounted *before* the object-server workers start,
# you can *consider* setting this value to "false" to reduce the per-request
# overhead it can incur.
mount_check = true
bind_port = 6010
#
# Maximum number of clients one worker can process simultaneously (it will
# actually accept N + 1). Setting this to one (1) will only handle one request
# at a time, without accepting another request concurrently. By increasing the
# number of workers to a much higher value, one can prevent slow file system
# operations for one request from starving other requests.
max_clients = 1024
#
# If not doing the above, setting this value initially to match the number of
# CPUs is a good starting point for determining the right value.
workers = 1
# Override swift's default behaviour for fallocate.
disable_fallocate = true

[pipeline:main]
pipeline = object-server

[app:object-server]
use = egg:gluster_swift#object
user = root
log_facility = LOG_LOCAL2
log_level = WARN
#
# For performance, after ensuring things are running in a stable manner, you
# can turn off normal request logging for the object server to reduce the
# per-request overhead and unclutter the log files. Warnings and errors will
# still be logged.
log_requests = off
#
# Adjust this value to match the stripe width of the underlying storage array
# (not the stripe element size). This will provide a reasonable starting point
# for tuning this value.
disk_chunk_size = 65536
#
# Adjust this value match whatever is set for the disk_chunk_size initially.
# This will provide a reasonable starting point for tuning this value.
network_chunk_size = 65536

@@ -48,6 +48,10 @@ object_chunk_size = 65536
# amount of memory available on the system can accommodate increased values
# for object_chunk_size.
put_queue_depth = 10
# The following parameter is used by object-expirer and needs to be same
# across all conf files!
auto_create_account_prefix = gs
expiring_objects_account_name = expiring

[filter:catch_errors]
use = egg:swift#catch_errors

@@ -59,10 +63,6 @@ access_log_level = WARN
[filter:healthcheck]
use = egg:swift#healthcheck

[filter:kerbauth]
use = egg:gluster_swift#kerbauth
ext_authentication_url = http://client.rhelbox.com/cgi-bin/swift-auth

[filter:cache]
use = egg:swift#memcache
# Update this line to contain a comma separated list of memcache servers

@@ -71,3 +71,7 @@ memcache_servers = localhost:11211

[filter:tempurl]
use = egg:swift#tempurl

[filter:kerbauth]
use = egg:gluster_swift#kerbauth
ext_authentication_url = http://client.rhelbox.com/cgi-bin/swift-auth

@@ -1,84 +0,0 @@
[DEFAULT]


[swift-hash]
# random unique string that can never change (DO NOT LOSE)
swift_hash_path_suffix = gluster


# The swift-constraints section sets the basic constraints on data
# saved in the swift cluster.

[swift-constraints]
# max_file_size is the largest "normal" object that can be saved in
# the cluster. This is also the limit on the size of each segment of
# a "large" object when using the large object manifest support.
# This value is set in bytes. Setting it to lower than 1MiB will cause
# some tests to fail.
# Default is 1 TiB = 2**30*1024

max_file_size = 1099511627776

# max_meta_name_length is the max number of bytes in the utf8 encoding
# of the name portion of a metadata header.

#max_meta_name_length = 128


# max_meta_value_length is the max number of bytes in the utf8 encoding
# of a metadata value

#max_meta_value_length = 256


# max_meta_count is the max number of metadata keys that can be stored
# on a single account, container, or object

#max_meta_count = 90


# max_meta_overall_size is the max number of bytes in the utf8 encoding
# of the metadata (keys + values)

#max_meta_overall_size = 4096


# max_object_name_length is the max number of bytes in the utf8 encoding of an
# object name: Gluster FS can handle much longer file names, but the length
# between the slashes of the URL is handled below. Remember that most web
# clients can't handle anything greater than 2048, and those that do are
# rather clumsy.

max_object_name_length = 2048

# max_object_name_component_length (GlusterFS) is the max number of bytes in
# the utf8 encoding of an object name component (the part between the
# slashes); this is a limit imposed by the underlying file system (for XFS it
# is 255 bytes).

max_object_name_component_length = 255

# container_listing_limit is the default (and max) number of items
# returned for a container listing request

#container_listing_limit = 10000


# account_listing_limit is the default (and max) number of items returned
# for an account listing request

#account_listing_limit = 10000


# max_account_name_length is the max number of bytes in the utf8 encoding of
# an account name: Gluster FS Filename limit (XFS limit?), must be the same
# size as max_object_name_component_length above.

max_account_name_length = 255


# max_container_name_length is the max number of bytes in the utf8 encoding
# of a container name: Gluster FS Filename limit (XFS limit?), must be the same
# size as max_object_name_component_length above.

max_container_name_length = 255

@@ -1,32 +0,0 @@
[DEFAULT]
devices = /mnt/gluster-object
#
# Once you are confident that your startup processes will always have your
# gluster volumes properly mounted *before* the account-server workers start,
# you can *consider* setting this value to "false" to reduce the per-request
# overhead it can incur.
#
# *** Keep false for Functional Tests ***
mount_check = false
bind_port = 6012
#
# Override swift's default behaviour for fallocate.
disable_fallocate = true
#
# One or two workers should be sufficient for almost any installation of
# Gluster.
workers = 1

[pipeline:main]
pipeline = account-server

[app:account-server]
use = egg:gluster_swift#account
user = root
log_facility = LOG_LOCAL2
log_level = WARN
#
# After ensuring things are running in a stable manner, you can turn off
# normal request logging for the account server to unclutter the log
# files. Warnings and errors will still be logged.
log_requests = off

@@ -1,35 +0,0 @@
[DEFAULT]
devices = /mnt/gluster-object
#
# Once you are confident that your startup processes will always have your
# gluster volumes properly mounted *before* the container-server workers
# start, you can *consider* setting this value to "false" to reduce the
# per-request overhead it can incur.
#
# *** Keep false for Functional Tests ***
mount_check = false
bind_port = 6011
#
# Override swift's default behaviour for fallocate.
disable_fallocate = true
#
# One or two workers should be sufficient for almost any installation of
# Gluster.
workers = 1

[pipeline:main]
pipeline = container-server

[app:container-server]
use = egg:gluster_swift#container
user = root
log_facility = LOG_LOCAL2
log_level = WARN
#
# After ensuring things are running in a stable manner, you can turn off
# normal request logging for the container server to unclutter the log
# files. Warnings and errors will still be logged.
log_requests = off

#enable object versioning for functional test
allow_versions = on

@@ -1,19 +0,0 @@
[DEFAULT]
#
# IP address of a node in the GlusterFS server cluster hosting the
# volumes to be served via Swift API.
mount_ip = localhost

# Performance optimization parameter. When turned off, the filesystem will
# see a reduced number of stat calls, resulting in substantially faster
# response time for GET and HEAD container requests on containers with large
# numbers of objects, at the expense of an accurate count of combined bytes
# used by all objects in the container. For most installations "off" works
# fine.
#
# *** Keep on for Functional Tests ***
accurate_size_in_listing = on

# *** Keep on for Functional Tests ***
container_update_object_count = on
account_update_container_count = on

@@ -1,17 +0,0 @@
[DEFAULT]

[object-expirer]
# auto_create_account_prefix = .

[pipeline:main]
pipeline = catch_errors cache proxy-server

[app:proxy-server]
use = egg:swift#proxy

[filter:cache]
use = egg:swift#memcache
memcache_servers = 127.0.0.1:11211

[filter:catch_errors]
use = egg:swift#catch_errors

@@ -48,6 +48,10 @@ object_chunk_size = 65536
# amount of memory available on the system can accommodate increased values
# for object_chunk_size.
put_queue_depth = 10
# The following parameter is used by object-expirer and needs to be same
# across all conf files!
auto_create_account_prefix = gs
expiring_objects_account_name = expiring

[filter:catch_errors]
use = egg:swift#catch_errors

@@ -58,13 +62,6 @@ use = egg:swift#proxy_logging
[filter:healthcheck]
use = egg:swift#healthcheck

[filter:tempauth]
use = egg:swift#tempauth
user_admin_admin = admin .admin .reseller_admin
user_test_tester = testing .admin
user_test2_tester2 = testing2 .admin
user_test_tester3 = testing3

[filter:cache]
use = egg:swift#memcache
# Update this line to contain a comma separated list of memcache servers

@@ -73,3 +70,10 @@ memcache_servers = localhost:11211

[filter:tempurl]
use = egg:swift#tempurl

[filter:tempauth]
use = egg:swift#tempauth
user_admin_admin = admin .admin .reseller_admin
user_test_tester = testing .admin
user_test2_tester2 = testing2 .admin
user_test_tester3 = testing3

@@ -1,85 +0,0 @@
[DEFAULT]


[swift-hash]
# random unique string that can never change (DO NOT LOSE)
swift_hash_path_suffix = gluster


# The swift-constraints section sets the basic constraints on data
# saved in the swift cluster.

[swift-constraints]

# max_file_size is the largest "normal" object that can be saved in
# the cluster. This is also the limit on the size of each segment of
# a "large" object when using the large object manifest support.
# This value is set in bytes. Setting it to lower than 1MiB will cause
# some tests to fail.
# Default is 1 TiB = 2**30*1024
max_file_size = 1099511627776


# max_meta_name_length is the max number of bytes in the utf8 encoding
# of the name portion of a metadata header.

#max_meta_name_length = 128


# max_meta_value_length is the max number of bytes in the utf8 encoding
# of a metadata value

#max_meta_value_length = 256


# max_meta_count is the max number of metadata keys that can be stored
# on a single account, container, or object

#max_meta_count = 90


# max_meta_overall_size is the max number of bytes in the utf8 encoding
# of the metadata (keys + values)

#max_meta_overall_size = 4096


# max_object_name_length is the max number of bytes in the utf8 encoding of an
# object name: Gluster FS can handle much longer file names, but the length
# between the slashes of the URL is handled below. Remember that most web
# clients can't handle anything greater than 2048, and those that do are
# rather clumsy.

max_object_name_length = 2048

# max_object_name_component_length (GlusterFS) is the max number of bytes in
# the utf8 encoding of an object name component (the part between the
# slashes); this is a limit imposed by the underlying file system (for XFS it
# is 255 bytes).

max_object_name_component_length = 255

# container_listing_limit is the default (and max) number of items
# returned for a container listing request

#container_listing_limit = 10000


# account_listing_limit is the default (and max) number of items returned
# for an account listing request

#account_listing_limit = 10000


# max_account_name_length is the max number of bytes in the utf8 encoding of
# an account name: Gluster FS Filename limit (XFS limit?), must be the same
# size as max_object_name_component_length above.

max_account_name_length = 255


# max_container_name_length is the max number of bytes in the utf8 encoding
# of a container name: Gluster FS Filename limit (XFS limit?), must be the same
# size as max_object_name_component_length above.

max_container_name_length = 255

@@ -75,65 +75,6 @@ class TestConstraints(unittest.TestCase):
        self.assertTrue(cnt.validate_obj_name_component('..'))
        self.assertTrue(cnt.validate_obj_name_component(''))

    def test_validate_headers(self):
        req = Mock()
        req.headers = []
        self.assertEqual(cnt.validate_headers(req), '')
        req.headers = ['x-some-header']
        self.assertEqual(cnt.validate_headers(req), '')
        #TODO: Although we now support x-delete-at and x-delete-after,
        #retained this test case as we may add some other header to
        #unsupported list in future
        raise SkipTest
        req.headers = ['x-delete-at', 'x-some-header']
        self.assertNotEqual(cnt.validate_headers(req), '')
        req.headers = ['x-delete-after', 'x-some-header']
        self.assertNotEqual(cnt.validate_headers(req), '')
        req.headers = ['x-delete-at', 'x-delete-after', 'x-some-header']
        self.assertNotEqual(cnt.validate_headers(req), '')

    def test_validate_headers_ignoring_config_set(self):
        with patch('gluster.swift.common.constraints.'
                   'Glusterfs._ignore_unsupported_headers', True):
            req = Mock()
            req.headers = []
            self.assertEqual(cnt.validate_headers(req), '')
            req.headers = ['x-some-header']
            self.assertEqual(cnt.validate_headers(req), '')
            #TODO: Although we now support x-delete-at and x-delete-after,
            #retained this test case as we may add some other header to
            #unsupported list in future
            raise SkipTest
            req.headers = ['x-delete-at', 'x-some-header']
            self.assertEqual(cnt.validate_headers(req), '')
            req.headers = ['x-delete-after', 'x-some-header']
            self.assertEqual(cnt.validate_headers(req), '')
            req.headers = ['x-delete-at', 'x-delete-after', 'x-some-header']
            self.assertEqual(cnt.validate_headers(req), '')

    def test_gluster_check_metadata(self):
        mock_check_metadata = Mock()
        with patch('gluster.swift.common.constraints.__check_metadata',
                   mock_check_metadata):
            req = Mock()
            req.headers = []
            cnt.gluster_check_metadata(req, 'object')
            self.assertTrue(1, mock_check_metadata.call_count)
            cnt.gluster_check_metadata(req, 'object', POST=False)
            self.assertTrue(1, mock_check_metadata.call_count)
            req.headers = ['x-some-header']
            self.assertEqual(cnt.gluster_check_metadata(req, 'object', POST=False), None)
            #TODO: Although we now support x-delete-at and x-delete-after,
            #retained this test case as we may add some other header to
            #unsupported list in future
            raise SkipTest
            req.headers = ['x-delete-at', 'x-some-header']
            self.assertNotEqual(cnt.gluster_check_metadata(req, 'object', POST=False), None)
            req.headers = ['x-delete-after', 'x-some-header']
            self.assertNotEqual(cnt.gluster_check_metadata(req, 'object', POST=False), None)
            req.headers = ['x-delete-at', 'x-delete-after', 'x-some-header']
            self.assertNotEqual(cnt.gluster_check_metadata(req, 'object', POST=False), None)

    def test_gluster_check_object_creation(self):
        with patch('gluster.swift.common.constraints.__check_object_creation',
                   mock_check_object_creation):

@@ -147,9 +88,3 @@ class TestConstraints(unittest.TestCase):
        req = Mock()
        req.headers = []
        self.assertTrue(cnt.gluster_check_object_creation(req, 'dir/.'))
        #TODO: Although we now support x-delete-at and x-delete-after,
        #retained this test case as we may add some other header to
        #unsupported list in future
        raise SkipTest
        req.headers = ['x-delete-at']
        self.assertTrue(cnt.gluster_check_object_creation(req, 'dir/z'))

@@ -91,6 +91,7 @@ export SWIFT_TEST_CONFIG_FILE=/etc/swift/test.conf

# Install the configuration files
sudo mkdir /etc/swift > /dev/null 2>&1
sudo cp -r test/functional_auth/common_conf/* /etc/swift || fail "Unable to copy configuration files to /etc/swift"
sudo cp -r test/functional_auth/gswauth/conf/* /etc/swift || fail "Unable to copy configuration files to /etc/swift"
sudo_env gluster-swift-gen-builders test test2 gsmetadata || fail "Unable to create ring files"

@@ -64,6 +64,7 @@ export SWIFT_TEST_CONFIG_FILE=/etc/swift/test.conf

# Install the configuration files
sudo mkdir /etc/swift > /dev/null 2>&1
sudo cp -r test/functional_auth/common_conf/* /etc/swift || fail "Unable to copy configuration files to /etc/swift"
sudo cp -r test/functional_auth/tempauth/conf/* /etc/swift || fail "Unable to copy configuration files to /etc/swift"
sudo_env gluster-swift-gen-builders test test2 || fail "Unable to create ring files"

@@ -71,13 +72,15 @@ sudo_env gluster-swift-gen-builders test test2 || fail "Unable to create ring fi
sudo service memcached start || fail "Unable to start memcached"
sudo_env swift-init main start || fail "Unable to start swift"

echo "Running functional tests with tempauth"

mkdir functional_tests > /dev/null 2>&1
nosetests -v --exe \
    --with-xunit \
    --xunit-file functional_tests/gluster-swift-generic-functional-TC-report.xml \
    --with-html-output \
    --html-out-file functional_tests/gluster-swift-generic-functional-result.html \
    test/functional || fail "Functional tests failed"
    --with-html-output \
    --html-out-file functional_tests/gluster-swift-generic-functional-result.html \
    test/functional || fail "Functional tests failed"

cleanup
exit 0