Support Storage Policy - Rev 2

* Fix API inconsistencies
* Remove hard-coded reseller_prefix = AUTH_
* Remove unused methods from gluster/swift/common/utils.py
  These used to be called from DiskDir earlier.
* Remove hidden configuration options that were related to account
  and container listings and metadata.
* Remove unused and redundant attributes

Signed-off-by: Prashanth Pai <ppai@redhat.com>
This commit is contained in:
Prashanth Pai 2014-06-02 17:49:15 +05:30
parent 7ab8e35ff6
commit 33e8e2312e
8 changed files with 35 additions and 634 deletions

View File

@ -32,12 +32,7 @@ _fs_conf = ConfigParser()
MOUNT_IP = 'localhost' MOUNT_IP = 'localhost'
RUN_DIR = '/var/run/swift' RUN_DIR = '/var/run/swift'
SWIFT_DIR = '/etc/swift' SWIFT_DIR = '/etc/swift'
_do_getsize = False
_allow_mount_per_server = False _allow_mount_per_server = False
_implicit_dir_objects = False
_container_update_object_count = False
_account_update_container_count = False
_ignore_unsupported_headers = False
if _fs_conf.read(os.path.join(SWIFT_DIR, 'fs.conf')): if _fs_conf.read(os.path.join(SWIFT_DIR, 'fs.conf')):
try: try:
@ -49,13 +44,6 @@ if _fs_conf.read(os.path.join(SWIFT_DIR, 'fs.conf')):
except (NoSectionError, NoOptionError): except (NoSectionError, NoOptionError):
pass pass
try:
_do_getsize = _fs_conf.get('DEFAULT',
'accurate_size_in_listing',
"no") in TRUE_VALUES
except (NoSectionError, NoOptionError):
pass
try: try:
_allow_mount_per_server = _fs_conf.get('DEFAULT', _allow_mount_per_server = _fs_conf.get('DEFAULT',
'allow_mount_per_server', 'allow_mount_per_server',
@ -64,55 +52,6 @@ if _fs_conf.read(os.path.join(SWIFT_DIR, 'fs.conf')):
except (NoSectionError, NoOptionError): except (NoSectionError, NoOptionError):
pass pass
# -- Hidden configuration option --
# Report gratuitously created directories as objects
# Directories can be gratuitously created on the path to a given
# object. This option turns on or off the reporting of those directories.
# It defaults to False so that only those directories explicitly
# created by the object server PUT REST API are reported
try:
_implicit_dir_objects = \
_fs_conf.get('DEFAULT',
'implicit_dir_objects',
"no") in TRUE_VALUES
except (NoSectionError, NoOptionError):
pass
# -- Hidden configuration option --
# Due to the impact on performance, this option is disabled by default
try:
_container_update_object_count = \
_fs_conf.get('DEFAULT',
'container_update_object_count',
"no") in TRUE_VALUES
except (NoSectionError, NoOptionError):
pass
# -- Hidden configuration option --
# Due to the impact on performance, this option is disabled by default
try:
_account_update_container_count = \
_fs_conf.get('DEFAULT',
'account_update_container_count',
"no") in TRUE_VALUES
except (NoSectionError, NoOptionError):
pass
# -- Hidden configuration option --
# Ignore unsupported headers and allow them in a request without
# returning a 400-BadRequest. This setting can be set to
# allow unsupported headers such as X-Delete-At and
# X-Delete-After even though they will not be used.
try:
_ignore_unsupported_headers = \
_fs_conf.get('DEFAULT',
'ignore_unsupported_headers',
"no") in TRUE_VALUES
except (NoSectionError, NoOptionError):
pass
NAME = 'glusterfs'
def _busy_wait(full_mount_path): def _busy_wait(full_mount_path):
# Iterate for definite number of time over a given # Iterate for definite number of time over a given

View File

@ -22,7 +22,6 @@ import swift.common.constraints
from gluster.swift.common import Glusterfs from gluster.swift.common import Glusterfs
MAX_OBJECT_NAME_COMPONENT_LENGTH = 255 MAX_OBJECT_NAME_COMPONENT_LENGTH = 255
UNSUPPORTED_HEADERS = []
def set_object_name_component_length(len=None): def set_object_name_component_length(len=None):
@ -54,43 +53,11 @@ def validate_obj_name_component(obj):
return 'cannot be . or ..' return 'cannot be . or ..'
return '' return ''
def validate_headers(req):
"""
Validate client header requests
:param req: Http request
"""
if not Glusterfs._ignore_unsupported_headers:
for unsupported_header in UNSUPPORTED_HEADERS:
if unsupported_header in req.headers:
return '%s headers are not supported' \
% ','.join(UNSUPPORTED_HEADERS)
return ''
# Save the original check object creation # Save the original check object creation
__check_object_creation = swift.common.constraints.check_object_creation __check_object_creation = swift.common.constraints.check_object_creation
__check_metadata = swift.common.constraints.check_metadata __check_metadata = swift.common.constraints.check_metadata
def gluster_check_metadata(req, target_type, POST=True):
"""
:param req: HTTP request object
:param target_type: Value from POST passed to __check_metadata
:param POST: Only call __check_metadata on POST since Swift only
calls check_metadata on POSTs.
"""
ret = None
if POST:
ret = __check_metadata(req, target_type)
if ret is None:
bdy = validate_headers(req)
if bdy:
ret = HTTPBadRequest(body=bdy,
request=req,
content_type='text/plain')
return ret
# Define our new one which invokes the original # Define our new one which invokes the original
def gluster_check_object_creation(req, object_name): def gluster_check_object_creation(req, object_name):
""" """
@ -118,14 +85,10 @@ def gluster_check_object_creation(req, object_name):
ret = HTTPBadRequest(body=bdy, ret = HTTPBadRequest(body=bdy,
request=req, request=req,
content_type='text/plain') content_type='text/plain')
if ret is None:
ret = gluster_check_metadata(req, 'object', POST=False)
return ret return ret
# Replace the original checks with ours # Replace the original checks with ours
swift.common.constraints.check_object_creation = gluster_check_object_creation swift.common.constraints.check_object_creation = gluster_check_object_creation
swift.common.constraints.check_metadata = gluster_check_metadata
# Replace the original check mount with ours # Replace the original check mount with ours
swift.common.constraints.check_mount = Glusterfs.mount swift.common.constraints.check_mount = Glusterfs.mount

View File

@ -23,31 +23,22 @@ from eventlet import sleep
import cPickle as pickle import cPickle as pickle
from gluster.swift.common.exceptions import GlusterFileSystemIOError from gluster.swift.common.exceptions import GlusterFileSystemIOError
from swift.common.exceptions import DiskFileNoSpace from swift.common.exceptions import DiskFileNoSpace
from gluster.swift.common.fs_utils import do_getctime, do_getmtime, do_stat, \ from gluster.swift.common.fs_utils import do_stat, \
do_listdir, do_walk, do_rmdir, do_log_rl, get_filename_from_fd, do_open, \ do_walk, do_rmdir, do_log_rl, get_filename_from_fd, do_open, \
do_isdir, do_getsize, do_getxattr, do_setxattr, do_removexattr, do_read, \ do_getxattr, do_setxattr, do_removexattr, do_read, \
do_close, do_dup, do_lseek, do_fstat, do_fsync, do_rename do_close, do_dup, do_lseek, do_fstat, do_fsync, do_rename
from gluster.swift.common import Glusterfs
X_CONTENT_TYPE = 'Content-Type' X_CONTENT_TYPE = 'Content-Type'
X_CONTENT_LENGTH = 'Content-Length' X_CONTENT_LENGTH = 'Content-Length'
X_TIMESTAMP = 'X-Timestamp' X_TIMESTAMP = 'X-Timestamp'
X_PUT_TIMESTAMP = 'X-PUT-Timestamp'
X_TYPE = 'X-Type' X_TYPE = 'X-Type'
X_ETAG = 'ETag' X_ETAG = 'ETag'
X_OBJECTS_COUNT = 'X-Object-Count'
X_BYTES_USED = 'X-Bytes-Used'
X_CONTAINER_COUNT = 'X-Container-Count'
X_OBJECT_TYPE = 'X-Object-Type' X_OBJECT_TYPE = 'X-Object-Type'
DIR_TYPE = 'application/directory' DIR_TYPE = 'application/directory'
ACCOUNT = 'Account'
METADATA_KEY = 'user.swift.metadata' METADATA_KEY = 'user.swift.metadata'
MAX_XATTR_SIZE = 65536 MAX_XATTR_SIZE = 65536
CONTAINER = 'container'
DIR_NON_OBJECT = 'dir' DIR_NON_OBJECT = 'dir'
DIR_OBJECT = 'marker_dir' DIR_OBJECT = 'marker_dir'
TEMP_DIR = 'tmp'
ASYNCDIR = 'async_pending' # Keep in sync with swift.obj.server.ASYNCDIR
FILE = 'file' FILE = 'file'
FILE_TYPE = 'application/octet-stream' FILE_TYPE = 'application/octet-stream'
OBJECT = 'Object' OBJECT = 'Object'
@ -170,49 +161,6 @@ def clean_metadata(path_or_fd):
key += 1 key += 1
def validate_container(metadata):
if not metadata:
logging.warn('validate_container: No metadata')
return False
if X_TYPE not in metadata.keys() or \
X_TIMESTAMP not in metadata.keys() or \
X_PUT_TIMESTAMP not in metadata.keys() or \
X_OBJECTS_COUNT not in metadata.keys() or \
X_BYTES_USED not in metadata.keys():
return False
(value, timestamp) = metadata[X_TYPE]
if value == CONTAINER:
return True
logging.warn('validate_container: metadata type is not CONTAINER (%r)',
value)
return False
def validate_account(metadata):
if not metadata:
logging.warn('validate_account: No metadata')
return False
if X_TYPE not in metadata.keys() or \
X_TIMESTAMP not in metadata.keys() or \
X_PUT_TIMESTAMP not in metadata.keys() or \
X_OBJECTS_COUNT not in metadata.keys() or \
X_BYTES_USED not in metadata.keys() or \
X_CONTAINER_COUNT not in metadata.keys():
return False
(value, timestamp) = metadata[X_TYPE]
if value == ACCOUNT:
return True
logging.warn('validate_account: metadata type is not ACCOUNT (%r)',
value)
return False
def validate_object(metadata): def validate_object(metadata):
if not metadata: if not metadata:
return False return False
@ -233,86 +181,6 @@ def validate_object(metadata):
return False return False
def _update_list(path, cont_path, src_list, reg_file=True, object_count=0,
bytes_used=0, obj_list=[]):
# strip the prefix off, also stripping the leading and trailing slashes
obj_path = path.replace(cont_path, '').strip(os.path.sep)
for obj_name in src_list:
# If it is not a reg_file then it is a directory.
if not reg_file and not Glusterfs._implicit_dir_objects:
# Now check if this is a dir object or a gratuitously created
# directory
metadata = \
read_metadata(os.path.join(cont_path, obj_path, obj_name))
if not dir_is_object(metadata):
continue
if obj_path:
obj_list.append(os.path.join(obj_path, obj_name))
else:
obj_list.append(obj_name)
object_count += 1
if reg_file and Glusterfs._do_getsize:
bytes_used += do_getsize(os.path.join(path, obj_name))
sleep()
return object_count, bytes_used
def update_list(path, cont_path, dirs=[], files=[], object_count=0,
bytes_used=0, obj_list=[]):
if files:
object_count, bytes_used = _update_list(path, cont_path, files, True,
object_count, bytes_used,
obj_list)
if dirs:
object_count, bytes_used = _update_list(path, cont_path, dirs, False,
object_count, bytes_used,
obj_list)
return object_count, bytes_used
def get_container_details(cont_path):
"""
get container details by traversing the filesystem
"""
bytes_used = 0
object_count = 0
obj_list = []
if do_isdir(cont_path):
for (path, dirs, files) in do_walk(cont_path):
object_count, bytes_used = update_list(path, cont_path, dirs,
files, object_count,
bytes_used, obj_list)
sleep()
return obj_list, object_count, bytes_used
def get_account_details(acc_path):
"""
Return container_list and container_count.
"""
container_list = []
container_count = 0
if do_isdir(acc_path):
for name in do_listdir(acc_path):
if name.lower() == TEMP_DIR \
or name.lower() == ASYNCDIR \
or not do_isdir(os.path.join(acc_path, name)):
continue
container_count += 1
container_list.append(name)
return container_list, container_count
def _read_for_etag(fp): def _read_for_etag(fp):
etag = md5() etag = md5()
while True: while True:
@ -382,49 +250,6 @@ def get_object_metadata(obj_path_or_fd):
return metadata return metadata
def _add_timestamp(metadata_i):
# At this point we have a simple key/value dictionary, turn it into
# key/(value,timestamp) pairs.
timestamp = 0
metadata = {}
for key, value_i in metadata_i.iteritems():
if not isinstance(value_i, tuple):
metadata[key] = (value_i, timestamp)
else:
metadata[key] = value_i
return metadata
def get_container_metadata(cont_path):
objects = []
object_count = 0
bytes_used = 0
objects, object_count, bytes_used = get_container_details(cont_path)
metadata = {X_TYPE: CONTAINER,
X_TIMESTAMP: normalize_timestamp(
do_getctime(cont_path)),
X_PUT_TIMESTAMP: normalize_timestamp(
do_getmtime(cont_path)),
X_OBJECTS_COUNT: object_count,
X_BYTES_USED: bytes_used}
return _add_timestamp(metadata)
def get_account_metadata(acc_path):
containers = []
container_count = 0
containers, container_count = get_account_details(acc_path)
metadata = {X_TYPE: ACCOUNT,
X_TIMESTAMP: normalize_timestamp(
do_getctime(acc_path)),
X_PUT_TIMESTAMP: normalize_timestamp(
do_getmtime(acc_path)),
X_OBJECTS_COUNT: 0,
X_BYTES_USED: 0,
X_CONTAINER_COUNT: container_count}
return _add_timestamp(metadata)
def restore_metadata(path, metadata): def restore_metadata(path, metadata):
meta_orig = read_metadata(path) meta_orig = read_metadata(path)
if meta_orig: if meta_orig:
@ -445,18 +270,6 @@ def create_object_metadata(obj_path_or_fd):
return restore_metadata(obj_path_or_fd, metadata) return restore_metadata(obj_path_or_fd, metadata)
def create_container_metadata(cont_path):
metadata = get_container_metadata(cont_path)
rmd = restore_metadata(cont_path, metadata)
return rmd
def create_account_metadata(acc_path):
metadata = get_account_metadata(acc_path)
rmd = restore_metadata(acc_path, metadata)
return rmd
# The following dir_xxx calls should definitely be replaced # The following dir_xxx calls should definitely be replaced
# with a Metadata class to encapsulate their implementation. # with a Metadata class to encapsulate their implementation.
# :FIXME: For now we have them as functions, but we should # :FIXME: For now we have them as functions, but we should

View File

@ -23,7 +23,6 @@ except ImportError:
import random import random
import logging import logging
import time import time
from collections import defaultdict
from socket import gethostname from socket import gethostname
from hashlib import md5 from hashlib import md5
from eventlet import sleep from eventlet import sleep
@ -31,8 +30,8 @@ from greenlet import getcurrent
from contextlib import contextmanager from contextlib import contextmanager
from gluster.swift.common.exceptions import AlreadyExistsAsFile, \ from gluster.swift.common.exceptions import AlreadyExistsAsFile, \
AlreadyExistsAsDir AlreadyExistsAsDir
from swift.common.utils import TRUE_VALUES, ThreadPool, config_true_value, \ from swift.common.utils import TRUE_VALUES, ThreadPool, hash_path, \
hash_path, normalize_timestamp normalize_timestamp
from swift.common.exceptions import DiskFileNotExist, DiskFileError, \ from swift.common.exceptions import DiskFileNotExist, DiskFileError, \
DiskFileNoSpace, DiskFileDeviceUnavailable, DiskFileNotOpen, \ DiskFileNoSpace, DiskFileDeviceUnavailable, DiskFileNotOpen, \
DiskFileExpired DiskFileExpired
@ -51,19 +50,13 @@ from gluster.swift.common.utils import X_CONTENT_TYPE, \
FILE_TYPE, DEFAULT_UID, DEFAULT_GID, DIR_NON_OBJECT, DIR_OBJECT, \ FILE_TYPE, DEFAULT_UID, DEFAULT_GID, DIR_NON_OBJECT, DIR_OBJECT, \
X_ETAG, X_CONTENT_LENGTH X_ETAG, X_CONTENT_LENGTH
from ConfigParser import ConfigParser, NoSectionError, NoOptionError from ConfigParser import ConfigParser, NoSectionError, NoOptionError
from swift.obj.diskfile import get_async_dir
from swift.obj.diskfile import DiskFileManager as SwiftDiskFileManager from swift.obj.diskfile import DiskFileManager as SwiftDiskFileManager
from swift.obj.diskfile import get_async_dir
# FIXME: Hopefully we'll be able to move to Python 2.7+ where O_CLOEXEC will # FIXME: Hopefully we'll be able to move to Python 2.7+ where O_CLOEXEC will
# be back ported. See http://www.python.org/dev/peps/pep-0433/ # be back ported. See http://www.python.org/dev/peps/pep-0433/
O_CLOEXEC = 02000000 O_CLOEXEC = 02000000
DEFAULT_DISK_CHUNK_SIZE = 65536
DEFAULT_KEEP_CACHE_SIZE = (5 * 1024 * 1024)
DEFAULT_MB_PER_SYNC = 512
# keep these lower-case
DISALLOWED_HEADERS = set('content-length content-type deleted etag'.split())
MAX_RENAME_ATTEMPTS = 10 MAX_RENAME_ATTEMPTS = 10
MAX_OPEN_ATTEMPTS = 10 MAX_OPEN_ATTEMPTS = 10
@ -245,6 +238,8 @@ class DiskFileManager(SwiftDiskFileManager):
""" """
def __init__(self, conf, logger): def __init__(self, conf, logger):
super(DiskFileManager, self).__init__(conf, logger) super(DiskFileManager, self).__init__(conf, logger)
self.reseller_prefix = \
conf.get('reseller_prefix', 'AUTH_').strip()
def get_dev_path(self, device, mount_check=None): def get_dev_path(self, device, mount_check=None):
""" """
@ -262,13 +257,29 @@ class DiskFileManager(SwiftDiskFileManager):
dev_path = os.path.join(self.devices, device) dev_path = os.path.join(self.devices, device)
return dev_path return dev_path
def get_diskfile(self, device, account, container, obj, def get_diskfile(self, device, partition, account, container, obj,
policy_idx=0, **kwargs): policy_idx=0, **kwargs):
dev_path = self.get_dev_path(device) dev_path = self.get_dev_path(device)
if not dev_path: if not dev_path:
raise DiskFileDeviceUnavailable() raise DiskFileDeviceUnavailable()
return DiskFile(self, dev_path, self.threadpools[device], return DiskFile(self, dev_path, self.threadpools[device],
account, container, obj, policy_idx, **kwargs) partition, account, container, obj,
policy_idx=policy_idx, **kwargs)
def pickle_async_update(self, device, account, container, obj, data,
timestamp, policy_idx):
# This method invokes swiftonfile's writepickle method.
# Is patching just write_pickle and calling parent method better ?
device_path = self.construct_dev_path(device)
async_dir = os.path.join(device_path, get_async_dir(policy_idx))
ohash = hash_path(account, container, obj)
self.threadpools[device].run_in_thread(
write_pickle,
data,
os.path.join(async_dir, ohash[-3:], ohash + '-' +
normalize_timestamp(timestamp)),
os.path.join(device_path, 'tmp'))
self.logger.increment('async_pendings')
class DiskFileWriter(object): class DiskFileWriter(object):
@ -586,8 +597,10 @@ class DiskFile(object):
:param uid: user ID disk object should assume (file or directory) :param uid: user ID disk object should assume (file or directory)
:param gid: group ID disk object should assume (file or directory) :param gid: group ID disk object should assume (file or directory)
""" """
def __init__(self, mgr, dev_path, threadpool, account, container, obj, def __init__(self, mgr, dev_path, threadpool, partition,
account=None, container=None, obj=None,
policy_idx=0, uid=DEFAULT_UID, gid=DEFAULT_GID): policy_idx=0, uid=DEFAULT_UID, gid=DEFAULT_GID):
# Variables partition and policy_idx is currently unused.
self._mgr = mgr self._mgr = mgr
self._device_path = dev_path self._device_path = dev_path
self._threadpool = threadpool or ThreadPool(nthreads=0) self._threadpool = threadpool or ThreadPool(nthreads=0)
@ -599,10 +612,9 @@ class DiskFile(object):
self._fd = None self._fd = None
# Don't store a value for data_file until we know it exists. # Don't store a value for data_file until we know it exists.
self._data_file = None self._data_file = None
self._policy_idx = int(policy_idx)
if not hasattr(self._mgr, 'reseller_prefix'): # Is this the right thing to do ? The Swift databases include
self._mgr.reseller_prefix = 'AUTH_' # the reseller_prefix while storing the account name.
if account.startswith(self._mgr.reseller_prefix): if account.startswith(self._mgr.reseller_prefix):
account = account[len(self._mgr.reseller_prefix):] account = account[len(self._mgr.reseller_prefix):]
self._account = account self._account = account

View File

@ -47,11 +47,9 @@ class ObjectController(server.ObjectController):
# Common on-disk hierarchy shared across account, container and object # Common on-disk hierarchy shared across account, container and object
# servers. # servers.
self._diskfile_mgr = DiskFileManager(conf, self.logger) self._diskfile_mgr = DiskFileManager(conf, self.logger)
self._diskfile_mgr.reseller_prefix = \
conf.get('reseller_prefix', 'AUTH_').strip()
def get_diskfile(self, device, partition, account, container, obj, def get_diskfile(self, device, partition, account, container, obj,
policy_idx=0, **kwargs): policy_idx, **kwargs):
""" """
Utility method for instantiating a DiskFile object supporting a given Utility method for instantiating a DiskFile object supporting a given
REST API. REST API.
@ -60,8 +58,8 @@ class ObjectController(server.ObjectController):
DiskFile class would simply over-ride this method to provide that DiskFile class would simply over-ride this method to provide that
behavior. behavior.
""" """
return self._diskfile_mgr.get_diskfile(device, account, container, obj, return self._diskfile_mgr.get_diskfile(
**kwargs) device, partition, account, container, obj, policy_idx, **kwargs)
@public @public
@timing_stats() @timing_stats()

View File

@ -15,7 +15,6 @@
import unittest import unittest
import swift.common.constraints import swift.common.constraints
from nose import SkipTest
from mock import Mock, patch from mock import Mock, patch
from gluster.swift.common import constraints as cnt from gluster.swift.common import constraints as cnt
@ -75,81 +74,9 @@ class TestConstraints(unittest.TestCase):
self.assertTrue(cnt.validate_obj_name_component('..')) self.assertTrue(cnt.validate_obj_name_component('..'))
self.assertTrue(cnt.validate_obj_name_component('')) self.assertTrue(cnt.validate_obj_name_component(''))
def test_validate_headers(self):
req = Mock()
req.headers = []
self.assertEqual(cnt.validate_headers(req), '')
req.headers = ['x-some-header']
self.assertEqual(cnt.validate_headers(req), '')
#TODO: Although we now support x-delete-at and x-delete-after,
#retained this test case as we may add some other header to
#unsupported list in future
raise SkipTest
req.headers = ['x-delete-at', 'x-some-header']
self.assertNotEqual(cnt.validate_headers(req), '')
req.headers = ['x-delete-after', 'x-some-header']
self.assertNotEqual(cnt.validate_headers(req), '')
req.headers = ['x-delete-at', 'x-delete-after', 'x-some-header']
self.assertNotEqual(cnt.validate_headers(req), '')
def test_validate_headers_ignoring_config_set(self):
with patch('gluster.swift.common.constraints.'
'Glusterfs._ignore_unsupported_headers', True):
req = Mock()
req.headers = []
self.assertEqual(cnt.validate_headers(req), '')
req.headers = ['x-some-header']
self.assertEqual(cnt.validate_headers(req), '')
#TODO: Although we now support x-delete-at and x-delete-after,
#retained this test case as we may add some other header to
#unsupported list in future
raise SkipTest
req.headers = ['x-delete-at', 'x-some-header']
self.assertEqual(cnt.validate_headers(req), '')
req.headers = ['x-delete-after', 'x-some-header']
self.assertEqual(cnt.validate_headers(req), '')
req.headers = ['x-delete-at', 'x-delete-after', 'x-some-header']
self.assertEqual(cnt.validate_headers(req), '')
def test_gluster_check_metadata(self):
mock_check_metadata = Mock()
with patch('gluster.swift.common.constraints.__check_metadata',
mock_check_metadata):
req = Mock()
req.headers = []
cnt.gluster_check_metadata(req, 'object')
self.assertTrue(1, mock_check_metadata.call_count)
cnt.gluster_check_metadata(req, 'object', POST=False)
self.assertTrue(1, mock_check_metadata.call_count)
req.headers = ['x-some-header']
self.assertEqual(cnt.gluster_check_metadata(req, 'object', POST=False), None)
#TODO: Although we now support x-delete-at and x-delete-after,
#retained this test case as we may add some other header to
#unsupported list in future
raise SkipTest
req.headers = ['x-delete-at', 'x-some-header']
self.assertNotEqual(cnt.gluster_check_metadata(req, 'object', POST=False), None)
req.headers = ['x-delete-after', 'x-some-header']
self.assertNotEqual(cnt.gluster_check_metadata(req, 'object', POST=False), None)
req.headers = ['x-delete-at', 'x-delete-after', 'x-some-header']
self.assertNotEqual(cnt.gluster_check_metadata(req, 'object', POST=False), None)
def test_gluster_check_object_creation(self): def test_gluster_check_object_creation(self):
with patch('gluster.swift.common.constraints.__check_object_creation', with patch('gluster.swift.common.constraints.__check_object_creation',
mock_check_object_creation): mock_check_object_creation):
req = Mock() req = Mock()
req.headers = [] req.headers = []
self.assertFalse(cnt.gluster_check_object_creation(req, 'dir/z')) self.assertFalse(cnt.gluster_check_object_creation(req, 'dir/z'))
def test_gluster_check_object_creation_err(self):
with patch('gluster.swift.common.constraints.__check_object_creation',
mock_check_object_creation):
req = Mock()
req.headers = []
self.assertTrue(cnt.gluster_check_object_creation(req, 'dir/.'))
#TODO: Although we now support x-delete-at and x-delete-after,
#retained this test case as we may add some other header to
#unsupported list in future
raise SkipTest
req.headers = ['x-delete-at']
self.assertTrue(cnt.gluster_check_object_creation(req, 'dir/z'))

View File

@ -321,28 +321,6 @@ class TestUtils(unittest.TestCase):
assert _xattr_op_cnt['get'] == 1, "%r" % _xattr_op_cnt assert _xattr_op_cnt['get'] == 1, "%r" % _xattr_op_cnt
assert _xattr_op_cnt['set'] == 0, "%r" % _xattr_op_cnt assert _xattr_op_cnt['set'] == 0, "%r" % _xattr_op_cnt
def test_add_timestamp_empty(self):
orig = {}
res = utils._add_timestamp(orig)
assert res == {}
def test_add_timestamp_none(self):
orig = {'a': 1, 'b': 2, 'c': 3}
exp = {'a': (1, 0), 'b': (2, 0), 'c': (3, 0)}
res = utils._add_timestamp(orig)
assert res == exp
def test_add_timestamp_mixed(self):
orig = {'a': 1, 'b': (2, 1), 'c': 3}
exp = {'a': (1, 0), 'b': (2, 1), 'c': (3, 0)}
res = utils._add_timestamp(orig)
assert res == exp
def test_add_timestamp_all(self):
orig = {'a': (1, 0), 'b': (2, 1), 'c': (3, 0)}
res = utils._add_timestamp(orig)
assert res == orig
def test_get_etag_empty(self): def test_get_etag_empty(self):
tf = tempfile.NamedTemporaryFile() tf = tempfile.NamedTemporaryFile()
hd = utils._get_etag(tf.name) hd = utils._get_etag(tf.name)
@ -455,235 +433,6 @@ class TestUtils(unittest.TestCase):
finally: finally:
os.rmdir(td) os.rmdir(td)
def test_get_container_metadata(self):
def _mock_get_container_details(path):
o_list = ['a', 'b', 'c']
o_count = 3
b_used = 47
return o_list, o_count, b_used
orig_gcd = utils.get_container_details
utils.get_container_details = _mock_get_container_details
td = tempfile.mkdtemp()
try:
exp_md = {
utils.X_TYPE: (utils.CONTAINER, 0),
utils.X_TIMESTAMP: (utils.normalize_timestamp(os.path.getctime(td)), 0),
utils.X_PUT_TIMESTAMP: (utils.normalize_timestamp(os.path.getmtime(td)), 0),
utils.X_OBJECTS_COUNT: (3, 0),
utils.X_BYTES_USED: (47, 0),
}
md = utils.get_container_metadata(td)
assert md == exp_md
finally:
utils.get_container_details = orig_gcd
os.rmdir(td)
def test_get_account_metadata(self):
def _mock_get_account_details(path):
c_list = ['123', 'abc']
c_count = 2
return c_list, c_count
orig_gad = utils.get_account_details
utils.get_account_details = _mock_get_account_details
td = tempfile.mkdtemp()
try:
exp_md = {
utils.X_TYPE: (utils.ACCOUNT, 0),
utils.X_TIMESTAMP: (utils.normalize_timestamp(os.path.getctime(td)), 0),
utils.X_PUT_TIMESTAMP: (utils.normalize_timestamp(os.path.getmtime(td)), 0),
utils.X_OBJECTS_COUNT: (0, 0),
utils.X_BYTES_USED: (0, 0),
utils.X_CONTAINER_COUNT: (2, 0),
}
md = utils.get_account_metadata(td)
assert md == exp_md
finally:
utils.get_account_details = orig_gad
os.rmdir(td)
cont_keys = [utils.X_TYPE, utils.X_TIMESTAMP, utils.X_PUT_TIMESTAMP,
utils.X_OBJECTS_COUNT, utils.X_BYTES_USED]
def test_create_container_metadata(self):
td = tempfile.mkdtemp()
try:
r_md = utils.create_container_metadata(td)
xkey = _xkey(td, utils.METADATA_KEY)
assert len(_xattrs.keys()) == 1
assert xkey in _xattrs
assert _xattr_op_cnt['get'] == 1
assert _xattr_op_cnt['set'] == 1
md = pickle.loads(_xattrs[xkey])
assert r_md == md
for key in self.cont_keys:
assert key in md, "Expected key %s in %r" % (key, md)
assert md[utils.X_TYPE] == (utils.CONTAINER, 0)
assert md[utils.X_TIMESTAMP] == (utils.normalize_timestamp(os.path.getctime(td)), 0)
assert md[utils.X_PUT_TIMESTAMP] == (utils.normalize_timestamp(os.path.getmtime(td)), 0)
assert md[utils.X_OBJECTS_COUNT] == (0, 0)
assert md[utils.X_BYTES_USED] == (0, 0)
finally:
os.rmdir(td)
acct_keys = [val for val in cont_keys]
acct_keys.append(utils.X_CONTAINER_COUNT)
def test_create_account_metadata(self):
td = tempfile.mkdtemp()
try:
r_md = utils.create_account_metadata(td)
xkey = _xkey(td, utils.METADATA_KEY)
assert len(_xattrs.keys()) == 1
assert xkey in _xattrs
assert _xattr_op_cnt['get'] == 1
assert _xattr_op_cnt['set'] == 1
md = pickle.loads(_xattrs[xkey])
assert r_md == md
for key in self.acct_keys:
assert key in md, "Expected key %s in %r" % (key, md)
assert md[utils.X_TYPE] == (utils.ACCOUNT, 0)
assert md[utils.X_TIMESTAMP] == (utils.normalize_timestamp(os.path.getctime(td)), 0)
assert md[utils.X_PUT_TIMESTAMP] == (utils.normalize_timestamp(os.path.getmtime(td)), 0)
assert md[utils.X_OBJECTS_COUNT] == (0, 0)
assert md[utils.X_BYTES_USED] == (0, 0)
assert md[utils.X_CONTAINER_COUNT] == (0, 0)
finally:
os.rmdir(td)
def test_get_account_details(self):
orig_cwd = os.getcwd()
td = tempfile.mkdtemp()
try:
tf = tarfile.open("common/data/account_tree.tar.bz2", "r:bz2")
os.chdir(td)
tf.extractall()
container_list, container_count = utils.get_account_details(td)
assert container_count == 3
assert set(container_list) == set(['c1', 'c2', 'c3'])
finally:
os.chdir(orig_cwd)
shutil.rmtree(td)
def test_get_account_details_notadir(self):
tf = tempfile.NamedTemporaryFile()
container_list, container_count = utils.get_account_details(tf.name)
assert container_count == 0
assert container_list == []
def test_get_container_details_notadir(self):
tf = tempfile.NamedTemporaryFile()
obj_list, object_count, bytes_used = \
utils.get_container_details(tf.name)
assert bytes_used == 0
assert object_count == 0
assert obj_list == []
def test_get_container_details(self):
orig_cwd = os.getcwd()
__do_getsize = Glusterfs._do_getsize
td = tempfile.mkdtemp()
try:
tf = tarfile.open("common/data/container_tree.tar.bz2", "r:bz2")
os.chdir(td)
tf.extractall()
Glusterfs._do_getsize = False
obj_list, object_count, bytes_used = \
utils.get_container_details(td)
assert bytes_used == 0, repr(bytes_used)
# Should not include the directories
assert object_count == 5, repr(object_count)
assert set(obj_list) == set(['file1', 'file3', 'file2',
'dir1/file1', 'dir1/file2'
]), repr(obj_list)
finally:
Glusterfs._do_getsize = __do_getsize
os.chdir(orig_cwd)
shutil.rmtree(td)
def test_get_container_details_from_fs_do_getsize_true(self):
orig_cwd = os.getcwd()
__do_getsize = Glusterfs._do_getsize
td = tempfile.mkdtemp()
try:
tf = tarfile.open("common/data/container_tree.tar.bz2", "r:bz2")
os.chdir(td)
tf.extractall()
Glusterfs._do_getsize = True
obj_list, object_count, bytes_used = \
utils.get_container_details(td)
assert bytes_used == 30, repr(bytes_used)
assert object_count == 5, repr(object_count)
assert set(obj_list) == set(['file1', 'file3', 'file2',
'dir1/file1', 'dir1/file2'
]), repr(obj_list)
finally:
Glusterfs._do_getsize = __do_getsize
os.chdir(orig_cwd)
shutil.rmtree(td)
def test_validate_container_empty(self):
ret = utils.validate_container({})
assert not ret
def test_validate_container_missing_keys(self):
ret = utils.validate_container({'foo': 'bar'})
assert not ret
def test_validate_container_bad_type(self):
md = {utils.X_TYPE: ('bad', 0),
utils.X_TIMESTAMP: ('na', 0),
utils.X_PUT_TIMESTAMP: ('na', 0),
utils.X_OBJECTS_COUNT: ('na', 0),
utils.X_BYTES_USED: ('na', 0)}
ret = utils.validate_container(md)
assert not ret
def test_validate_container_good_type(self):
md = {utils.X_TYPE: (utils.CONTAINER, 0),
utils.X_TIMESTAMP: ('na', 0),
utils.X_PUT_TIMESTAMP: ('na', 0),
utils.X_OBJECTS_COUNT: ('na', 0),
utils.X_BYTES_USED: ('na', 0)}
ret = utils.validate_container(md)
assert ret
def test_validate_account_empty(self):
ret = utils.validate_account({})
assert not ret
def test_validate_account_missing_keys(self):
ret = utils.validate_account({'foo': 'bar'})
assert not ret
def test_validate_account_bad_type(self):
md = {utils.X_TYPE: ('bad', 0),
utils.X_TIMESTAMP: ('na', 0),
utils.X_PUT_TIMESTAMP: ('na', 0),
utils.X_OBJECTS_COUNT: ('na', 0),
utils.X_BYTES_USED: ('na', 0),
utils.X_CONTAINER_COUNT: ('na', 0)}
ret = utils.validate_account(md)
assert not ret
def test_validate_account_good_type(self):
md = {utils.X_TYPE: (utils.ACCOUNT, 0),
utils.X_TIMESTAMP: ('na', 0),
utils.X_PUT_TIMESTAMP: ('na', 0),
utils.X_OBJECTS_COUNT: ('na', 0),
utils.X_BYTES_USED: ('na', 0),
utils.X_CONTAINER_COUNT: ('na', 0)}
ret = utils.validate_account(md)
assert ret
def test_validate_object_empty(self): def test_validate_object_empty(self):
ret = utils.validate_object({}) ret = utils.validate_object({})
assert not ret assert not ret

View File

@ -150,7 +150,7 @@ class TestDiskFile(unittest.TestCase):
shutil.rmtree(self.td) shutil.rmtree(self.td)
def _get_diskfile(self, d, p, a, c, o, **kwargs): def _get_diskfile(self, d, p, a, c, o, **kwargs):
return self.mgr.get_diskfile(d, a, c, o, **kwargs) return self.mgr.get_diskfile(d, p, a, c, o, **kwargs)
def test_constructor_no_slash(self): def test_constructor_no_slash(self):
gdf = self._get_diskfile("vol0", "p57", "ufo47", "bar", "z") gdf = self._get_diskfile("vol0", "p57", "ufo47", "bar", "z")