From ebddcb6d8f7cd216a1fc400a1d0fb195bbbd1ec3 Mon Sep 17 00:00:00 2001
From: Luis Pabon <lpabon@redhat.com>
Date: Thu, 22 May 2014 14:55:16 -0400
Subject: [PATCH] Tests to verify locally deployed software.

The difference between these tests and those under the functional
directory is that these tests do not run under tox, and so they
require a full OpenStack Swift + swiftonfile deployment.

The conf files were copied from the gswauth test confs, but I modified
them to enable the mount check.
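
For reference, each of the account-, container-, and object-server
confs added by this patch carries the following excerpt:

    [DEFAULT]
    devices = /mnt/gluster-object
    mount_check = true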

The purpose of these tests is to confirm the quality of the software
in a deployed system.

The tests require the following (an example setup is sketched below):
- GlusterFS Volumes: test, test2, gsmetadata
- Directory /mnt/gluster-object
- OpenStack Swift + swiftonfile to be installed.
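
For example, on a single-node setup the volumes can be created and the
suite run (as root, from the top of the source tree) roughly as follows.
The hostname and brick paths are only illustrative and are not part of
this patch:

    gluster volume create test $(hostname):/bricks/test force
    gluster volume create test2 $(hostname):/bricks/test2 force
    gluster volume create gsmetadata $(hostname):/bricks/gsmetadata force
    for v in test test2 gsmetadata; do gluster volume start $v; done
    mkdir -p /mnt/gluster-object
    bash test/deploy/glusterfs/tests.sh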

Signed-off-by: Luis Pabon <lpabon@redhat.com>
---
 .../deploy/glusterfs/conf/account-server.conf |  32 ++++++
 .../glusterfs/conf/container-server.conf      |  35 ++++++
 test/deploy/glusterfs/conf/fs.conf            |  19 ++++
 .../deploy/glusterfs/conf/object-expirer.conf |  17 +++
 test/deploy/glusterfs/conf/object-server.conf |  48 ++++++++
 test/deploy/glusterfs/conf/proxy-server.conf  |  78 +++++++++++++
 test/deploy/glusterfs/conf/swift.conf         |  85 ++++++++++++++
 test/deploy/glusterfs/conf/test.conf          |  58 ++++++++++
 test/deploy/glusterfs/tests.sh                | 106 ++++++++++++++++++
 9 files changed, 478 insertions(+)
 create mode 100644 test/deploy/glusterfs/conf/account-server.conf
 create mode 100644 test/deploy/glusterfs/conf/container-server.conf
 create mode 100644 test/deploy/glusterfs/conf/fs.conf
 create mode 100644 test/deploy/glusterfs/conf/object-expirer.conf
 create mode 100644 test/deploy/glusterfs/conf/object-server.conf
 create mode 100644 test/deploy/glusterfs/conf/proxy-server.conf
 create mode 100644 test/deploy/glusterfs/conf/swift.conf
 create mode 100644 test/deploy/glusterfs/conf/test.conf
 create mode 100755 test/deploy/glusterfs/tests.sh

diff --git a/test/deploy/glusterfs/conf/account-server.conf b/test/deploy/glusterfs/conf/account-server.conf
new file mode 100644
index 0000000..ca7f40f
--- /dev/null
+++ b/test/deploy/glusterfs/conf/account-server.conf
@@ -0,0 +1,32 @@
+[DEFAULT]
+devices = /mnt/gluster-object
+#
+# Once you are confident that your startup processes will always have your
+# gluster volumes properly mounted *before* the account-server workers start,
+# you can *consider* setting this value to "false" to reduce the per-request
+# overhead it can incur.
+#
+# *** Keep true for these deployment tests (mount check is exercised) ***
+mount_check = true
+bind_port = 6012
+#
+# Override swift's default behaviour for fallocate.
+disable_fallocate = true
+#
+# One or two workers should be sufficient for almost any installation of
+# Gluster.
+workers = 1
+
+[pipeline:main]
+pipeline = account-server
+
+[app:account-server]
+use = egg:gluster_swift#account
+user = root
+log_facility = LOG_LOCAL2
+log_level = WARN
+#
+# After ensuring things are running in a stable manner, you can turn off
+# normal request logging for the account server to unclutter the log
+# files. Warnings and errors will still be logged.
+log_requests = off
diff --git a/test/deploy/glusterfs/conf/container-server.conf b/test/deploy/glusterfs/conf/container-server.conf
new file mode 100644
index 0000000..2c6cbe4
--- /dev/null
+++ b/test/deploy/glusterfs/conf/container-server.conf
@@ -0,0 +1,35 @@
+[DEFAULT]
+devices = /mnt/gluster-object
+#
+# Once you are confident that your startup processes will always have your
+# gluster volumes properly mounted *before* the container-server workers
+# start, you can *consider* setting this value to "false" to reduce the
+# per-request overhead it can incur.
+#
+# *** Keep true for these deployment tests (mount check is exercised) ***
+mount_check = true
+bind_port = 6011
+#
+# Override swift's default behaviour for fallocate.
+disable_fallocate = true
+#
+# One or two workers should be sufficient for almost any installation of
+# Gluster.
+workers = 1
+
+[pipeline:main]
+pipeline = container-server
+
+[app:container-server]
+use = egg:gluster_swift#container
+user = root
+log_facility = LOG_LOCAL2
+log_level = WARN
+#
+# After ensuring things are running in a stable manner, you can turn off
+# normal request logging for the container server to unclutter the log
+# files. Warnings and errors will still be logged.
+log_requests = off
+
+# Enable object versioning for the functional tests
+allow_versions = on
diff --git a/test/deploy/glusterfs/conf/fs.conf b/test/deploy/glusterfs/conf/fs.conf
new file mode 100644
index 0000000..b06a854
--- /dev/null
+++ b/test/deploy/glusterfs/conf/fs.conf
@@ -0,0 +1,19 @@
+[DEFAULT]
+#
+# IP address of a node in the GlusterFS server cluster hosting the
+# volumes to be served via Swift API.
+mount_ip = localhost
+
+# Performance optimization parameter. When turned off, the filesystem will
+# see a reduced number of stat calls, resulting in substantially faster
+# response time for GET and HEAD container requests on containers with large
+# numbers of objects, at the expense of an accurate count of combined bytes
+# used by all objects in the container. For most installations "off" works
+# fine.
+#
+# *** Keep on for Functional Tests ***
+accurate_size_in_listing = on
+
+# *** Keep on for Functional Tests ***
+container_update_object_count = on
+account_update_container_count = on
diff --git a/test/deploy/glusterfs/conf/object-expirer.conf b/test/deploy/glusterfs/conf/object-expirer.conf
new file mode 100644
index 0000000..b75963c
--- /dev/null
+++ b/test/deploy/glusterfs/conf/object-expirer.conf
@@ -0,0 +1,17 @@
+[DEFAULT]
+
+[object-expirer]
+# auto_create_account_prefix = .
+
+[pipeline:main]
+pipeline = catch_errors cache proxy-server
+
+[app:proxy-server]
+use = egg:swift#proxy
+
+[filter:cache]
+use = egg:swift#memcache
+memcache_servers = 127.0.0.1:11211
+
+[filter:catch_errors]
+use = egg:swift#catch_errors
diff --git a/test/deploy/glusterfs/conf/object-server.conf b/test/deploy/glusterfs/conf/object-server.conf
new file mode 100644
index 0000000..c219c14
--- /dev/null
+++ b/test/deploy/glusterfs/conf/object-server.conf
@@ -0,0 +1,48 @@
+[DEFAULT]
+devices = /mnt/gluster-object
+#
+# Once you are confident that your startup processes will always have your
+# gluster volumes properly mounted *before* the object-server workers start,
+# you can *consider* setting this value to "false" to reduce the per-request
+# overhead it can incur.
+#
+# *** Keep true for these deployment tests (mount check is exercised) ***
+mount_check = true
+bind_port = 6010
+#
+# Maximum number of clients one worker can process simultaneously (it will
+# actually accept N + 1). Setting this to one (1) will only handle one request
+# at a time, without accepting another request concurrently. By increasing the
+# number of workers to a much higher value, one can prevent slow file system
+# operations for one request from starving other requests.
+max_clients = 1024
+#
+# If not doing the above, setting this value initially to match the number of
+# CPUs is a good starting point for determining the right value.
+workers = 1
+# Override swift's default behaviour for fallocate.
+disable_fallocate = true
+
+[pipeline:main]
+pipeline = object-server
+
+[app:object-server]
+use = egg:gluster_swift#object
+user = root
+log_facility = LOG_LOCAL2
+log_level = WARN
+#
+# For performance, after ensuring things are running in a stable manner, you
+# can turn off normal request logging for the object server to reduce the
+# per-request overhead and unclutter the log files. Warnings and errors will
+# still be logged.
+log_requests = off
+#
+# Adjust this value to match the stripe width of the underlying storage array
+# (not the stripe element size). This will provide a reasonable starting point
+# for tuning this value.
+disk_chunk_size = 65536
+#
+# Adjust this value to match whatever is set for disk_chunk_size initially.
+# This will provide a reasonable starting point for tuning this value.
+network_chunk_size = 65536
diff --git a/test/deploy/glusterfs/conf/proxy-server.conf b/test/deploy/glusterfs/conf/proxy-server.conf
new file mode 100644
index 0000000..165cb0c
--- /dev/null
+++ b/test/deploy/glusterfs/conf/proxy-server.conf
@@ -0,0 +1,78 @@
+[DEFAULT]
+bind_port = 8080
+user = root
+# Consider using 1 worker per CPU
+workers = 1
+
+[pipeline:main]
+pipeline = catch_errors healthcheck proxy-logging cache gswauth proxy-logging proxy-server
+
+[app:proxy-server]
+use = egg:gluster_swift#proxy
+log_facility = LOG_LOCAL1
+log_level = WARN
+# The API allows for account creation and deletion, but since Gluster/Swift
+# automounts a Gluster volume for a given account, there is no way to create
+# or delete an account. So leave this off.
+allow_account_management = false
+account_autocreate = true
+# Ensure the proxy server uses fast-POSTs since we don't need to make a copy
+# of the entire object given that all metadata is stored in the object
+# extended attributes (no .meta file used after creation) and no container
+# sync feature is present.
+object_post_as_copy = false
+# Only need to recheck the account exists once a day
+recheck_account_existence = 86400
+# May want to consider bumping this up if containers are created and destroyed
+# infrequently.
+recheck_container_existence = 60
+# Timeout clients that don't read or write to the proxy server after 5
+# seconds.
+client_timeout = 5
+# Give more time to connect to the object, container or account servers in
+# cases of high load.
+conn_timeout = 5
+# For high load situations, once connected to an object, container or account
+# server, allow for delays communicating with them.
+node_timeout = 60
+# May want to consider bumping up this value to 1 - 4 MB depending on how much
+# traffic is for multi-megabyte or gigabyte requests; perhaps matching the
+# stripe width (not stripe element size) of your storage volume is a good
+# starting point. See below for sizing information.
+object_chunk_size = 65536
+# If you do decide to increase the object_chunk_size, then consider lowering
+# this value to one. Up to "put_queue_depth" object_chunk_size'd buffers can
+# be queued to the object server for processing. Given one proxy server worker
+# can handle up to 1,024 connections, by default, it will consume 10 * 65,536
+# * 1,024 bytes of memory in the worst case (default values). Be sure the
+# amount of memory available on the system can accommodate increased values
+# for object_chunk_size.
+put_queue_depth = 10
+
+[filter:catch_errors]
+use = egg:swift#catch_errors
+
+[filter:proxy-logging]
+use = egg:swift#proxy_logging
+
+[filter:healthcheck]
+use = egg:swift#healthcheck
+
+[filter:tempauth]
+use = egg:swift#tempauth
+user_admin_admin = admin .admin .reseller_admin
+user_test_tester = testing .admin
+user_test2_tester2 = testing2 .admin
+user_test_tester3 = testing3
+
+[filter:gswauth]
+use = egg:gluster_swift#gswauth
+set log_name = gswauth
+super_admin_key = gswauthkey
+metadata_volume = gsmetadata
+
+[filter:cache]
+use = egg:swift#memcache
+# Update this line to contain a comma separated list of memcache servers
+# shared by all nodes running the proxy-server service.
+memcache_servers = localhost:11211
diff --git a/test/deploy/glusterfs/conf/swift.conf b/test/deploy/glusterfs/conf/swift.conf
new file mode 100644
index 0000000..f64ba5a
--- /dev/null
+++ b/test/deploy/glusterfs/conf/swift.conf
@@ -0,0 +1,85 @@
+[DEFAULT]
+
+
+[swift-hash]
+# random unique string that can never change (DO NOT LOSE)
+swift_hash_path_suffix = gluster
+
+
+# The swift-constraints section sets the basic constraints on data
+# saved in the swift cluster.
+
+[swift-constraints]
+
+# max_file_size is the largest "normal" object that can be saved in
+# the cluster. This is also the limit on the size of each segment of
+# a "large" object when using the large object manifest support.
+# This value is set in bytes. Setting it to lower than 1MiB will cause
+# some tests to fail.
+# Default is 1 TiB = 2**30*1024
+max_file_size = 1099511627776
+
+
+# max_meta_name_length is the max number of bytes in the utf8 encoding
+# of the name portion of a metadata header.
+
+#max_meta_name_length = 128
+
+
+# max_meta_value_length is the max number of bytes in the utf8 encoding
+# of a metadata value
+
+#max_meta_value_length = 256
+
+
+# max_meta_count is the max number of metadata keys that can be stored
+# on a single account, container, or object
+
+#max_meta_count = 90
+
+
+# max_meta_overall_size is the max number of bytes in the utf8 encoding
+# of the metadata (keys + values)
+
+#max_meta_overall_size = 4096
+
+
+# max_object_name_length is the max number of bytes in the utf8 encoding of an
+# object name: Gluster FS can handle much longer file names, but the length
+# between the slashes of the URL is handled below. Remember that most web
+# clients can't handle anything greater than 2048, and those that do are
+# rather clumsy.
+
+max_object_name_length = 2048
+
+# max_object_name_component_length (GlusterFS) is the max number of bytes in
+# the utf8 encoding of an object name component (the part between the
+# slashes); this is a limit imposed by the underlying file system (for XFS it
+# is 255 bytes).
+
+max_object_name_component_length = 255
+
+# container_listing_limit is the default (and max) number of items
+# returned for a container listing request
+
+#container_listing_limit = 10000
+
+
+# account_listing_limit is the default (and max) number of items returned
+# for an account listing request
+
+#account_listing_limit = 10000
+
+
+# max_account_name_length is the max number of bytes in the utf8 encoding of
+# an account name: Gluster FS Filename limit (XFS limit?), must be the same
+# size as max_object_name_component_length above.
+
+max_account_name_length = 255
+
+
+# max_container_name_length is the max number of bytes in the utf8 encoding
+# of a container name: Gluster FS Filename limit (XFS limit?), must be the same
+# size as max_object_name_component_length above.
+
+max_container_name_length = 255
diff --git a/test/deploy/glusterfs/conf/test.conf b/test/deploy/glusterfs/conf/test.conf
new file mode 100644
index 0000000..15c9aea
--- /dev/null
+++ b/test/deploy/glusterfs/conf/test.conf
@@ -0,0 +1,58 @@
+[func_test]
+# sample config
+auth_host = 127.0.0.1
+auth_port = 8080
+auth_ssl = no
+auth_prefix = /auth/
+## sample config for Swift with Keystone
+#auth_version = 2
+#auth_host = localhost
+#auth_port = 5000
+#auth_ssl = no
+#auth_prefix = /v2.0/
+
+# GSWauth internal admin user configuration information
+admin_key = gswauthkey
+admin_user = .super_admin
+
+# Gluster setup information
+devices = /mnt/gluster-object
+gsmetadata_volume = gsmetadata
+
+# Primary functional test account (needs admin access to the account)
+account = test
+username = tester
+password = testing
+
+# User on a second account (needs admin access to the account)
+account2 = test2
+username2 = tester2
+password2 = testing2
+
+# User on same account as first, but without admin access
+username3 = tester3
+password3 = testing3
+
+# Default constraints. If not defined here, the test runner will try
+# to read them from /etc/swift/swift.conf. If that file isn't found,
+# the test runner will skip tests that depend on these values.
+# Note that the cluster must have "sane" values for the test suite to pass.
+#max_file_size = 5368709122
+#max_meta_name_length = 128
+#max_meta_value_length = 256
+#max_meta_count = 90
+#max_meta_overall_size = 4096
+#max_object_name_length = 1024
+#container_listing_limit = 10000
+#account_listing_limit = 10000
+#max_account_name_length = 256
+#max_container_name_length = 256
+normalized_urls = True
+
+collate = C
+
+[unit_test]
+fake_syslog = False
+
+[probe_test]
+# check_server_timeout = 30
diff --git a/test/deploy/glusterfs/tests.sh b/test/deploy/glusterfs/tests.sh
new file mode 100755
index 0000000..ee1b683
--- /dev/null
+++ b/test/deploy/glusterfs/tests.sh
@@ -0,0 +1,106 @@
+#!/bin/bash
+
+# Copyright (c) 2014 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This program expects to be run against a locally deployed swiftonfile
+# application.  These tests also expect three GlusterFS volumes to have
+# been created: 'test', 'test2', and 'gsmetadata'.
+
+cleanup()
+{
+    service memcached stop
+    swift-init main stop
+    if [ -d /etc/swift.bak ] ; then
+        rm -rf /etc/swift > /dev/null 2>&1
+        mv /etc/swift.bak /etc/swift > /dev/null 2>&1
+    fi
+    rm -rf /mnt/gluster-object/test{,2}/* > /dev/null 2>&1
+    setfattr -x user.swift.metadata /mnt/gluster-object/test{,2} > /dev/null 2>&1
+    gswauth_cleanup
+}
+
+gswauth_cleanup()
+{
+    rm -rf /mnt/gluster-object/gsmetadata/.* > /dev/null 2>&1
+    rm -rf /mnt/gluster-object/gsmetadata/* > /dev/null 2>&1
+    setfattr -x user.swift.metadata /mnt/gluster-object/gsmetadata > /dev/null 2>&1
+}
+
+quit()
+{
+    echo "$1"
+    exit 1
+}
+
+
+fail()
+{
+    cleanup
+    quit "$1"
+}
+
+run_generic_tests()
+{
+    # clean up gsmetadata dir
+    gswauth_cleanup
+
+    # Prep gswauth
+    gswauth-prep -K gswauthkey || fail "Unable to prep gswauth"
+    gswauth-add-user -K gswauthkey -a test tester testing || fail "Unable to add user test"
+    gswauth-add-user -K gswauthkey -a test2 tester2 testing2 || fail "Unable to add user test2"
+    gswauth-add-user -K gswauthkey test tester3 testing3 || fail "Unable to add user test3"
+
+    nosetests -v --exe \
+        --with-xunit \
+        --xunit-file functional_tests/gluster-swift-gswauth-generic-functional-TC-report.xml \
+        test/functional || fail "Functional tests failed"
+    nosetests -v --exe \
+        --with-xunit \
+        --xunit-file functional_tests/gluster-swift-gswauth-functionalnosetests-TC-report.xml \
+        test/functionalnosetests || fail "Functional-nose tests failed"
+}
+
+### MAIN ###
+
+# Backup the swift directory if it already exists
+if [ -d /etc/swift ] ; then
+    mv /etc/swift /etc/swift.bak
+fi
+
+export SWIFT_TEST_CONFIG_FILE=/etc/swift/test.conf
+
+# Install the configuration files
+mkdir /etc/swift > /dev/null 2>&1
+cp -r test/deploy/glusterfs/conf/* /etc/swift || fail "Unable to copy configuration files to /etc/swift"
+gluster-swift-gen-builders test test2 gsmetadata || fail "Unable to create ring files"
+
+# Start the services
+service memcached start || fail "Unable to start memcached"
+swift-init main start || fail "Unable to start swift"
+
+# Prep gswauth
+gswauth-prep -K gswauthkey || fail "Unable to prep gswauth"
+
+mkdir functional_tests > /dev/null 2>&1
+nosetests -v --exe \
+    --with-xunit \
+    --xunit-file functional_tests/gluster-swift-gswauth-functional-TC-report.xml \
+    test/functional_auth/gswauth || fail "Functional gswauth test failed"
+
+run_generic_tests
+
+cleanup
+exit 0