test: add support for functional tests

This patch adds an xfstests-style functional test suite.  Run "tox -e tempauth"
to test swift3 with TempAuth, and "tox -e keystone" to test with the Keystone
auth system.  You don't need to prepare Swift or Keystone systems beforehand;
they will be started automatically with a minimal configuration.

If you already have a Swift cluster for the functional test, run "./check" in
the test directory directly.  Individual tests can be run using "./check 003",
and various other options are also supported.  Try "./check -h" for more
information.

I added some sample test cases, but they are obviously far from exhaustive.
More tests will be added soon.

Change-Id: I75abce574768abbe88f60d8c1eee87757651e357
This commit is contained in:
MORITA Kazutaka 2014-06-24 02:20:40 +09:00
parent 1f762cca42
commit a7373a55f5
31 changed files with 2492 additions and 1 deletions

10
swift3/test/functional/.gitignore vendored Normal file
View File

@ -0,0 +1,10 @@
*.out.bad
*.notrun
*.full
check.log
check.time
scratch
.s3curl
conf/*.conf
htmlcov

12
swift3/test/functional/001 Executable file
View File

@ -0,0 +1,12 @@
#!/bin/bash
# GET Service
. ./common

# Use the plain "tester" credentials defined in common.config.
S3USER=tester

# Create two buckets whose names exercise the permitted character set
# (lower/upper case letters, digits, '_', '.' and '-').
_s3_put /abcdefghijklmnopqrstuvwxyz_01234
_s3_put /5ABCDEFGHIJKLMNOPQRSTUVWXYZ.5-6789

# GET Service must list both buckets.  Headers and XML body are
# normalized by the filters so "check" can diff against 001.out.
_s3_get / -D - | _filter_curl xml

View File

@ -0,0 +1,23 @@
QA output created by 001
> s3curl --id tester -- -X PUT http://SWIFT_HOST/abcdefghijklmnopqrstuvwxyz_01234... 200
> s3curl --id tester -- -X PUT http://SWIFT_HOST/5ABCDEFGHIJKLMNOPQRSTUVWXYZ.5-6789... 200
> s3curl --id tester -- -X GET -D - http://SWIFT_HOST/... 200
HTTP/1.1 200 OK
Content-Length: LENGTH
Content-Type: application/xml
Date: DATE
X-Trans-Id: TXID
<?xml version='1.0' encoding='UTF-8'?>
<ListAllMyBucketsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Buckets>
<Bucket>
<Name>5ABCDEFGHIJKLMNOPQRSTUVWXYZ.5-6789</Name>
<CreationDate>DATE</CreationDate>
</Bucket>
<Bucket>
<Name>abcdefghijklmnopqrstuvwxyz_01234</Name>
<CreationDate>DATE</CreationDate>
</Bucket>
</Buckets>
</ListAllMyBucketsResult>

14
swift3/test/functional/002 Executable file
View File

@ -0,0 +1,14 @@
#!/bin/bash
# PUT Bucket
. ./common

# Build a CreateBucketConfiguration request body with a location
# constraint (per the S3 PUT Bucket API).
echo '
<CreateBucketConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<LocationConstraint>US</LocationConstraint>
</CreateBucketConfiguration>' > $tmp.default.xml

S3USER=tester
# PUT the bucket, uploading the configuration body and dumping response
# headers so they can be diffed against the golden output.
_s3_put /bucket -D - -T $tmp.default.xml | _filter_curl

View File

@ -0,0 +1,9 @@
QA output created by 002
> s3curl --id tester -- -X PUT -D - -T /TMP.default.xml http://SWIFT_HOST/bucket... 200
HTTP/1.1 200 OK
Content-Length: LENGTH
Content-Type: text/html; charset=UTF-8
Date: DATE
Location: bucket
X-Trans-Id: TXID

16
swift3/test/functional/003 Executable file
View File

@ -0,0 +1,16 @@
#!/bin/bash
# GET Bucket
. ./common

S3USER=tester

# Populate a bucket with objects under several "directory" prefixes.
# The /bucket/pho/ object deliberately shares a leading substring with
# photos/ to verify prefix matching is exact.
_s3_put /bucket
_s3_put /bucket/sample.jpg -T /dev/null
_s3_put /bucket/photos/2006/January/sample.jpg -T /dev/null
_s3_put /bucket/photos/2006/February/sample2.jpg -T /dev/null
_s3_put /bucket/photos/2006/February/sample3.jpg -T /dev/null
_s3_put /bucket/pho/2006/February/sample4.jpg -T /dev/null

# Listing with delimiter=/ and prefix=photos/2006/ must return only the
# two matching CommonPrefixes (February/ and January/), per 003.out.
_s3_get /bucket?delimiter=/\&prefix=photos/2006/ -D - | _filter_curl xml

View File

@ -0,0 +1,29 @@
QA output created by 003
> s3curl --id tester -- -X PUT http://SWIFT_HOST/bucket... 200
> s3curl --id tester -- -X PUT -T /dev/null http://SWIFT_HOST/bucket/sample.jpg... 200
> s3curl --id tester -- -X PUT -T /dev/null http://SWIFT_HOST/bucket/photos/2006/January/sample.jpg... 200
> s3curl --id tester -- -X PUT -T /dev/null http://SWIFT_HOST/bucket/photos/2006/February/sample2.jpg... 200
> s3curl --id tester -- -X PUT -T /dev/null http://SWIFT_HOST/bucket/photos/2006/February/sample3.jpg... 200
> s3curl --id tester -- -X PUT -T /dev/null http://SWIFT_HOST/bucket/pho/2006/February/sample4.jpg... 200
> s3curl --id tester -- -X GET -D - http://SWIFT_HOST/bucket?delimiter=/&prefix=photos/2006/... 200
HTTP/1.1 200 OK
Content-Length: LENGTH
Content-Type: application/xml
Date: DATE
X-Trans-Id: TXID
<?xml version='1.0' encoding='UTF-8'?>
<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Prefix>photos/2006/</Prefix>
<Marker/>
<Delimiter>/</Delimiter>
<IsTruncated>false</IsTruncated>
<MaxKeys>1000</MaxKeys>
<Name>bucket</Name>
<CommonPrefixes>
<Prefix>photos/2006/February/</Prefix>
</CommonPrefixes>
<CommonPrefixes>
<Prefix>photos/2006/January/</Prefix>
</CommonPrefixes>
</ListBucketResult>

508
swift3/test/functional/check Executable file
View File

@ -0,0 +1,508 @@
#!/bin/bash
#
# Copyright (C) 2009 Red Hat, Inc.
# Copyright (c) 2000-2002,2005-2006 Silicon Graphics, Inc. All Rights Reserved.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it would be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
# Control script for QA
#
tmp=/tmp/$$
status=0
needwrap=true
try=0
n_bad=0
bad=""
notrun=""
interrupt=true
# by default don't output timestamps
timestamp=${TIMESTAMP:=false}
# generic initialization
export iam=check
cd $(readlink -f $(dirname $0))
# we need common.config
if ! . ./common.config
then
echo "$iam: failed to source common.config"
exit 1
fi
# Pin environment knobs that could make tool output vary between runs.
# NOTE(review): MSGVERB controls fmtmsg(3)-style message verbosity on
# SysV-derived utilities -- presumably kept from the xfstests origin of
# this harness; confirm it is still needed here.
_setenvironment()
{
    MSGVERB="text:action"
    export MSGVERB
}
here=`pwd`
rm -f $here/$iam.out
_setenvironment
check=${check-true}
diff="diff -u"
verbose=false
group=false
xgroup=false
exit_on_err=false
showme=false
sortme=false
expunge=true
have_test_arg=false
randomize=false
rm -f $tmp.list $tmp.tmp $tmp.sed
for r
do
if $group
then
# arg after -g
group_list=`sed -n <group -e 's/$/ /' -e "/^[0-9][0-9][0-9].* $r /"'{
s/ .*//p
}'`
if [ -z "$group_list" ]
then
echo "Group \"$r\" is empty or not defined?"
exit 1
fi
[ ! -s $tmp.list ] && touch $tmp.list
for t in $group_list
do
if grep -s "^$t\$" $tmp.list >/dev/null
then
:
else
echo "$t" >>$tmp.list
fi
done
group=false
continue
elif $xgroup
then
# arg after -x
[ ! -s $tmp.list ] && ls [0-9][0-9][0-9] [0-9][0-9][0-9][0-9] >$tmp.list 2>/dev/null
group_list=`sed -n <group -e 's/$/ /' -e "/^[0-9][0-9][0-9].* $r /"'{
s/ .*//p
}'`
if [ -z "$group_list" ]
then
echo "Group \"$r\" is empty or not defined?"
exit 1
fi
numsed=0
rm -f $tmp.sed
for t in $group_list
do
if [ $numsed -gt 100 ]
then
sed -f $tmp.sed <$tmp.list >$tmp.tmp
mv $tmp.tmp $tmp.list
numsed=0
rm -f $tmp.sed
fi
echo "/^$t\$/d" >>$tmp.sed
numsed=`expr $numsed + 1`
done
sed -f $tmp.sed <$tmp.list >$tmp.tmp
mv $tmp.tmp $tmp.list
xgroup=false
continue
fi
xpand=true
case "$r"
in
-\? | -h | --help) # usage
echo "Usage: $0 [options] [testlist]"'
common options
-v verbose
check options
-xdiff graphical mode diff
-e exit immediately on test failure
-n show me, do not run tests
-T output timestamps
-r randomize test order
-keep-passed keep directories of passed tests
testlist options
-g group[,group...] include tests from these groups
-x group[,group...] exclude tests from these groups
NNN include test NNN
NNN-NNN include test range (eg. 012-021)
'
exit 0
;;
-g) # -g group ... pick from group file
group=true
xpand=false
;;
-xdiff) # graphical diff mode
xpand=false
if [ ! -z "$DISPLAY" ]
then
which xdiff >/dev/null 2>&1 && diff=xdiff
which gdiff >/dev/null 2>&1 && diff=gdiff
which tkdiff >/dev/null 2>&1 && diff=tkdiff
which xxdiff >/dev/null 2>&1 && diff=xxdiff
fi
;;
-e) # exit immediately on test failure
exit_on_err=true
xpand=false
;;
-n) # show me, don't do it
showme=true
xpand=false
;;
-r) # randomize test order
randomize=true
xpand=false
;;
-T) # turn on timestamp output
timestamp=true
xpand=false
;;
-v)
verbose=true
xpand=false
;;
-x) # -x group ... exclude from group file
xgroup=true
xpand=false
;;
'[0-9][0-9][0-9] [0-9][0-9][0-9][0-9]')
echo "No tests?"
status=1
exit $status
;;
[0-9]*-[0-9]*)
eval `echo $r | sed -e 's/^/start=/' -e 's/-/ end=/'`
;;
[0-9]*-)
eval `echo $r | sed -e 's/^/start=/' -e 's/-//'`
end=`echo [0-9][0-9][0-9] [0-9][0-9][0-9][0-9] | sed -e 's/\[0-9]//g' -e 's/ *$//' -e 's/.* //'`
if [ -z "$end" ]
then
echo "No tests in range \"$r\"?"
status=1
exit $status
fi
;;
*)
start=$r
end=$r
;;
esac
# get rid of leading 0s as can be interpreted as octal
start=`echo $start | sed 's/^0*//'`
end=`echo $end | sed 's/^0*//'`
if $xpand
then
have_test_arg=true
$AWK_PROG </dev/null '
BEGIN { for (t='$start'; t<='$end'; t++) printf "%03d\n",t }' \
| while read id
do
if grep -s "^$id " group >/dev/null
then
# in group file ... OK
echo $id >>$tmp.list
else
if [ -f expunged ] && $expunge && egrep "^$id([ ]|\$)" expunged >/dev/null
then
# expunged ... will be reported, but not run, later
echo $id >>$tmp.list
else
# oops
echo "$id - unknown test, ignored"
fi
fi
done
fi
done
if [ -s $tmp.list ]
then
# found some valid test numbers ... this is good
:
else
if $have_test_arg
then
# had test numbers, but none in group file ... do nothing
touch $tmp.list
else
# no test numbers, do everything from group file
sed -n -e '/^[0-9][0-9][0-9]*/s/[ ].*//p' <group >$tmp.list
fi
fi
# should be sort -n, but this did not work for Linux when this
# was ported from IRIX
#
list=`sort $tmp.list`
rm -f $tmp.list $tmp.tmp $tmp.sed
if $randomize
then
list=`echo $list | awk -f randomize.awk`
fi
# we need common.rc
if ! . ./common.rc
then
echo "check: failed to source common.rc"
exit 1
fi
# Seconds elapsed since local midnight; used only to compute per-test
# durations as differences between two samples.
_wallclock()
{
    local _h _m _s
    read _h _m _s <<< "$(date '+%H %M %S')"
    # force base 10 so values such as "08" are not parsed as octal
    echo $(( 10#$_h * 3600 + 10#$_m * 60 + 10#$_s ))
}
# Emit the current time as " [HH:MM:SS]" with no trailing newline,
# appended to a test's progress line when -T is given.
_timestamp()
{
    printf ' [%s]' "$(date '+%T')"
}
# Final accounting, invoked from the exit trap: merge per-test timings
# into check.time, append the run summary to check.log, print pass/fail
# totals, and remove this run's temporary files.
_wrapup()
{
    # for hangcheck ...
    # remove files that were used by hangcheck
    #
    [ -f /tmp/check.pid ] && rm -rf /tmp/check.pid
    [ -f /tmp/check.sts ] && rm -rf /tmp/check.sts

    if $showme
    then
        :
    elif $needwrap
    then
        # Merge this run's timings ($tmp.time) over the historical ones,
        # keeping the newest entry per test, sorted by test number.
        if [ -f check.time -a -f $tmp.time ]
        then
            cat check.time $tmp.time \
            | $AWK_PROG '
        { t[$1] = $2 }
        END        { if (NR > 0) {
                    for (i in t) print i " " t[i]
                }
            }' \
            | sort -n >$tmp.out
            mv $tmp.out check.time
        fi

        # Expunged tests were counted but never run; drop them from the
        # totals and from the list written to check.log.
        if [ -f $tmp.expunged ]
        then
            notrun=`wc -l <$tmp.expunged | sed -e 's/ *//g'`
            try=`expr $try - $notrun`
            list=`echo "$list" | sed -f $tmp.expunged`
        fi

        echo "" >>check.log
        date >>check.log
        echo $list | fmt | sed -e 's/^/ /' >>check.log
        $interrupt && echo "Interrupted!" >>check.log

        if [ ! -z "$notrun" ]
        then
            echo "Not run:$notrun"
            echo "Not run:$notrun" >>check.log
        fi

        if [ ! -z "$n_bad" -a $n_bad != 0 ]
        then
            echo "Failures:$bad"
            echo "Failed $n_bad of $try tests"
            echo "Failures:$bad" | fmt >>check.log
            echo "Failed $n_bad of $try tests" >>check.log
        else
            echo "Passed all $try tests"
            echo "Passed all $try tests" >>check.log
        fi
        needwrap=false
    fi

    # Remove only files belonging to this run.  The previous code also
    # removed /tmp/*.out, /tmp/*.err and /tmp/*.time, which could delete
    # files owned by unrelated processes; $tmp embeds our PID, so
    # "$tmp.*" is both sufficient and safe.
    rm -f /tmp/check.pid /tmp/check.sts
    rm -f $tmp.*
}
trap "_wrapup; exit \$status" 0 1 2 3 15
# for hangcheck ...
# Save pid of check in a well known place, so that hangcheck can be sure it
# has the right pid (getting the pid from ps output is not reliable enough).
#
rm -rf /tmp/check.pid
echo $$ >/tmp/check.pid
# for hangcheck ...
# Save the status of check in a well known place, so that hangcheck can be
# sure to know where check is up to (getting test number from ps output is
# not reliable enough since the trace stuff has been introduced).
#
rm -rf /tmp/check.sts
echo "preamble" >/tmp/check.sts
# don't leave old full output behind on a clean run
rm -f check.full
[ -f check.time ] || touch check.time
FULL_HOST_DETAILS=`_full_platform_details`
cat <<EOF
PLATFORM -- $FULL_HOST_DETAILS
EOF
seq="check"
[ -n "$TESTS_REMAINING_LOG" ] && echo $list > $TESTS_REMAINING_LOG
for seq in $list
do
STORE="$WD/$seq"
err=false
echo -n "$seq"
if [ -n "$TESTS_REMAINING_LOG" ] ; then
sed -e "s/$seq//" -e 's/ / /' -e 's/^ *//' $TESTS_REMAINING_LOG > $TESTS_REMAINING_LOG.tmp
mv $TESTS_REMAINING_LOG.tmp $TESTS_REMAINING_LOG
sync
fi
if $showme
then
description=`sed -n '3s/^#//p' $seq`
echo " $description"
continue
elif [ -f expunged ] && $expunge && egrep "^$seq([ ]|\$)" expunged >/dev/null
then
echo " - expunged"
rm -f $seq.out.bad
echo "/^$seq\$/d" >>$tmp.expunged
elif [ ! -f $seq ]
then
echo " - no such test?"
echo "/^$seq\$/d" >>$tmp.expunged
else
# really going to try and run this one
#
rm -f $seq.out.bad
lasttime=`sed -n -e "/^$seq /s/.* //p" <check.time`
description=`sed -n '3s/^#//p' $seq`
if [ "X$lasttime" != X ]; then
echo -n " Last Used:${lasttime}s. $description"
else
echo -n " $description" # prettier output with timestamps.
fi
rm -f core $seq.notrun
# for hangcheck ...
echo "$seq" >/tmp/check.sts
start=`_wallclock`
$timestamp && echo -n " ["`date "+%T"`"]"
[ ! -x $seq ] && chmod u+x $seq # ensure we can run it
./$seq >$tmp.out 2>&1
sts=$?
$timestamp && _timestamp
stop=`_wallclock`
if [ -f core ]
then
echo -n " [dumped core]"
mv core $seq.core
err=true
fi
if [ -f $seq.notrun ]
then
$timestamp || echo -n " [not run] "
$timestamp && echo " [not run]" && echo -n " $seq -- "
cat $seq.notrun
notrun="$notrun $seq"
else
if [ $sts -ne 0 ]
then
echo -n " [failed, exit status $sts]"
err=true
fi
if [ ! -f $seq.out ]
then
echo " - no qualified output"
err=true
else
if diff $seq.out $tmp.out >/dev/null 2>&1
then
echo ""
if $err
then
:
else
echo "$seq `expr $stop - $start`" >>$tmp.time
fi
else
echo " - output mismatch (see $seq.out.bad)"
mv $tmp.out $seq.out.bad
$diff $seq.out $seq.out.bad
err=true
fi
fi
fi
fi
# come here for each test, except when $showme is true
#
[ -f $seq.notrun ] || try=`expr $try + 1`
if $err
then
bad="$bad $seq"
n_bad=`expr $n_bad + 1`
quick=false
if $exit_on_err
then
break
fi
fi
seq="after_$seq"
done
interrupt=false
status=`expr $n_bad`
exit

View File

@ -0,0 +1,17 @@
# Common preamble sourced by every numbered test case: prints the QA
# banner, establishes the temp-file/status conventions, and pulls in the
# shared helpers, filters, and the Swift/S3 environment setup.
set -e
seq=`basename $0`
echo "QA output created by $seq"
here=`pwd`
tmp=/tmp/$$
status=1        # failure is the default!
# get standard environment, filters and checks
. ./common.rc
. ./common.filter
. ./swift.rc
. ./s3.rc
_sw_setup
_s3_setup

View File

@ -0,0 +1,114 @@
#!/bin/bash
#
# Copyright (C) 2009 Red Hat, Inc.
# Copyright (c) 2000-2003,2006 Silicon Graphics, Inc. All Rights Reserved.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it would be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
# setup and check for config parameters
#
# all tests should use a common language setting to prevent golden
# output mismatches.
export LANG=C
PATH=".:$PATH"
HOST=`hostname -s`
HOSTOS=`uname -s`
export CHECK_OPTIONS=${CHECK_OPTIONS:="-g auto"}
export PWD=`pwd`
# $1 = prog to look for, $2* = default pathnames if not found in $PATH
# Locate an executable: prefer $PATH lookup of $1, then fall back to the
# candidate paths given as $2..  Prints the resolved path (empty string on
# failure) and returns 0 when found, 1 otherwise.
set_prog_path()
{
    p=$(which "$1" 2> /dev/null)
    if [ -n "$p" ] && [ -x "$p" ]; then
        echo $p
        return 0
    fi
    p=$1
    shift
    for candidate; do
        if [ -x $candidate ]; then
            echo $candidate
            return 0
        fi
    done
    echo ""
    return 1
}
# Print a message and abort the whole run with exit status 1.
_fatal()
{
    echo "$@"
    status=1
    exit 1
}
export PERL_PROG="`set_prog_path perl`"
[ "$PERL_PROG" = "" ] && _fatal "perl not found"
export AWK_PROG="`set_prog_path awk`"
[ "$AWK_PROG" = "" ] && _fatal "awk not found"
export SED_PROG="`set_prog_path sed`"
[ "$SED_PROG" = "" ] && _fatal "sed not found"
export BC_PROG="`set_prog_path bc`"
[ "$BC_PROG" = "" ] && _fatal "bc not found"
export CURL_PROG="`set_prog_path curl`"
[ "$CURL_PROG" = "" ] && _fatal "curl not found"
if [ -z "$TEST_DIR" ]; then
TEST_DIR=`pwd`/scratch
fi
if [ ! -e "$TEST_DIR" ]; then
mkdir "$TEST_DIR"
fi
if [ ! -d "$TEST_DIR" ]; then
echo "common.config: Error: \$TEST_DIR ($TEST_DIR) is not a directory"
exit 1
fi
export TEST_DIR
export SWIFT_HOST=${SWIFT_HOST:-"localhost:8080"}
export KEYSTONE_HOST=${KEYSTONE_HOST:-"localhost:35357"}
export AUTH=${AUTH:-"keystone"}
export TENANT=${TENANT:-"test"}
export ADMIN_USER=${ADMIN_USER:-"admin"}
export ADMIN_PASS=${ADMIN_PASS:-"admin"}
export ADMIN_ACCESS_KEY=${ADMIN_ACCESS_KEY:-"${TENANT}:${ADMIN_USER}"}
export ADMIN_SECRET_KEY=${ADMIN_SECRET_KEY:-"${ADMIN_PASS}"}
export TESTER_USER=${TESTER_USER:-"tester"}
export TESTER_PASS=${TESTER_PASS:-"testing"}
export TESTER_ACCESS_KEY=${TESTER_ACCESS_KEY:-"${TENANT}:${TESTER_USER}"}
export TESTER_SECRET_KEY=${TESTER_SECRET_KEY:-"${TESTER_PASS}"}
export TESTER2_USER=${TESTER2_USER:-"tester2"}
export TESTER2_PASS=${TESTER2_PASS:-"testing2"}
export TESTER2_ACCESS_KEY=${TESTER2_ACCESS_KEY:-"${TENANT}:${TESTER2_USER}"}
export TESTER2_SECRET_KEY=${TESTER2_SECRET_KEY:-"${TESTER2_PASS}"}
# make sure this script returns success
/bin/true

View File

@ -0,0 +1,313 @@
#!/bin/bash
#
# Copyright (C) 2009 Red Hat, Inc.
# Copyright (c) 2000-2001 Silicon Graphics, Inc. All Rights Reserved.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it would be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
# standard filters
#
# Checks that given_value is in range of correct_value +/- tolerance.
# Tolerance can be an absolute value or a percentage of the correct value
# (see examples with tolerances below).
# Outputs suitable message to stdout if it's not in range.
#
# A verbose option, -v, may be used as the LAST argument
#
# e.g.
# foo: 0.0298 = 0.03 +/- 5%
# _within_tolerance "foo" 0.0298 0.03 5%
#
# foo: 0.0298 = 0.03 +/- 0.01
# _within_tolerance "foo" 0.0298 0.03 0.01
#
# foo: 0.0298 = 0.03 -0.01 +0.002
# _within_tolerance "foo" 0.0298 0.03 0.01 0.002
#
# foo: verbose output of 0.0298 = 0.03 +/- 5%
# _within_tolerance "foo" 0.0298 0.03 5% -v
_within_tolerance()
{
_name=$1
_given_val=$2
_correct_val=$3
_mintol=$4
_maxtol=$_mintol
_verbose=0
_debug=false
# maxtol arg is optional
# verbose arg is optional
if [ $# -ge 5 ]
then
if [ "$5" = "-v" ]
then
_verbose=1
else
_maxtol=$5
fi
fi
if [ $# -ge 6 ]
then
[ "$6" = "-v" ] && _verbose=1
fi
# find min with or without %
_mintolerance=`echo $_mintol | sed -e 's/%//'`
if [ $_mintol = $_mintolerance ]
then
_min=`echo "scale=5; $_correct_val-$_mintolerance" | bc`
else
_min=`echo "scale=5; $_correct_val-$_mintolerance*0.01*$_correct_val" | bc`
fi
# find max with or without %
_maxtolerance=`echo $_maxtol | sed -e 's/%//'`
if [ $_maxtol = $_maxtolerance ]
then
_max=`echo "scale=5; $_correct_val+$_maxtolerance" | bc`
else
_max=`echo "scale=5; $_correct_val+$_maxtolerance*0.01*$_correct_val" | bc`
fi
$_debug && echo "min = $_min"
$_debug && echo "max = $_max"
cat <<EOF >$tmp.bc.1
scale=5;
if ($_min <= $_given_val) 1;
if ($_min > $_given_val) 0;
EOF
cat <<EOF >$tmp.bc.2
scale=5;
if ($_given_val <= $_max) 1;
if ($_given_val > $_max) 0;
EOF
_above_min=`bc <$tmp.bc.1`
_below_max=`bc <$tmp.bc.2`
rm -f $tmp.bc.[12]
_in_range=`expr $_above_min \& $_below_max`
# fix up min, max precision for output
# can vary for 5.3, 6.2
_min=`echo $_min | sed -e 's/0*$//'` # get rid of trailling zeroes
_max=`echo $_max | sed -e 's/0*$//'` # get rid of trailling zeroes
if [ $_in_range -eq 1 ]
then
[ $_verbose -eq 1 ] && echo $_name is in range
return 0
else
[ $_verbose -eq 1 ] && echo $_name has value of $_given_val
[ $_verbose -eq 1 ] && echo $_name is NOT in range $_min .. $_max
return 1
fi
}
# ctime(3) dates
#
_filter_date()
{
sed -e 's/[A-Z][a-z][a-z] [A-Za-z][a-z][a-z] *[0-9][0-9]* [0-9][0-9]:[0-9][0-9]:[0-9][0-9] [0-9][0-9][0-9][0-9]$/DATE/'
}
# ISO dates
_filter_iso_date()
{
sed -e 's/[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9] [0-9][0-9]:[0-9][0-9]:[0-9][0-9]/DATE/g'
}
_filter_short_date()
{
sed -e 's/[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9] [0-9][0-9]:[0-9][0-9]/DATE/g'
}
# remove trailing whitespace, some versions of sg3_utils do that
_filter_spaces()
{
sed -e 's/ *$//'
}
_filter_eol()
{
tr -d '\r\n'
}
_filter_nop()
{
cat
}
_filter_user()
{
sed "s/${TENANT}:${ADMIN_USER}/ADMIN_USER/g" | \
sed "s/${TENANT}:${TESTER_USER}/TESTER/g" | \
sed "s/${TENANT}:${TESTER2_USER}/TESTER2/g"
}
_filter_tenant()
{
sed -e 's/AUTH_[a-z0-9]*\>/TENANT/g'
}
_filter_timestamp()
{
sed -e 's/[0-9]\{10\}\.[0-9]\{5\}/TIMESTAMP/g'
}
_filter_host()
{
sed "s/$SWIFT_HOST/SWIFT_HOST/g"
}
_filter_s3_iso_date()
{
sed -e 's/[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]T[0-9][0-9]:[0-9][0-9]:[0-9][0-9]\.[0-9][0-9][0-9]Z/DATE/g'
}
_filter_upload_id()
{
sed -e 's#<UploadId>[-_0-9a-zA-Z]*</#<UploadId>UPLOAD_ID</#g' \
-e 's#<NextUploadIdMarker>[-_0-9a-zA-Z]*</#<NextUploadIdMarker>UPLOAD_ID</#g' \
-e 's#uploadId=[-_0-9a-zA-Z]*#uploadId=UPLOAD_ID#g'
}
_filter_version_id()
{
sed -e 's#^x-amz-version-id: [-_0-9a-zA-Z]*#x-amz-version-id: VERSION_ID#g' \
-e 's#<VersionId>[-_0-9a-zA-Z]*</#<VersionId>VERSION_ID</#g' \
-e 's#<NextVersionIdMarker>[-_0-9a-zA-Z]*</#<NextVersionIdMarker>VERSION_ID</#g' \
-e 's#versionId=[-_0-9a-zA-Z]*#versionId=VERSION_ID#g'
}
_filter_txid()
{
sed -e 's/tx[a-z0-9]\{21\}-[a-z0-9]\{10\}\>/TXID/g'
}
_filter_etag()
{
sed -e 's/\<[a-z0-9]\{32\}\>/ETAG/g'
}
_filter_header_date()
{
sed -e 's/[A-Z][a-z][a-z], [0-9][0-9] [A-Z][a-z][a-z] [0-9]\{4\} [0-9][0-9]:[0-9][0-9]:[0-9][0-9] GMT/DATE/g'
}
_filter_header_content_length()
{
sed -e 's/^Content-Length: [0-9]*$/Content-Length: LENGTH/g'
}
# This is not necessary after Swift merge swift3 patch
_filter_header_capitalization()
{
perl -pe '$_=lc($&).$'\'' if /^x-(amz|rgw)-.+?:/i' | \
perl -pe '$_="ETag:".$'\'' if /^etag:/i'
}
_filter_header()
{
_filter_header_capitalization | _filter_header_date | _filter_user | \
_filter_upload_id | _filter_version_id | _filter_txid | _filter_etag | \
_filter_timestamp | _filter_header_content_length
}
_filter_body()
{
local format=$1
local fmt_filter=""
case "$format"
in
xml)
fmt_filter=_xmlindent
;;
json)
fmt_filter=_jsonindent
;;
*)
fmt_filter=_filter_nop
;;
esac
$fmt_filter | _filter_user | _filter_s3_iso_date | \
_filter_host | _filter_tenant | _filter_upload_id | \
_filter_version_id | _filter_txid | _filter_etag | \
_filter_timestamp
}
# Normalize "curl -D -" output (status line, headers, body) for golden
# output comparison.  Header lines go through _filter_header and are
# sorted case-insensitively; the body goes through _filter_body, with the
# optional $1 ("xml"/"json") selecting re-indentation.
_filter_curl()
{
    local format=$1
    local type=body        # section of the response currently being read
    local status=""
    local header=""
    local body=""
    while read line; do
        line=$(echo -n $line | _filter_eol)   # strip CR/LF
        # A fresh "HTTP/1.1 ..." line starts another status/header block,
        # but only while no body content has been accumulated yet.
        # NOTE(review): presumably to cope with multiple header blocks
        # (e.g. 100-continue responses) -- confirm against real traffic.
        if [[ "$body" == "" && "$line" == HTTP/1.1* ]]; then
            type=status
        fi
        case "$type"
        in
            status)
                # Header lines contain ':'; status lines do not.
                if [[ "$line" == *:* ]]; then
                    type=header
                    header="${header}${line}\n"
                else
                    status="${status}${line}\n"
                fi
                ;;
            header)
                # A blank line separates the headers from the body.
                if [ "$line" == "" ]; then
                    type=body
                else
                    header="${header}${line}\n"
                fi
                ;;
            body)
                body="${body}${line}\n"
                ;;
        esac
    done
    # keep a final line that was not terminated by a newline
    body="${body}${line}"
    echo -en $status
    if [ "$header" != "" ]; then
        echo -en $header | _filter_header | sort -f
        echo
    fi
    echo -en $body | _filter_body $format
}
_filter_curl_command()
{
sed "s#$tmp#/TMP#g" | _filter_user | _filter_host | _filter_tenant | \
_filter_upload_id | _filter_version_id | _filter_txid | _filter_etag | \
_filter_timestamp
}
# make sure this script returns success
/bin/true

View File

@ -0,0 +1,209 @@
#!/bin/bash
#
# Copyright (C) 2009 Red Hat, Inc.
# Copyright (c) 2000-2006 Silicon Graphics, Inc. All Rights Reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# we need common.config
if [ "$iam" != "check" ]
then
if ! . ./common.config
then
echo "$iam: failed to source common.config"
exit 1
fi
fi
# make sure we have a standard umask
umask 022
# check if run as root
#
_need_to_be_root()
{
local id=`id | $SED_PROG -e 's/(.*//' -e 's/.*=//'`
if [ "$id" -ne 0 ]
then
_notrun "you need to be root (not uid=$id) to run this test"
fi
}
# To remove directory successfully always, we have to rename it first
# so that new files are not created in the directory while we remove it.
# Rename the directory first so nothing can create new entries inside it
# while it is being deleted, then remove the renamed copy.
_safe_remove()
{
    local _victim="$1.tmp"
    mv "$1" "$_victim"
    rm -rf "$_victim"
}
# Do a command, log it to $seq.full, optionally test return status
# and die if command fails. If called with one argument _do executes the
# command, logs it, and returns its exit status. With two arguments _do
# first prints the message passed in the first argument, and then "done"
# or "fail" depending on the return status of the command passed in the
# second argument. If the command fails and the variable _do_die_on_error
# is set to "always" or the two argument form is used and _do_die_on_error
# is set to "message_only" _do will print an error message to
# $seq.out and exit.
_do()
{
    # One-argument form: run $1.  Two-argument form: print "$1... ",
    # run $2, then report done/fail (see the comment block above).
    if [ $# -eq 1 ]; then
        _cmd=$1
    elif [ $# -eq 2 ]; then
        _note=$1
        _cmd=$2
        echo -n "$_note... "
    else
        echo "Usage: _do [note] cmd" 1>&2
        status=1; exit
    fi
    # Log the command itself, then its combined stdout/stderr, to
    # $seq.full for post-mortem inspection.
    (eval "echo '---' \"$_cmd\"") >>$here/$seq.full
    (eval "$_cmd") >$tmp._out 2>&1; ret=$?
    cat $tmp._out >>$here/$seq.full
    if [ $# -eq 2 ]; then
        if [ $ret -eq 0 ]; then
            echo "done"
        else
            echo "fail"
        fi
    fi
    # Optionally abort the whole test on failure, controlled by
    # _do_die_on_error = "always" (any form) or "message_only" (only the
    # two-argument form).
    if [ $ret -ne 0 ] \
        && [ "$_do_die_on_error" = "always" \
            -o \( $# -eq 2 -a "$_do_die_on_error" = "message_only" \) ]
    then
        [ $# -ne 2 ] && echo
        eval "echo \"$_cmd\" failed \(returned $ret\): see $seq.full"
        status=1; exit
    fi
    return $ret
}
# bail out, setting up .notrun file
#
_notrun()
{
echo "$*" >$seq.notrun
echo "$seq not run: $*"
status=0
exit
}
# just plain bail out
#
_fail()
{
echo "$*" | tee -a $here/$seq.full
echo "(see $seq.full for details)"
status=1
exit 1
}
# this test requires that a specified command (executable) exists
#
# Skip the current test (via _notrun) unless the named command exists in
# $PATH and is executable.
_require_command()
{
    local _cmd
    _cmd=`which $1`
    if [ -z "$_cmd" ] || [ ! -x "$_cmd" ]; then
        _notrun "$1 utility required, skipped this test"
    fi
}
_full_platform_details()
{
os=`uname -s`
host=`hostname -s`
kernel=`uname -r`
platform=`uname -m`
echo "$os/$platform $host $kernel"
}
_die()
{
echo $@
exit 1
}
_random()
{
openssl enc -rc4 -pass pass:"$(date)" < /dev/zero 2>/dev/null
}
_one()
{
yes $'\xFF' | tr -d "\n"
}
_hq()
{
local name=$1
egrep -i "^$name: " | $AWK_PROG '{print $2}' | _filter_eol
}
_xmlindent()
{
./xmlindent.py
}
_xpath()
{
local path=$1
./xpath.py ${path}
}
_md5()
{
local file=$1
cat $file | openssl md5 -binary | base64 | _filter_eol
}
_etag()
{
local file=$1
md5sum $file | awk '{print $1}' | _filter_eol
}
_file_size()
{
local file=$1
wc -c $file | awk '{print $1}' | _filter_eol
}
# Return 0 (success) when the given HTTP status code is in the 2xx range.
_is_http_success()
{
    (( $1 / 100 == 2 ))
}
# Run "$@" up to five times, one second apart, until it succeeds; abort
# the run via _die if every attempt fails.  Command output is discarded.
_retry()
{
    local _attempt
    for _attempt in 1 2 3 4 5; do
        if "$@" > /dev/null 2>&1; then
            return
        fi
        echo "try again"
        sleep 1
    done
    _die "FAILED: $@"
}
# make sure this script returns success
/bin/true

View File

@ -0,0 +1,13 @@
[DEFAULT]
user = %USER%
swift_dir = %TEST_DIR%/etc
devices = %TEST_DIR%
mount_check = false
workers = 1
log_level = DEBUG
[pipeline:main]
pipeline = account-server
[app:account-server]
use = egg:swift#account

View File

@ -0,0 +1,14 @@
[DEFAULT]
user = %USER%
swift_dir = %TEST_DIR%/etc
devices = %TEST_DIR%
mount_check = false
workers = 1
log_level = DEBUG
[pipeline:main]
pipeline = container-server
[app:container-server]
use = egg:swift#container
allow_versions = true

View File

@ -0,0 +1,112 @@
# Keystone PasteDeploy configuration file.
[filter:debug]
paste.filter_factory = keystone.common.wsgi:Debug.factory
[filter:build_auth_context]
paste.filter_factory = keystone.middleware:AuthContextMiddleware.factory
[filter:token_auth]
paste.filter_factory = keystone.middleware:TokenAuthMiddleware.factory
[filter:admin_token_auth]
paste.filter_factory = keystone.middleware:AdminTokenAuthMiddleware.factory
[filter:xml_body]
paste.filter_factory = keystone.middleware:XmlBodyMiddleware.factory
[filter:xml_body_v2]
paste.filter_factory = keystone.middleware:XmlBodyMiddlewareV2.factory
[filter:xml_body_v3]
paste.filter_factory = keystone.middleware:XmlBodyMiddlewareV3.factory
[filter:json_body]
paste.filter_factory = keystone.middleware:JsonBodyMiddleware.factory
[filter:user_crud_extension]
paste.filter_factory = keystone.contrib.user_crud:CrudExtension.factory
[filter:crud_extension]
paste.filter_factory = keystone.contrib.admin_crud:CrudExtension.factory
[filter:ec2_extension]
paste.filter_factory = keystone.contrib.ec2:Ec2Extension.factory
[filter:ec2_extension_v3]
paste.filter_factory = keystone.contrib.ec2:Ec2ExtensionV3.factory
[filter:federation_extension]
paste.filter_factory = keystone.contrib.federation.routers:FederationExtension.factory
[filter:oauth1_extension]
paste.filter_factory = keystone.contrib.oauth1.routers:OAuth1Extension.factory
[filter:s3_extension]
paste.filter_factory = keystone.contrib.s3:S3Extension.factory
[filter:endpoint_filter_extension]
paste.filter_factory = keystone.contrib.endpoint_filter.routers:EndpointFilterExtension.factory
[filter:simple_cert_extension]
paste.filter_factory = keystone.contrib.simple_cert:SimpleCertExtension.factory
[filter:revoke_extension]
paste.filter_factory = keystone.contrib.revoke.routers:RevokeExtension.factory
[filter:url_normalize]
paste.filter_factory = keystone.middleware:NormalizingFilter.factory
[filter:sizelimit]
paste.filter_factory = keystone.middleware:RequestBodySizeLimiter.factory
[filter:stats_monitoring]
paste.filter_factory = keystone.contrib.stats:StatsMiddleware.factory
[filter:stats_reporting]
paste.filter_factory = keystone.contrib.stats:StatsExtension.factory
[filter:access_log]
paste.filter_factory = keystone.contrib.access:AccessLogMiddleware.factory
[app:public_service]
paste.app_factory = keystone.service:public_app_factory
[app:service_v3]
paste.app_factory = keystone.service:v3_app_factory
[app:admin_service]
paste.app_factory = keystone.service:admin_app_factory
[pipeline:public_api]
pipeline = sizelimit url_normalize build_auth_context token_auth admin_token_auth xml_body_v2 json_body ec2_extension user_crud_extension public_service
[pipeline:admin_api]
pipeline = sizelimit url_normalize build_auth_context token_auth admin_token_auth xml_body_v2 json_body ec2_extension s3_extension crud_extension admin_service
[pipeline:api_v3]
pipeline = sizelimit url_normalize build_auth_context token_auth admin_token_auth xml_body_v3 json_body ec2_extension_v3 s3_extension simple_cert_extension service_v3
[app:public_version_service]
paste.app_factory = keystone.service:public_version_app_factory
[app:admin_version_service]
paste.app_factory = keystone.service:admin_version_app_factory
[pipeline:public_version_api]
pipeline = sizelimit url_normalize xml_body public_version_service
[pipeline:admin_version_api]
pipeline = sizelimit url_normalize xml_body admin_version_service
[composite:main]
use = egg:Paste#urlmap
/v2.0 = public_api
/v3 = api_v3
/ = public_version_api
[composite:admin]
use = egg:Paste#urlmap
/v2.0 = admin_api
/v3 = api_v3
/ = admin_version_api

View File

@ -0,0 +1,17 @@
[DEFAULT]
policy_file=%CONF_DIR%/policy.json
[database]
connection=sqlite:///%TEST_DIR%/keystone.db
[paste_deploy]
config_file=%CONF_DIR%/keystone-paste.ini
[signing]
certfile=%TEST_DIR%/certs/signing_cert.pem
keyfile=%TEST_DIR%/private/signing_key.pem
ca_certs=%TEST_DIR%/certs/ca.pem
ca_key=%TEST_DIR%/private/cakey.pem

View File

@ -0,0 +1,13 @@
[DEFAULT]
user = %USER%
swift_dir = %TEST_DIR%/etc
devices = %TEST_DIR%
mount_check = false
workers = 1
log_level = DEBUG
[pipeline:main]
pipeline = object-server
[app:object-server]
use = egg:swift#object

View File

@ -0,0 +1,144 @@
{
"admin_required": "role:admin or is_admin:1",
"service_role": "role:service",
"service_or_admin": "rule:admin_required or rule:service_role",
"owner" : "user_id:%(user_id)s",
"admin_or_owner": "rule:admin_required or rule:owner",
"default": "rule:admin_required",
"identity:get_region": "",
"identity:list_regions": "",
"identity:create_region": "rule:admin_required",
"identity:update_region": "rule:admin_required",
"identity:delete_region": "rule:admin_required",
"identity:get_service": "rule:admin_required",
"identity:list_services": "rule:admin_required",
"identity:create_service": "rule:admin_required",
"identity:update_service": "rule:admin_required",
"identity:delete_service": "rule:admin_required",
"identity:get_endpoint": "rule:admin_required",
"identity:list_endpoints": "rule:admin_required",
"identity:create_endpoint": "rule:admin_required",
"identity:update_endpoint": "rule:admin_required",
"identity:delete_endpoint": "rule:admin_required",
"identity:get_domain": "rule:admin_required",
"identity:list_domains": "rule:admin_required",
"identity:create_domain": "rule:admin_required",
"identity:update_domain": "rule:admin_required",
"identity:delete_domain": "rule:admin_required",
"identity:get_project": "rule:admin_required",
"identity:list_projects": "rule:admin_required",
"identity:list_user_projects": "rule:admin_or_owner",
"identity:create_project": "rule:admin_required",
"identity:update_project": "rule:admin_required",
"identity:delete_project": "rule:admin_required",
"identity:get_user": "rule:admin_required",
"identity:list_users": "rule:admin_required",
"identity:create_user": "rule:admin_required",
"identity:update_user": "rule:admin_required",
"identity:delete_user": "rule:admin_required",
"identity:change_password": "rule:admin_or_owner",
"identity:get_group": "rule:admin_required",
"identity:list_groups": "rule:admin_required",
"identity:list_groups_for_user": "rule:admin_or_owner",
"identity:create_group": "rule:admin_required",
"identity:update_group": "rule:admin_required",
"identity:delete_group": "rule:admin_required",
"identity:list_users_in_group": "rule:admin_required",
"identity:remove_user_from_group": "rule:admin_required",
"identity:check_user_in_group": "rule:admin_required",
"identity:add_user_to_group": "rule:admin_required",
"identity:get_credential": "rule:admin_required",
"identity:list_credentials": "rule:admin_required",
"identity:create_credential": "rule:admin_required",
"identity:update_credential": "rule:admin_required",
"identity:delete_credential": "rule:admin_required",
"identity:ec2_get_credential": "rule:admin_or_owner",
"identity:ec2_list_credentials": "rule:admin_or_owner",
"identity:ec2_create_credential": "rule:admin_or_owner",
"identity:ec2_delete_credential": "rule:admin_required or (rule:owner and user_id:%(target.credential.user_id)s)",
"identity:get_role": "rule:admin_required",
"identity:list_roles": "rule:admin_required",
"identity:create_role": "rule:admin_required",
"identity:update_role": "rule:admin_required",
"identity:delete_role": "rule:admin_required",
"identity:check_grant": "rule:admin_required",
"identity:list_grants": "rule:admin_required",
"identity:create_grant": "rule:admin_required",
"identity:revoke_grant": "rule:admin_required",
"identity:list_role_assignments": "rule:admin_required",
"identity:get_policy": "rule:admin_required",
"identity:list_policies": "rule:admin_required",
"identity:create_policy": "rule:admin_required",
"identity:update_policy": "rule:admin_required",
"identity:delete_policy": "rule:admin_required",
"identity:check_token": "rule:admin_required",
"identity:validate_token": "rule:service_or_admin",
"identity:validate_token_head": "rule:service_or_admin",
"identity:revocation_list": "rule:service_or_admin",
"identity:revoke_token": "rule:admin_or_owner",
"identity:create_trust": "user_id:%(trust.trustor_user_id)s",
"identity:get_trust": "rule:admin_or_owner",
"identity:list_trusts": "",
"identity:list_roles_for_trust": "",
"identity:check_role_for_trust": "",
"identity:get_role_for_trust": "",
"identity:delete_trust": "",
"identity:create_consumer": "rule:admin_required",
"identity:get_consumer": "rule:admin_required",
"identity:list_consumers": "rule:admin_required",
"identity:delete_consumer": "rule:admin_required",
"identity:update_consumer": "rule:admin_required",
"identity:authorize_request_token": "rule:admin_required",
"identity:list_access_token_roles": "rule:admin_required",
"identity:get_access_token_role": "rule:admin_required",
"identity:list_access_tokens": "rule:admin_required",
"identity:get_access_token": "rule:admin_required",
"identity:delete_access_token": "rule:admin_required",
"identity:list_projects_for_endpoint": "rule:admin_required",
"identity:add_endpoint_to_project": "rule:admin_required",
"identity:check_endpoint_in_project": "rule:admin_required",
"identity:list_endpoints_for_project": "rule:admin_required",
"identity:remove_endpoint_from_project": "rule:admin_required",
"identity:create_identity_provider": "rule:admin_required",
"identity:list_identity_providers": "rule:admin_required",
"identity:get_identity_providers": "rule:admin_required",
"identity:update_identity_provider": "rule:admin_required",
"identity:delete_identity_provider": "rule:admin_required",
"identity:create_protocol": "rule:admin_required",
"identity:update_protocol": "rule:admin_required",
"identity:get_protocol": "rule:admin_required",
"identity:list_protocols": "rule:admin_required",
"identity:delete_protocol": "rule:admin_required",
"identity:create_mapping": "rule:admin_required",
"identity:get_mapping": "rule:admin_required",
"identity:list_mappings": "rule:admin_required",
"identity:delete_mapping": "rule:admin_required",
"identity:update_mapping": "rule:admin_required",
"identity:list_projects_for_groups": "",
"identity:list_domains_for_groups": "",
"identity:list_revoke_events": ""
}

View File

@ -0,0 +1,73 @@
[DEFAULT]
bind_port = 8080
user = %USER%
swift_dir = %TEST_DIR%/etc
devices = %TEST_DIR%
mount_check = false
workers = 1
account_autocreate = true
log_level = DEBUG
[pipeline:main]
pipeline = catch_errors proxy-logging cache swift3 %MIDDLEWARE% bulk slo proxy-logging proxy-server
[app:proxy-server]
use = egg:swift#proxy
[filter:tempauth]
use = egg:swift#tempauth
user_test_admin = admin .admin
user_test_tester = testing .admin
user_test_tester2 = testing2 .admin
[filter:swift3]
use = egg:swift3#swift3
#location = 'jp'
storage_domain = localhost
pretty_print_xml = true
[filter:catch_errors]
use = egg:swift#catch_errors
[filter:proxy-logging]
use = egg:swift#proxy_logging
[filter:bulk]
use = egg:swift#bulk
[filter:slo]
use = egg:swift#slo
min_segment_size = 4
[filter:dlo]
use = egg:swift#dlo
[filter:cache]
use = egg:swift#memcache
[filter:s3token]
paste.filter_factory = keystone.middleware.s3_token:filter_factory
auth_host = localhost
auth_port = 35357
auth_protocol = http
auth_uri = http://localhost:5000/
admin_tenant_name = service
admin_user = swift
admin_password = password
cache = swift.cache
[filter:authtoken]
paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory
auth_host = localhost
auth_port = 35357
auth_protocol = http
auth_uri = http://localhost:5000/
admin_tenant_name = service
admin_user = swift
admin_password = password
cache = swift.cache
[filter:keystoneauth]
use = egg:swift#keystoneauth
operator_roles = admin, swiftoperator
reseller_admin_role = ResellerAdmin

View File

@ -0,0 +1,3 @@
[swift-hash]
swift_hash_path_suffix=swift3
swift_hash_path_prefix=swift3

View File

@ -0,0 +1,16 @@
#
# QA groups control file
#
# Defines test groups
# - do not start group names with a digit
# - test-group association are one line per test
# - each test can be part of multiple groups
#
# auto: tests that are run by default
# quick: tests that take less than 30 seconds (normally)
# bucket: test bucket operations
# object: test object operations
#
001 auto quick
002 auto quick
003 auto quick

View File

@ -0,0 +1,30 @@
#!/usr/bin/env python
# Copyright (c) 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from swift.common.utils import parse_options
from swift.common import utils
# Point swift at the test-local config before importing the WSGI helpers;
# NOTE(review): presumably wsgi reads SWIFT_CONF_FILE at import/run time —
# confirm before reordering these imports.
utils.SWIFT_CONF_FILE = 'conf/swift.conf'
from swift.common.wsgi import run_wsgi

if __name__ == '__main__':
    # Usage: run_daemon.py <server> <port> <conf_file> [options]
    # Pop the two leading positionals so the remaining argv matches what
    # parse_options() expects (conf_file plus option flags).
    server = sys.argv.pop(1)
    port = sys.argv.pop(1)
    conf_file, options = parse_options()
    sys.exit(run_wsgi(conf_file, server + '-server', default_port=port,
                      **options))

View File

@ -0,0 +1,104 @@
#!/bin/bash
# Run everything relative to this script's directory.
cd $(readlink -f $(dirname $0))
. ./common.config
CONF_DIR=$(readlink -f ./conf)

# Start from a clean scratch area with fake storage devices and
# directories for keystone PKI material.
rm -rf $TEST_DIR
mkdir -p ${TEST_DIR}/etc ${TEST_DIR}/log
mkdir -p ${TEST_DIR}/sda ${TEST_DIR}/sdb ${TEST_DIR}/sdc
mkdir -p ${TEST_DIR}/certs ${TEST_DIR}/private

# create config files
# Choose the auth-middleware fragment substituted into proxy-server.conf.in.
if [ "$AUTH" == 'keystone' ]; then
    MIDDLEWARE="s3token authtoken keystoneauth"
elif [ "$AUTH" == 'tempauth' ]; then
    MIDDLEWARE="tempauth"
else
    _fatal "unknown auth: $AUTH"
fi

# Instantiate each *.conf.in template with the local user/paths.
for server in keystone swift proxy-server object-server container-server account-server; do
    sed -e "s#%MIDDLEWARE%#${MIDDLEWARE}#g" \
        -e "s#%USER%#`whoami`#g" \
        -e "s#%TEST_DIR%#${TEST_DIR}#g" \
        -e "s#%CONF_DIR%#${CONF_DIR}#g" \
        conf/${server}.conf.in \
        > conf/${server}.conf
done

# setup keystone
if [ "$AUTH" == 'keystone' ]; then
    . ./setup_keystone
fi

# build ring
# Arguments to "create" are: part power 0, 3 replicas, min_part_hours 0.
cd ${TEST_DIR}/etc/
swift-ring-builder object.builder create 0 3 0
swift-ring-builder container.builder create 0 3 0
swift-ring-builder account.builder create 0 3 0
swift-ring-builder object.builder add z0-127.0.0.1:6000/sda 1
swift-ring-builder object.builder add z1-127.0.0.1:6000/sdb 1
swift-ring-builder object.builder add z2-127.0.0.1:6000/sdc 1
swift-ring-builder container.builder add z0-127.0.0.1:6001/sda 1
swift-ring-builder container.builder add z1-127.0.0.1:6001/sdb 1
swift-ring-builder container.builder add z2-127.0.0.1:6001/sdc 1
swift-ring-builder account.builder add z0-127.0.0.1:6002/sda 1
swift-ring-builder account.builder add z1-127.0.0.1:6002/sdb 1
swift-ring-builder account.builder add z2-127.0.0.1:6002/sdc 1
swift-ring-builder object.builder rebalance
swift-ring-builder container.builder rebalance
swift-ring-builder account.builder rebalance
cd -
# start swift servers
# Launch a server command in the background, export its pid as
# ${name}_pid, and wait (up to 60 seconds) until its log reports a
# started child.  On timeout, dump the log and abort via _fatal.
_start()
{
    local name=$1; shift

    echo Start ${name}-server.
    "$@" > ${TEST_DIR}/log/${name}.log 2>&1 &
    export ${name}_pid=$!

    local i=0
    while [ $i -lt 60 ]; do  # wait at most 60 seconds
        if grep -q 'Started child' ${TEST_DIR}/log/${name}.log; then
            return
        fi
        sleep 1
        i=$((i + 1))
    done

    cat ${TEST_DIR}/log/${name}.log
    _fatal "Cannot start ${name}-server."
}
# Bring up the storage servers first, then the proxy.
_start account ./run_daemon.py account 6002 conf/account-server.conf -v
_start container ./run_daemon.py container 6001 conf/container-server.conf -v
_start object ./run_daemon.py object 6000 conf/object-server.conf -v

# Run the proxy under coverage so the report below reflects the swift3
# middleware code exercised by the functional tests.
coverage erase
_start proxy coverage run --branch --include=../../* --omit=./* \
    ./run_daemon.py proxy 8080 conf/proxy-server.conf -v

# run tests
./check "$@"
rvalue=$?

# cleanup
# NOTE(review): $keystone_pid is only set when AUTH=keystone; under
# tempauth this kill simply gets one fewer pid argument.
kill -HUP $proxy_pid $account_pid $container_pid $object_pid $keystone_pid

# show report
# presumably gives the proxy time to flush coverage data after SIGHUP —
# TODO confirm
sleep 3
coverage report
coverage html

exit $rvalue

View File

@ -0,0 +1,108 @@
#!/bin/bash
#
# Copyright (c) 2014 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Write a .s3curl credentials file in the current directory so s3curl.pl
# can sign requests for the tester, tester2 and admin users.  The
# "endpoints" entry is the Swift host stripped of its port; s3curl.pl
# matches it to detect virtual-hosted-style bucket requests.
_s3_setup()
{
	echo "
%awsSecretAccessKeys = (
tester => {
id => '$TESTER_ACCESS_KEY',
key => '$TESTER_SECRET_KEY',
endpoints => '${SWIFT_HOST%:*}',
},
tester2 => {
id => '$TESTER2_ACCESS_KEY',
key => '$TESTER2_SECRET_KEY',
endpoints => '${SWIFT_HOST%:*}',
},
admin => {
id => '$ADMIN_ACCESS_KEY',
key => '$ADMIN_SECRET_KEY',
endpoints => '${SWIFT_HOST%:*}',
},
);" > .s3curl
	# s3curl.pl refuses credential files readable/writable by others.
	chmod 600 .s3curl
}
# Issue a signed S3 request via s3curl.pl as $S3USER.  Prints the
# response body on stdout, logs the command and HTTP status to stderr,
# and returns success iff the status code indicates success.
# Optional env inputs: MD5 (Content-MD5), CONTENT_TYPE (Content-Type).
_s3curl()
{
	local tmp_file=$tmp.$RANDOM
	local args=""

	if [ "$S3USER" == "" ]; then
		_die "S3USER is not defined."
	fi
	args="--id $S3USER"
	if [ "$MD5" != "" ]; then
		args="$args --contentMd5 $MD5"
	fi
	if [ "$CONTENT_TYPE" != "" ]; then
		args="$args --contentType $CONTENT_TYPE"
	fi

	# -w '%{http_code}' appends the status to the output, so the last
	# 3 bytes are the status and everything before is the body.
	LANG=C ./s3curl.pl $args -- -s "$@" -w '%{http_code}' > $tmp_file
	status=$(tail -c -3 $tmp_file)
	echo "> s3curl $args -- $@... $status" | _filter_curl_command >&2
	head -c -3 $tmp_file
	_is_http_success $status
}
# Thin per-method wrappers around _s3curl.  Each takes the request path
# as its first argument; remaining arguments are passed through to curl.

# HEAD request (-I makes curl print the response headers).
_s3_head()
{
	local path=$1; shift
	_s3curl -I -X HEAD "$@" http://${SWIFT_HOST}${path}
}

# GET request.
_s3_get()
{
	local path=$1; shift
	_s3curl -X GET "$@" http://${SWIFT_HOST}${path}
}

# PUT request.
_s3_put()
{
	local path=$1; shift
	_s3curl -X PUT "$@" http://${SWIFT_HOST}${path}
}

# POST request.
_s3_post()
{
	local path=$1; shift
	_s3curl -X POST "$@" http://${SWIFT_HOST}${path}
}

# DELETE request.
_s3_delete()
{
	local path=$1; shift
	_s3curl -X DELETE "$@" http://${SWIFT_HOST}${path}
}
# make sure this script returns success
/bin/true

326
swift3/test/functional/s3curl.pl Executable file
View File

@ -0,0 +1,326 @@
#!/usr/bin/perl -w
# Copyright 2006-2010 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this
# file except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License
# for the specific language governing permissions and limitations under the License.
use strict;
use POSIX;

# you might need to use CPAN to get these modules.
# run perl -MCPAN -e "install <module>" to get them.
use FindBin;
use Getopt::Long qw(GetOptions);

# Indexes into the list returned by stat() (see perldoc -f stat).
use constant STAT_MODE => 2;
use constant STAT_UID => 4;

# begin customizing here
my $CURL = "curl";
# stop customizing here

# Endpoint hostnames loaded from the selected credential entry.
my @endpoints = ();

# Targets for the command-line options parsed by GetOptions below.
my $cmdLineSecretKey;
my %awsSecretAccessKeys = ();
my $keyFriendlyName;    # NOTE(review): declared but unused in this script
my $keyId;
my $secretKey;
my $contentType = "";
my $acl;
my $contentMD5 = "";
my $fileToPut;
my $createBucket;
my $doDelete;
my $doHead;
my $help;
my $debug = 0;
my $copySourceObject;
my $copySourceRange;
my $postBody;
my $print;

# Look for the credentials file next to this script first, then in $HOME.
my $DOTFILENAME=".s3curl";
my $EXECFILE=$FindBin::Bin;
my $LOCALDOTFILE = $EXECFILE . "/" . $DOTFILENAME;
my $HOMEDOTFILE = $ENV{HOME} . "/" . $DOTFILENAME;
my $DOTFILE = -f $LOCALDOTFILE? $LOCALDOTFILE : $HOMEDOTFILE;

if (-f $DOTFILE) {
    open(CONFIG, $DOTFILE) || die "can't open $DOTFILE: $!";

    # Refuse credentials that other users could read or tamper with.
    my @stats = stat(*CONFIG);
    if (($stats[STAT_UID] != $<) || $stats[STAT_MODE] & 066) {
        die "I refuse to read your credentials from $DOTFILE as this file is " .
            "readable by, writable by or owned by someone else. Try " .
            "chmod 600 $DOTFILE";
    }

    # The dot file is plain Perl that assigns %awsSecretAccessKeys.
    my @lines = <CONFIG>;
    close CONFIG;
    eval("@lines");
    die "Failed to eval() file $DOTFILE:\n$@\n" if ($@);
}

GetOptions(
    'id=s' => \$keyId,
    'key=s' => \$cmdLineSecretKey,
    'contentType=s' => \$contentType,
    'acl=s' => \$acl,
    'contentMd5=s' => \$contentMD5,
    'put=s' => \$fileToPut,
    'copySrc=s' => \$copySourceObject,
    'copySrcRange=s' => \$copySourceRange,
    'post:s' => \$postBody,
    'delete' => \$doDelete,
    'createBucket:s' => \$createBucket,
    'head' => \$doHead,
    'help' => \$help,
    'debug' => \$debug,
    'print' => \$print,
);

my $usage = <<USAGE;
Usage $0 --id friendly-name (or AWSAccessKeyId) [options] -- [curl-options] [URL]
options:
--key SecretAccessKey id/key are AWSAcessKeyId and Secret (unsafe)
--contentType text/plain set content-type header
--acl public-read use a 'canned' ACL (x-amz-acl header)
--contentMd5 content_md5 add x-amz-content-md5 header
--put <filename> PUT request (from the provided local file)
--post [<filename>] POST request (optional local file)
--copySrc bucket/key Copy from this source key
--copySrcRange {startIndex}-{endIndex}
--createBucket [<region>] create-bucket with optional location constraint
--head HEAD request
--debug enable debug logging
--print print command instead of executing it
common curl options:
-H 'x-amz-acl: public-read' another way of using canned ACLs
-v verbose logging
USAGE

die $usage if $help || !defined $keyId;

if ($cmdLineSecretKey) {
    # Secret key given on the command line: warn loudly, then use it.
    printCmdlineSecretWarning();
    sleep 5;
    $secretKey = $cmdLineSecretKey;
} else {
    # Resolve the friendly name against the dot-file credentials.
    my $keyinfo = $awsSecretAccessKeys{$keyId};
    die "I don't know about key with friendly name $keyId. " .
        "Do you need to set it up in $DOTFILE?"
        unless defined $keyinfo;
    $keyId = $keyinfo->{id};
    $secretKey = $keyinfo->{key};
    @endpoints = split /,/, $keyinfo->{endpoints};
}

# Infer the HTTP method from the options; an explicit -X curl argument
# parsed later may still override this.
my $method = "";
if (defined $fileToPut or defined $createBucket or defined $copySourceObject) {
    $method = "PUT";
} elsif (defined $doDelete) {
    $method = "DELETE";
} elsif (defined $doHead) {
    $method = "HEAD";
} elsif (defined $postBody) {
    $method = "POST";
} else {
    $method = "GET";
}
my $resource;
my $host;

# x-amz-* headers that participate in the AWS signature v2 string to sign.
my %xamzHeaders;
$xamzHeaders{'x-amz-acl'}=$acl if (defined $acl);
$xamzHeaders{'x-amz-copy-source'}=$copySourceObject if (defined $copySourceObject);
$xamzHeaders{'x-amz-copy-source-range'}="bytes=$copySourceRange" if (defined $copySourceRange);

# try to understand curl args
for (my $i=0; $i<@ARGV; $i++) {
    my $arg = $ARGV[$i];
    # resource name
    if ($arg =~ /https?:\/\/([^\/:?]+)(?::(\d+))?([^?]*)(?:\?(\S+))?/) {
        $host = $1 if !$host;
        my $port = defined $2 ? $2 : "";
        my $requestURI = $3;
        my $query = defined $4 ? $4 : "";
        debug("Found the url: host=$host; port=$port; uri=$requestURI; query=$query;");
        if (length $requestURI) {
            $resource = $requestURI;
        } else {
            $resource = "/";
        }
        # Only these "sub-resource" query parameters are part of the
        # canonicalized resource that gets signed.
        my @attributes = ();
        for my $attribute ("acl", "location", "logging", "notification", "lifecycle",
            "partNumber", "policy", "requestPayment", "response-cache-control",
            "response-content-disposition", "response-content-encoding", "response-content-language",
            "response-content-type", "response-expires", "torrent", "delete", "restore",
            "uploadId", "uploads", "versionId", "versioning", "versions", "website") {
            if ($query =~ /(?:^|&)($attribute(?:=[^&]*)?)(?:&|$)/) {
                push @attributes, uri_unescape($1);
            }
        }
        if (@attributes) {
            $resource .= "?" . join("&", @attributes);
        }
        # handle virtual hosted requests
        getResourceToSign($host, \$resource);
    }
    elsif ($arg =~ /\-X/) {
        # mainly for DELETE
        $method = $ARGV[++$i];
    }
    elsif ($arg =~ /\-H/) {
        my $header = $ARGV[++$i];
        #check for host: and x-amz*
        if ($header =~ /^[Hh][Oo][Ss][Tt]:(.+)$/) {
            $host = $1;
        }
        elsif ($header =~ /^([Xx]-[Aa][Mm][Zz]-.+?): *(.+)$/) {
            my $name = lc $1;
            my $value = $2;
            # merge with existing values
            if (exists $xamzHeaders{$name}) {
                $value = $xamzHeaders{$name} . "," . $value;
            }
            $xamzHeaders{$name} = $value;
        }
    }
}

die "Couldn't find resource by digging through your curl command line args!"
    unless defined $resource;

# Canonicalize the x-amz-* headers: lowercase names sorted, "name:value\n".
my $xamzHeadersToSign = "";
foreach (sort (keys %xamzHeaders)) {
    my $headerValue = $xamzHeaders{$_};
    $xamzHeadersToSign .= "$_:$headerValue\n";
}

my $httpDate = POSIX::strftime("%a, %d %b %Y %H:%M:%S +0000", gmtime );
my $stringToSign = "$method\n$contentMD5\n$contentType\n$httpDate\n$xamzHeadersToSign$resource";
debug("StringToSign='" . $stringToSign . "'");
# NOTE(review): $stringToSign and $secretKey are interpolated into a shell
# command here — acceptable for test credentials, unsafe for untrusted input.
my $signature = `echo -n "$stringToSign" | openssl dgst -sha1 -hmac "$secretKey" -binary | base64`;
chomp($signature);
debug("signature='" . $signature. "'");

# Assemble the final curl invocation.
my @args = ();
push @args, ("-H", "Date: $httpDate");
push @args, ("-H", "Authorization: AWS $keyId:$signature");
push @args, ("-H", "x-amz-acl: $acl") if (defined $acl);
push @args, ("-L");
push @args, ("-H", "content-type: $contentType") if (length $contentType);
push @args, ("-H", "Content-MD5: $contentMD5") if (length $contentMD5);
push @args, ("-T", $fileToPut) if (defined $fileToPut);
push @args, ("-X", "DELETE") if (defined $doDelete);
push @args, ("-X", "POST") if(defined $postBody);
push @args, ("-I") if (defined $doHead);

if (defined $createBucket) {
    # createBucket is a special kind of put from stdin. Reason being, curl mangles the Request-URI
    # to include the local filename when you use -T and it decides there is no remote filename (bucket PUT)
    my $data="";
    if (length($createBucket)>0) {
        $data="<CreateBucketConfiguration><LocationConstraint>$createBucket</LocationConstraint></CreateBucketConfiguration>";
    }
    push @args, ("--data-binary", $data);
    push @args, ("-X", "PUT");
} elsif (defined $copySourceObject) {
    # copy operation is a special kind of PUT operation where the resource to put
    # is specified in the header
    push @args, ("-X", "PUT");
    push @args, ("-H", "x-amz-copy-source: $copySourceObject");
} elsif (defined $postBody) {
    if (length($postBody)>0) {
        push @args, ("-T", $postBody);
    }
}

push @args, @ARGV;

debug("exec $CURL " . join (" ", @args));

# With --print, show the command instead of running it.
if (defined($print)) {
    print join(" ", $CURL, @args, "\n");
    exit(0)
}

exec($CURL, @args) or die "can't exec program: $!";
# Emit a diagnostic line on STDERR when --debug was given.  Embedded
# newlines are rendered as literal "\n" so each message stays on one line.
sub debug {
    my ($msg) = @_;
    $msg =~ s/\n/\\n/g;
    print STDERR "s3curl: $msg\n" if ($debug);
}
# getResourceToSign(HOST, \$resource)
#
# Rewrite the resource in place to match what S3 expects in the string
# to sign.  If HOST looks like "<bucket>.<endpoint>" (virtual-hosted
# style) the bucket name is prepended to the resource; if HOST equals a
# known endpoint the resource is left alone; otherwise HOST is treated
# as a CNAME bucket and prepended verbatim.
sub getResourceToSign {
    my ($host, $resourceToSignRef) = @_;
    for my $ep (@endpoints) {
        if ($host =~ /(.*)\.$ep/) { # vanity subdomain case
            my $vanityBucket = $1;
            $$resourceToSignRef = "/$vanityBucket".$$resourceToSignRef;
            debug("vanity endpoint signing case");
            return;
        }
        elsif ($host eq $ep) {
            debug("ordinary endpoint signing case");
            return;
        }
    }
    # cname case
    $$resourceToSignRef = "/$host".$$resourceToSignRef;
    debug("cname endpoint signing case");
}
# Print a warning to STDERR when the secret key was passed with --key on
# the command line; the caller then sleeps and proceeds anyway.
sub printCmdlineSecretWarning {
    print STDERR <<END_WARNING;
WARNING: It isn't safe to put your AWS secret access key on the
command line! The recommended key management system is to store
your AWS secret access keys in a file owned by, and only readable
by you.
For example:
\%awsSecretAccessKeys = (
# personal account
personal => {
id => '1ME55KNV6SBTR7EXG0R2',
key => 'zyMrlZUKeG9UcYpwzlPko/+Ciu0K2co0duRM3fhi',
},
# corporate account
company => {
id => '1ATXQ3HHA59CYF1CVS02',
key => 'WQY4SrSS95pJUT95V6zWea01gBKBCL6PI0cdxeH8',
},
);
\$ chmod 600 $DOTFILE
Will sleep and continue despite this problem.
Please set up $DOTFILE for future requests.
END_WARNING
}
# Decode %XX percent-escapes in a query-string component and return the
# decoded string.
sub uri_unescape {
    my ($escaped) = @_;
    $escaped =~ s/%([0-9A-Fa-f]{2})/chr(hex($1))/eg;
    debug("replaced string: " . $escaped);
    return ($escaped);
}

View File

@ -0,0 +1,73 @@
export OS_AUTH_URL=http://localhost:35357/v2.0
export OS_TENANT_NAME=admin
export OS_USERNAME=admin
export OS_PASSWORD=password
export OS_TOKEN=ADMIN
export OS_URL=http://localhost:35357/v2.0
# Read "openstack ... create" table output on stdin and print the value
# of the " id " row (fourth whitespace-separated field).
_get_id()
{
	awk '/ id / { print $4 }'
}
# _add_user NAME TENANT USER PASSWORD [ROLE]
#
# Ensure TENANT exists (creating it if needed), create USER in it,
# optionally grant ROLE (creating the role if needed), then create EC2
# credentials and export them as ${NAME}_ACCESS_KEY / ${NAME}_SECRET_KEY.
_add_user()
{
	local name=$1
	local tenant=$2
	local user=$3
	local password=$4
	local role=$5

	TENANT_ID=$(openstack project list | awk "/ $tenant / { print \$2 }")
	if [ "$TENANT_ID" == "" ]; then
		# create a new tenant
		TENANT_ID=$(openstack project create $tenant | _get_id)
	fi

	USER_ID=$(openstack user create $user --password=$password \
	    --project $TENANT_ID | _get_id)

	if [ "$role" != "" ]; then
		ROLE_ID=$(openstack role list | awk "/ $role / { print \$2 }")
		if [ "$ROLE_ID" == "" ]; then
			# create a new role
			ROLE_ID=$(openstack role create $role | _get_id)
		fi
		openstack role add --user $USER_ID --project $TENANT_ID $ROLE_ID
	fi

	# "-f shell" prints access=.../secret=... assignments; eval them
	# into this shell so they can be re-exported under NAME.
	eval $(openstack ec2 credentials create --user $user --project $tenant \
	    -f shell -c access -c secret)
	export ${name}_ACCESS_KEY=$access
	export ${name}_SECRET_KEY=$secret
}
# Create the Keystone users/roles used by the functional tests and
# register the object-store service endpoint for Swift.
_create_swift_accounts()
{
	_add_user SERVICE service swift password admin
	_add_user ADMIN test admin admin ResellerAdmin
	_add_user TESTER test tester testing admin
	_add_user TESTER2 test tester2 testing2 admin

	SERVICE=$(openstack service create swift --type=object-store | _get_id)
	# \$(tenant_id)s is substituted by keystoneauth per request.
	openstack endpoint create $SERVICE \
	    --publicurl "http://localhost:8080/v1/AUTH_\$(tenant_id)s"
}
# Initialize the Keystone database and PKI material, start keystone-all
# in the background (pid exported as $keystone_pid), then create the
# Swift service accounts used by the functional tests.
#
# The database and certificates are prepared *before* keystone-all is
# launched; the original ordering started the server first, racing it
# against db_sync/pki_setup on a dropped/not-yet-created database.
_setup_keystone()
{
	# Recreate the keystone database from scratch for a clean run.
	mysql -uroot -ppassword -e "DROP DATABASE IF EXISTS keystone;"
	mysql -uroot -ppassword -e "CREATE DATABASE keystone CHARACTER SET utf8;"
	keystone-manage --config-file conf/keystone.conf --debug db_sync
	keystone-manage --config-file conf/keystone.conf --debug pki_setup

	keystone-all --config-file conf/keystone.conf --debug > ${TEST_DIR}/log/keystone.log 2>&1 &
	export keystone_pid=$!

	# Wait (up to 30s) for the admin API to answer before creating
	# accounts, so the openstack CLI calls below don't fail.
	local i
	for i in $(seq 30); do
		curl -s http://localhost:35357/ > /dev/null && break
		sleep 1
	done

	_create_swift_accounts
}
_setup_keystone

View File

@ -0,0 +1,112 @@
#!/bin/bash
#
# Copyright (c) 2014 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Obtain an auth token for the admin user via TempAuth or Keystone
# (selected by $AUTH), export TENANT_ID and TOKEN_ID, then delete all
# existing containers and objects so every test starts from an empty
# account.
_sw_setup()
{
	case "$AUTH"
	in
	tempauth)
		# TempAuth v1.0: token comes back in the x-storage-token header.
		export TENANT_ID="AUTH_${TENANT}"
		export TOKEN_ID=$($CURL_PROG -s -I -X GET \
		    -H "x-storage-user: ${TENANT}:${ADMIN_USER}" \
		    -H "x-storage-pass: ${ADMIN_PASS}" \
		    http://${SWIFT_HOST}/auth/v1.0 | \
		    _hq x-storage-token)
		;;
	keystone)
		# Keystone v2.0: POST credentials as XML, extract tenant and
		# token ids from the XML response.
		local xml="<auth tenantName=\"${TENANT}\">
<passwordCredentials username=\"${ADMIN_USER}\"
password=\"${ADMIN_PASS}\"/></auth>"
		local res=$($CURL_PROG -s -d "$xml" \
		    -H 'Content-type: application/xml' \
		    -H 'Accept: application/xml' \
		    http://${KEYSTONE_HOST}/v2.0/tokens)
		export TENANT_ID="AUTH_$(echo $res | _xpath '/access/token/tenant/@id')"
		export TOKEN_ID=$(echo $res | _xpath '/access/token/@id')
		;;
	*)
		_die "unknown auth, $AUTH"
		;;
	esac

	local c
	local o

	# remove user data
	for c in $(_sw_get / 2>/dev/null); do
		_retry _sw_post /$c -H "x-versions-location: $c" # disable versioning
		for o in $(_sw_get /$c 2>/dev/null); do
			_retry _sw_delete /$c/$o
		done
		_retry _sw_delete /$c
	done
}
# Issue an authenticated Swift request with curl.  Prints the response
# body on stdout, logs the command and HTTP status on stderr, and
# returns success iff the status code indicates success.
_swcurl()
{
	local tmp_file=$tmp.$RANDOM

	# -w '%{http_code}' appends the status to the output, so the last
	# 3 bytes are the status and everything before is the body.
	$CURL_PROG -s -H "x-storage-token: ${TOKEN_ID}" "$@" -w '%{http_code}' \
	    > $tmp_file
	status=$(tail -c -3 $tmp_file)
	echo "> curl $@... $status" | _filter_curl_command >&2
	head -c -3 $tmp_file
	_is_http_success $status
}
# Thin per-method wrappers around _swcurl.  Each takes the request path
# (relative to the account) as its first argument; remaining arguments
# are passed through to curl.

# HEAD request (-I makes curl print the response headers).
_sw_head()
{
	local path=$1; shift
	_swcurl -I -X HEAD "$@" http://${SWIFT_HOST}/v1/${TENANT_ID}${path}
}

# GET request.
_sw_get()
{
	local path=$1; shift
	_swcurl -X GET "$@" http://${SWIFT_HOST}/v1/${TENANT_ID}${path}
}

# PUT request.
_sw_put()
{
	local path=$1; shift
	_swcurl -X PUT "$@" http://${SWIFT_HOST}/v1/${TENANT_ID}${path}
}

# POST request.
_sw_post()
{
	local path=$1; shift
	_swcurl -X POST "$@" http://${SWIFT_HOST}/v1/${TENANT_ID}${path}
}

# DELETE request.
_sw_delete()
{
	local path=$1; shift
	_swcurl -X DELETE "$@" http://${SWIFT_HOST}/v1/${TENANT_ID}${path}
}
# make sure this script returns success
/bin/true

View File

@ -0,0 +1,25 @@
#!/usr/bin/env python
# Copyright (c) 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from lxml.etree import fromstring, tostring

if __name__ == '__main__':
    # Pretty-print an XML document read from stdin (Python 2 script).
    # Newlines are stripped first so lxml parses one continuous document.
    xml = sys.stdin.read().replace('\n', '')
    elem = fromstring(xml)
    # Trailing comma suppresses print's extra newline.
    print tostring(elem, xml_declaration=True, encoding='UTF-8',
                   pretty_print=True),

23
swift3/test/functional/xpath.py Executable file
View File

@ -0,0 +1,23 @@
#!/usr/bin/env python
# Copyright (c) 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from swift3.etree import fromstring

if __name__ == '__main__':
    # Usage: xpath.py XPATH  (XML document on stdin; Python 2 script)
    # Prints the first node matching the given XPath expression.
    elem = fromstring(sys.stdin.read())
    print elem.xpath(sys.argv[1])[0]

View File

@ -4,3 +4,4 @@ nose
openstack.nose_plugin
coverage
pylint
python-openstackclient

12
tox.ini
View File

@ -1,13 +1,15 @@
[tox]
envlist = py26,py27,pylint,pep8
envlist = py26,py27,tempauth,keystone,pylint,pep8
minversion = 1.6
skipsdist = True
[testenv]
whitelist_externals =/bin/bash
usedevelop = True
install_command = pip install {opts} {packages}
deps =
https://launchpad.net/swift/icehouse/1.13.0/+download/swift-1.13.0.tar.gz
https://launchpad.net/keystone/icehouse/2014.1.1/+download/keystone-2014.1.1.tar.gz
-r{toxinidir}/test-requirements.txt
commands = nosetests {posargs:swift3/test/unit}
setenv = VIRTUAL_ENV={envdir}
@ -20,6 +22,14 @@ setenv = VIRTUAL_ENV={envdir}
NOSE_WITH_COVERAGE=1
NOSE_COVER_BRANCHES=1
[testenv:tempauth]
commands = /bin/bash {posargs:swift3/test/functional/run_test.sh}
setenv = AUTH=tempauth
[testenv:keystone]
commands = /bin/bash {posargs:swift3/test/functional/run_test.sh}
setenv = AUTH=keystone
[testenv:pylint]
commands = pylint -E swift3