Release 0.4.2

This release includes:

* A fix for when running on environments that don't support unicode at all, just ASCII

Merge tag '0.4.2' into debian/mitaka
commit dcd89d54eb
.coveragerc

@@ -4,4 +4,4 @@ source = os_testr
 omit = os_testr/tests/*,os_testr/openstack/*

 [report]
-ignore-errors = True
+ignore_errors = True
README.rst

@@ -17,10 +17,3 @@ Features
 * subunit-trace: an output filter for a subunit stream which provides useful
   information about the run
 * subunit2html: generates a test results html page from a subunit stream
-
-Release Notes
-=============
-
-0.1.0
------
-* First release which includes: ostestr, subunit-trace, and subunit2html
TODO.rst | 2 ++

@@ -15,3 +15,5 @@ Long Term
 * Add subunit-trace functional tests
   ** Sample subunit streams and test output from subunit-trace
 * Add testing for subunit2html
+* Stop using subprocess in ostestr, everything it uses is python so there
+  isn't a need to shell out for everything.
doc/source/conf.py

@@ -22,7 +22,7 @@ sys.path.insert(0, os.path.abspath('../..'))
 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
 extensions = [
     'sphinx.ext.autodoc',
-    #'sphinx.ext.intersphinx',
+    # 'sphinx.ext.intersphinx',
     'oslosphinx'
 ]

@@ -38,7 +38,7 @@ master_doc = 'index'

 # General information about the project.
 project = u'os-testr'
-copyright = u'2013, OpenStack Foundation'
+copyright = u'2015, Matthew Treinish'

 # If true, '()' will be appended to :func: etc. cross-reference text.
 add_function_parentheses = True
@@ -68,8 +68,15 @@ latex_documents = [
     ('index',
      '%s.tex' % project,
      u'%s Documentation' % project,
-     u'OpenStack Foundation', 'manual'),
+     u'Matthew Treinish', 'manual'),
 ]

+man_pages = [('ostestr', 'ostestr', 'tooling to run OpenStack tests',
+              ['Matthew Treinish'], 1),
+             ('subunit_trace', 'subunit-trace', 'pretty output filter for '
+              'subunit streams', ['Matthew Treinish'], 1),
+             ('subunit2html', 'subunit2html', 'generate a html results page '
+              'from a subunit stream', ['Matthew Treinish'], 1)]
+
 # Example configuration for intersphinx: refer to the Python standard library.
-#intersphinx_mapping = {'http://docs.python.org/': None}
+# intersphinx_mapping = {'http://docs.python.org/': None}
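With the man_pages entries in place, the manual pages can be generated with
Sphinx's standard man builder (a sketch, assuming this repo's usual doc
layout)::

    $ sphinx-build -b man doc/source doc/build/man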
doc/source/history.rst | 1 + (new file)

@@ -0,0 +1 @@
+.. include:: ../../ChangeLog
doc/source/index.rst

@@ -16,6 +16,7 @@ Contents:
    usage
    contributing
    todo
+   history

 Indices and tables
 ==================
doc/source/ostestr.rst | 217 + (new file)

@@ -0,0 +1,217 @@
.. _ostestr:

ostestr
=======

The ostestr command provides a wrapper around the testr command included in
the testrepository package. It's designed to build on the functionality
included in testr and work around several UI bugs in the short term. By
default it also has output that is much more useful for OpenStack's test
suites, which are lengthy in both runtime and number of tests. Please note
that the CLI semantics are still a work in progress as the project is quite
young, so default behavior might change in future versions.
Summary
-------

ostestr [-b|--blacklist_file <blacklist_file>] [-r|--regex REGEX]
        [-p|--pretty] [--no-pretty] [-s|--subunit] [-l|--list]
        [-n|--no-discover <test_id>] [--slowest] [--no-slowest]
        [--pdb <test_id>] [--parallel] [--serial]
        [-c|--concurrency <workers>] [--until-failure] [--print-exclude]
Options
-------

--blacklist_file BLACKLIST_FILE, -b BLACKLIST_FILE
    Path to a blacklist file; this file contains a
    separate regex exclude on each newline
--regex REGEX, -r REGEX
    A normal testr selection regex. If a blacklist file is
    specified, the regex will be appended to the end of
    the generated regex from that file
--pretty, -p
    Print pretty output from subunit-trace. This is
    mutually exclusive with --subunit
--no-pretty
    Disable the pretty output with subunit-trace
--subunit, -s
    Output the raw subunit v2 from the test run. This is
    mutually exclusive with --pretty
--list, -l
    List all the tests which will be run.
--no-discover TEST_ID, -n TEST_ID
    Takes in a single test to bypass test discovery and
    just execute the test specified
--slowest
    After the test run print the slowest tests
--no-slowest
    After the test run don't print the slowest tests
--pdb TEST_ID
    Run a single test that has pdb traces added
--parallel
    Run tests in parallel (this is the default)
--serial
    Run tests serially
--concurrency WORKERS, -c WORKERS
    The number of workers to use when running in parallel.
    By default this is the number of CPUs
--until-failure
    Run the tests in a loop until a failure is
    encountered. Running with subunit or pretty output
    enabled will force the loop to run tests serially
--print-exclude
    If an exclude file is used, this option will print the
    comment from the same line and all skipped tests
    before the test run
Running Tests
-------------

os-testr is primarily for running tests. At its most basic level you just
invoke ostestr to run a test suite for a project (assuming the project is
already set up to run tests using testr). For example::

    $ ostestr

This will run tests in parallel (with the number of workers matching the number
of CPUs) and with subunit-trace output. If you need to run tests in serial you
can use the serial option::

    $ ostestr --serial

Or if you need to adjust the concurrency but still run in parallel you can use
-c/--concurrency::

    $ ostestr --concurrency 2

If you only want to run an individual test module or something more specific
(a single class, or test) and parallel execution doesn't matter, you can use
-n/--no-discover to skip test discovery and directly call subunit.run on the
tests under the covers. Bypassing discovery is desirable when running a
small subset of tests in a larger test suite because the discovery time can
often far exceed the total run time of the tests.

For example::

    $ ostestr --no-discover test.test_thing.TestThing.test_thing_method

Additionally, if you need to run a single test module, class, or single test
with pdb enabled you can use --pdb to directly call testtools.run under the
covers, which works with pdb. For example::

    $ ostestr --pdb tests.test_thing.TestThing.test_thing_method
Test Selection
--------------

ostestr is designed to build on top of the test selection in testr. testr only
exposes a regex option to select tests; this equivalent functionality is
exposed via the --regex option. For example::

    $ ostestr --regex 'magic\.regex'

This will do a straight passthrough of the provided regex to testr.
Additionally, ostestr allows you to specify a blacklist file to define a set
of regexes to exclude. You can specify a blacklist file with the
--blacklist_file/-b option, for example::

    $ ostestr --blacklist_file $path_to_file

The format for the file is one regex per line, with '#' used to signify the
start of a comment on a line. For example::

    # Blacklist File
    ^regex1 # Excludes these tests
    .*regex2 # exclude those tests

This will generate a regex to pass to testr which excludes any tests matching
either '^regex1' or '.*regex2'. If a blacklist file is used in conjunction
with the --regex option, the regex specified with --regex will be appended to
the output generated from the --blacklist_file. It's also worth noting that
the regex test selection options cannot be used in conjunction with the
--no-discover or --pdb options described in the previous section. This is
because the regex selection requires using testr under the covers to actually
do the filtering, and those two options do not use testr.
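For illustration, the construct_regex implementation included later in this
diff turns the blacklist into a negative lookahead, joining the file's
regexes in reverse line order, so the example file above would produce a
filter along the lines of::

    ^((?!.*regex2|^regex1).)*$

Any test id matching neither excluded regex passes the lookahead and is run.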
It's also worth noting that you can use the test list option to dry run any
selection arguments you are using. You just need to use --list/-l with your
selection options to do this, for example::

    $ ostestr --regex 'regex3.*' --blacklist_file blacklist.txt --list

This will list all the tests which will be run by ostestr using that
combination of arguments.

Please note that all of this selection functionality will be expanded on in
the future, and a default grammar for selecting multiple tests will be chosen
in a future release. However, as of right now, the current arguments (which
are guaranteed to remain in place) are required to perform any selection
logic while this functionality is still under development.
Output Options
--------------

By default ostestr will use subunit-trace as the output filter on the test
run. It will also print the slowest tests from the run after the run is
concluded. You can disable printing the slowest tests with the --no-slowest
flag, for example::

    $ ostestr --no-slowest

If you'd like to disable the subunit-trace output you can do this using
--no-pretty::

    $ ostestr --no-pretty

ostestr also provides the option to just output the raw subunit stream on
STDOUT with --subunit/-s. Note that if you want to use this you also have to
specify --no-pretty, as the subunit-trace output and the raw subunit output
are mutually exclusive. For example, to get raw subunit output the arguments
would be::

    $ ostestr --no-pretty --subunit

An additional option on top of the blacklist file is the --print-exclude
option. When this option is specified together with a blacklist file, ostestr
will print, before the tests are run, all the tests it will be excluding
based on the blacklist file. If a line in the blacklist file has a comment,
that comment will be printed before listing the tests excluded by that line's
regex. If no comment is present on a line, the regex from that line will be
used instead. For example, if you were using the example blacklist file from
the previous section, the output before the regular test run output would
be::

    $ ostestr -b blacklist.txt --print-exclude
    Excludes these tests
    regex1_match
    regex1_exclude

    exclude those tests
    regex2_match
    regex2_exclude

    ...
Notes for running with tox
--------------------------

If you use `tox`_ for running your tests and call ostestr as the test
command, it's recommended that you put a posargs placeholder after ostestr in
the commands stanza. For example::

    [testenv]
    commands = ostestr {posargs}

.. _tox: https://tox.readthedocs.org/en/latest/

This will enable end users to pass args to configure the output, use the
selection logic, or any other options directly from the tox CLI. This lets
tox take care of the venv management and the environment separation but
enables direct access to all of the ostestr options to easily customize your
test run. For example, assuming the above posargs usage, you would be able
to do::

    $ tox -epy34 -- --regex ^regex1

or to skip discovery::

    $ tox -epy34 -- -n test.test_thing.TestThing.test_thing_method
doc/source/subunit2html.rst | 33 + (new file)

@@ -0,0 +1,33 @@
.. _subunit2html:

subunit2html
============

subunit2html is a tool that takes in a subunit stream file and outputs an
html page.

Summary
-------

subunit2html subunit_stream [output]

Usage
-----

subunit2html takes one mandatory argument, which specifies the location of
the subunit stream file. For example::

    $ subunit2html subunit_stream

By default subunit2html will store the generated html results file as
results.html in the current working directory.

An optional second argument can be provided to set the output path of the
html results file that is generated. If it is provided, this will be the
output path for saving the generated file; otherwise results.html in the
current working directory will be used. For example::

    $ subunit2html subunit_stream test_results.html

will write the generated html results file to test_results.html in the
current working directory.
doc/source/subunit_trace.rst | 110 + (new file)

@@ -0,0 +1,110 @@
.. _subunit_trace:

subunit-trace
=============

subunit-trace is an output filter for subunit streams. It is often used in
conjunction with test runners that emit subunit to enable a consistent and
useful realtime output from a test run.

Summary
-------

subunit-trace [--no-failure-debug|-n] [--fails|-f] [--failonly]
              [--perc-diff|-d] [--no-summary]
              [--diff-threshold|-t <threshold>]

Options
-------

--no-failure-debug, -n
    Disable printing failure debug information in realtime
--fails, -f
    Print failure debug information after the stream is
    processed
--failonly
    Don't print success items
--perc-diff, -d
    Print percent change in run time on each test
--diff-threshold THRESHOLD, -t THRESHOLD
    Threshold to use for displaying percent change from the
    avg run time. If one is not specified, the percent
    change will always be displayed.
--no-summary
    Don't print the summary of the test run after it completes
Usage
-----

subunit-trace takes a subunit stream in via STDIN. This is the only input
into the tool. It will then print on STDOUT the formatted test result output
for the test run information contained in the stream.

A subunit v2 stream must be passed into subunit-trace. If only a subunit v1
stream is available, you must use the subunit-1to2 utility to convert it
before passing the stream into subunit-trace. For example, this can be done
by chaining pipes::

    $ cat subunit_v1 | subunit-1to2 | subunit-trace

Adjusting per test output
^^^^^^^^^^^^^^^^^^^^^^^^^

subunit-trace provides several options to customize its output. This allows
users to customize the output from subunit-trace to suit their needs. The
output from subunit-trace basically comes in 2 parts: the per test output,
and the summary at the end. By default subunit-trace will print failure
messages during the per test output, meaning when a test fails it will also
print the message and any traceback and other attachments at that time.
However, this can be disabled by using --no-failure-debug/-n. For example::

    $ testr run --subunit | subunit-trace --no-failure-debug

There is also the option to print all failures together at the end of the
test run, before the summary view. This is done using the --fails/-f option.
For example::

    $ testr run --subunit | subunit-trace --fails

Often the --fails and --no-failure-debug options are used in conjunction to
only print failures at the end of a test run. This is useful for large test
suites where an error message might be lost in the noise. To do this::

    $ testr run --subunit | subunit-trace --fails --no-failure-debug

By default subunit-trace will print a line for each test after it completes,
with the test status. However, if you only want to see the realtime output
for failures and not any other test status, you can use the --failonly
option. For example::

    $ testr run --subunit | subunit-trace --failonly

The last output option provided by subunit-trace is to disable the summary
view of the test run which is normally displayed at the end of a run. You can
do this using the --no-summary option. For example::

    $ testr run --subunit | subunit-trace --no-summary


Show per test run time percent change
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

subunit-trace provides an option to display the percent change in run time
from the previous run. To do this, subunit-trace leverages the testr
internals a bit: it uses the times.dbm database, which the file repository
type in testrepository creates, to get the previous run time for a test. If
testr hasn't ever been used before, or for whatever reason subunit-trace is
unable to find the times.dbm file from testr, no percentages will be
displayed even if the option is enabled. Additionally, a test which does not
have an entry in the times.dbm file will not have a percentage printed for
it.

To enable this feature you use --perc-diff/-d, for example::

    $ testr run --subunit | subunit-trace --perc-diff

There is also the option to set a threshold for this option. If used, it acts
as an absolute value, and only percentage changes that exceed it will be
printed. For instance, a test that previously took 2.0s and now takes 3.0s is
a +50.00% change. Use the --diff-threshold/-t option to set a threshold, for
example::

    $ testr run --subunit | subunit-trace --perc-diff --diff-threshold 45

This will only display percent differences when the change in run time is at
least 45% in either direction (faster or slower).
doc/source/usage.rst

@@ -1,7 +1,12 @@
-========
+=====
 Usage
-========
+=====

-To use os-testr in a project::
+This section contains the documentation for each of the tools packaged in os-testr

-    import os_testr
+.. toctree::
+   :maxdepth: 2
+
+   ostestr
+   subunit_trace
+   subunit2html
os_testr/os_testr.py

@@ -23,41 +23,54 @@ from subunit import run as subunit_run
 from testtools import run as testtools_run


-def parse_args():
+def get_parser(args):
     parser = argparse.ArgumentParser(
         description='Tool to run openstack tests')
-    parser.add_argument('--blacklist_file', '-b',
-                        help='Path to a blacklist file, this file contains a'
-                             ' separate regex exclude on each newline')
-    parser.add_argument('--regex', '-r',
-                        help='A normal testr selection regex. If a blacklist '
-                             'file is specified, the regex will be appended '
-                             'to the end of the generated regex from that '
-                             'file')
-    parser.add_argument('--pretty', '-p', dest='pretty', action='store_true',
+    list_files = parser.add_mutually_exclusive_group()
+    list_files.add_argument('--blacklist_file', '-b',
+                            help='Path to a blacklist file, this file '
+                                 'contains a separate regex exclude on each '
+                                 'newline')
+    list_files.add_argument('--whitelist_file', '-w',
+                            help='Path to a whitelist file, this file '
+                                 'contains a separate regex on each newline.')
+    group = parser.add_mutually_exclusive_group()
+    group.add_argument('--regex', '-r',
+                       help='A normal testr selection regex. If a blacklist '
+                            'file is specified, the regex will be appended '
+                            'to the end of the generated regex from that '
+                            'file.')
+    group.add_argument('--path', metavar='FILE_OR_DIRECTORY',
+                       help='A file name or directory of tests to run.')
+    group.add_argument('--no-discover', '-n', metavar='TEST_ID',
+                       help="Takes in a single test to bypass test "
+                            "discovery and just execute the test specified. "
+                            "A file name may be used in place of a test "
+                            "name.")
+    pretty = parser.add_mutually_exclusive_group()
+    pretty.add_argument('--pretty', '-p', dest='pretty', action='store_true',
                         help='Print pretty output from subunit-trace. This is '
                              'mutually exclusive with --subunit')
-    parser.add_argument('--no-pretty', dest='pretty', action='store_false',
+    pretty.add_argument('--no-pretty', dest='pretty', action='store_false',
                         help='Disable the pretty output with subunit-trace')
     parser.add_argument('--subunit', '-s', action='store_true',
                         help='output the raw subunit v2 from the test run '
-                             'this is mutuall exclusive with --pretty')
+                             'this is mutually exclusive with --pretty')
     parser.add_argument('--list', '-l', action='store_true',
                         help='List all the tests which will be run.')
-    parser.add_argument('--no-discover', '-n', metavar='TEST_ID',
-                        help="Takes in a single test to bypasses test "
-                             "discover and just excute the test specified")
-    parser.add_argument('--slowest', dest='slowest', action='store_true',
-                        help="after the test run print the slowest tests")
-    parser.add_argument('--no-slowest', dest='slowest', action='store_false',
-                        help="after the test run don't print the slowest "
-                             "tests")
+    slowest = parser.add_mutually_exclusive_group()
+    slowest.add_argument('--slowest', dest='slowest', action='store_true',
+                         help="after the test run print the slowest tests")
+    slowest.add_argument('--no-slowest', dest='slowest', action='store_false',
+                         help="after the test run don't print the slowest "
+                              "tests")
     parser.add_argument('--pdb', metavar='TEST_ID',
                         help='Run a single test that has pdb traces added')
-    parser.add_argument('--parallel', dest='parallel', action='store_true',
-                        help='Run tests in parallel (this is the default)')
-    parser.add_argument('--serial', dest='parallel', action='store_false',
-                        help='Run tests serially')
+    parallel = parser.add_mutually_exclusive_group()
+    parallel.add_argument('--parallel', dest='parallel', action='store_true',
+                          help='Run tests in parallel (this is the default)')
+    parallel.add_argument('--serial', dest='parallel', action='store_false',
+                          help='Run tests serially')
     parser.add_argument('--concurrency', '-c', type=int, metavar='WORKERS',
                         help='The number of workers to use when running in '
                              'parallel. By default this is the number of cpus')
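Because get_parser() now takes the argument list explicitly and returns the
parsed namespace, it can be exercised directly without touching sys.argv. A
minimal sketch, using only options defined above::

    from os_testr import os_testr

    opts = os_testr.get_parser(['--serial', '--regex', 'tests.unit'])
    print(opts.parallel)  # False; --serial stores False on the 'parallel' dest
    print(opts.regex)     # 'tests.unit'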
@@ -71,8 +84,7 @@ def parse_args():
                              'prints the comment from the same line and all '
                              'skipped tests before the test run')
     parser.set_defaults(pretty=True, slowest=True, parallel=True)
-    opts = parser.parse_args()
-    return opts
+    return parser.parse_args(args)


 def _get_test_list(regex, env=None):
@@ -112,7 +124,16 @@ def print_skips(regex, message):
     print('\n')


-def construct_regex(blacklist_file, regex, print_exclude):
+def path_to_regex(path):
+    root, _ = os.path.splitext(path)
+    return root.replace('/', '.')
+
+
+def get_regex_from_whitelist_file(file_path):
+    return '|'.join(open(file_path).read().splitlines())
+
+
+def construct_regex(blacklist_file, whitelist_file, regex, print_exclude):
     if not blacklist_file:
         exclude_regex = ''
     else:
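The behavior of the new path_to_regex helper is pinned down by the unit tests
added later in this diff; in short, it strips the extension and maps path
separators to dots::

    >>> from os_testr import os_testr
    >>> os_testr.path_to_regex("tests/network/v2/test_net.py")
    'tests.network.v2.test_net'
    >>> os_testr.path_to_regex("openstack/tests/network/v2")
    'openstack.tests.network.v2'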
@@ -122,17 +143,25 @@ def construct_regex(blacklist_file, regex, print_exclude):
             raw_line = line.strip()
             split_line = raw_line.split('#')
             # Before the # is the regex
-            regex = split_line[0].strip()
-            # After the # is a comment
-            comment = split_line[1].strip()
-            if regex:
+            line_regex = split_line[0].strip()
+            if len(split_line) > 1:
+                # After the # is a comment
+                comment = split_line[1].strip()
+            else:
+                comment = ''
+            if line_regex:
                 if print_exclude:
-                    print_skips(regex, comment)
-                exclude_regex = '|'.join([regex, exclude_regex])
+                    print_skips(line_regex, comment)
+                if exclude_regex:
+                    exclude_regex = '|'.join([line_regex, exclude_regex])
+                else:
+                    exclude_regex = line_regex
     if exclude_regex:
-        exclude_regex = "'(?!.*" + exclude_regex + ")"
+        exclude_regex = "^((?!" + exclude_regex + ").)*$"
     if regex:
         exclude_regex += regex
+    if whitelist_file:
+        exclude_regex += '%s' % get_regex_from_whitelist_file(whitelist_file)
     return exclude_regex
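A quick way to sanity-check the corrected exclude pattern (the old
"'(?!.*..." string was not a valid anchored lookahead); this is just an
illustration with made-up regexes::

    import re

    # Pattern shape produced by construct_regex for a two-line blacklist
    pattern = re.compile("^((?!slow|flaky).)*$")
    print(bool(pattern.match("tests.unit.test_fast")))  # True: test is kept
    print(bool(pattern.match("tests.slow.test_big")))   # False: excluded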
@@ -224,12 +253,25 @@ def call_subunit_run(test_id, pretty, subunit):
         testtools_run.main([sys.argv[0], test_id], sys.stdout)


-def call_testtools_run(test_id):
-    testtools_run.main([sys.argv[0], test_id], sys.stdout)
+def _select_and_call_runner(opts, exclude_regex):
+    ec = 1
+    if not os.path.isdir('.testrepository'):
+        subprocess.call(['testr', 'init'])
+
+    if not opts.no_discover and not opts.pdb:
+        ec = call_testr(exclude_regex, opts.subunit, opts.pretty, opts.list,
+                        opts.slowest, opts.parallel, opts.concurrency,
+                        opts.until_failure)
+    else:
+        test_to_run = opts.no_discover or opts.pdb
+        if test_to_run.find('/') != -1:
+            test_to_run = path_to_regex(test_to_run)
+        ec = call_subunit_run(test_to_run, opts.pretty, opts.subunit)
+    return ec


 def main():
-    opts = parse_args()
+    opts = get_parser(sys.argv[1:])
     if opts.pretty and opts.subunit:
         msg = ('Subunit output and pretty output cannot be specified at the '
                'same time')
@@ -248,18 +290,15 @@ def main():
         msg = "You can not use until_failure mode with pdb or no-discover"
         print(msg)
         exit(5)
-    exclude_regex = construct_regex(opts.blacklist_file, opts.regex,
-                                    opts.print_exclude)
-    if not os.path.isdir('.testrepository'):
-        subprocess.call(['testr', 'init'])
-    if not opts.no_discover and not opts.pdb:
-        exit(call_testr(exclude_regex, opts.subunit, opts.pretty, opts.list,
-                        opts.slowest, opts.parallel, opts.concurrency,
-                        opts.until_failure))
-    elif opts.pdb:
-        exit(call_testtools_run(opts.pdb))
+    if opts.path:
+        regex = path_to_regex(opts.path)
     else:
-        exit(call_subunit_run(opts.no_discover, opts.pretty, opts.subunit))
+        regex = opts.regex
+    exclude_regex = construct_regex(opts.blacklist_file,
+                                    opts.whitelist_file,
+                                    regex,
+                                    opts.print_exclude)
+    exit(_select_and_call_runner(opts, exclude_regex))

 if __name__ == '__main__':
     main()
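Taken together, main() now supports path-based selection end to end; a couple
of hypothetical invocations of the new options (file paths are placeholders)::

    $ ostestr --path tests/network/v2          # runs tests matching tests.network.v2
    $ ostestr --whitelist_file whitelist.txt   # only regexes listed in the file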
os_testr/subunit_trace.py

@@ -120,6 +120,7 @@ def print_attachments(stream, test, all_channels=False):
             # indent attachment lines 4 spaces to make them visually
             # offset
             for line in detail.as_text().split('\n'):
+                line = line.encode('utf8')
                 stream.write("    %s\n" % line)

@@ -147,7 +148,7 @@ def find_test_run_time_diff(test_id, run_time):


 def show_outcome(stream, test, print_failures=False, failonly=False,
-                 enable_diff=False, threshold='0'):
+                 enable_diff=False, threshold='0', abbreviate=False):
     global RESULTS
     status = test['status']
     # TODO(sdague): ask lifeless why on this?
@@ -168,30 +169,45 @@ def show_outcome(stream, test, print_failures=False, failonly=False,

     if status == 'fail':
         FAILS.append(test)
-        stream.write('{%s} %s [%s] ... FAILED\n' % (
-            worker, name, duration))
-        if not print_failures:
-            print_attachments(stream, test, all_channels=True)
+        if abbreviate:
+            stream.write('F')
+        else:
+            stream.write('{%s} %s [%s] ... FAILED\n' % (
+                worker, name, duration))
+            if not print_failures:
+                print_attachments(stream, test, all_channels=True)
     elif not failonly:
         if status == 'success':
-            out_string = '{%s} %s [%s' % (worker, name, duration)
-            perc_diff = find_test_run_time_diff(test['id'], duration)
-            if enable_diff:
-                if perc_diff and abs(perc_diff) >= abs(float(threshold)):
-                    if perc_diff > 0:
-                        out_string = out_string + ' +%.2f%%' % perc_diff
-                    else:
-                        out_string = out_string + ' %.2f%%' % perc_diff
-            stream.write(out_string + '] ... ok\n')
-            print_attachments(stream, test)
+            if abbreviate:
+                stream.write('.')
+            else:
+                out_string = '{%s} %s [%s' % (worker, name, duration)
+                perc_diff = find_test_run_time_diff(test['id'], duration)
+                if enable_diff:
+                    if perc_diff and abs(perc_diff) >= abs(float(threshold)):
+                        if perc_diff > 0:
+                            out_string = out_string + ' +%.2f%%' % perc_diff
+                        else:
+                            out_string = out_string + ' %.2f%%' % perc_diff
+                stream.write(out_string + '] ... ok\n')
+                print_attachments(stream, test)
         elif status == 'skip':
-            stream.write('{%s} %s ... SKIPPED: %s\n' % (
-                worker, name, test['details']['reason'].as_text()))
+            if abbreviate:
+                stream.write('S')
+            else:
+                reason = test['details'].get('reason', '')
+                if reason:
+                    reason = ': ' + reason.as_text()
+                stream.write('{%s} %s ... SKIPPED%s\n' % (
+                    worker, name, reason))
         else:
-            stream.write('{%s} %s [%s] ... %s\n' % (
-                worker, name, duration, test['status']))
+            if abbreviate:
+                stream.write('%s' % test['status'][0])
+            else:
+                stream.write('{%s} %s [%s] ... %s\n' % (
+                    worker, name, duration, test['status']))
             if not print_failures:
                 print_attachments(stream, test, all_channels=True)

     stream.flush()
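With abbreviation enabled, the per-test lines above collapse to one character
per test ('.' success, 'F' fail, 'S' skip, otherwise the first letter of the
status); a run might look something like::

    $ testr run --subunit | subunit-trace -a
    ....F..S..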
@@ -239,8 +255,13 @@ def run_time():
 def worker_stats(worker):
     tests = RESULTS[worker]
     num_tests = len(tests)
-    delta = tests[-1]['timestamps'][1] - tests[0]['timestamps'][0]
-    return num_tests, delta
+    stop_time = tests[-1]['timestamps'][1]
+    start_time = tests[0]['timestamps'][0]
+    if not start_time or not stop_time:
+        delta = 'N/A'
+    else:
+        delta = stop_time - start_time
+    return num_tests, str(delta)


 def print_summary(stream, elapsed_time):
@@ -266,8 +287,11 @@ def print_summary(stream, elapsed_time):
                          "Race in testr accounting.\n" % w)
         else:
             num, time = worker_stats(w)
-            stream.write(" - Worker %s (%s tests) => %ss\n" %
-                         (w, num, time))
+            out_str = " - Worker %s (%s tests) => %s" % (w, num, time)
+            if time.isdigit():
+                out_str += 's'
+            out_str += '\n'
+            stream.write(out_str)


 def parse_args():
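The isdigit() guard means the trailing 's' unit is only appended when
worker_stats() returned a plain run of digits; the 'N/A' fallback (and a
timedelta string such as '0:00:12.345678') is printed as-is, for example::

     - Worker 0 (15 tests) => 0:00:12.345678
     - Worker 1 (3 tests) => N/A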
@@ -283,6 +307,9 @@ def parse_args():
                         default=(
                             os.environ.get('TRACE_FAILONLY', False)
                             is not False))
+    parser.add_argument('--abbreviate', '-a', action='store_true',
+                        dest='abbreviate', help='Print one character status '
+                                                'for each test')
     parser.add_argument('--perc-diff', '-d', action='store_true',
                         dest='enable_diff',
                         help="Print percent change in run time on each test ")
@@ -304,7 +331,8 @@ def main():
         functools.partial(show_outcome, sys.stdout,
                           print_failures=args.print_failures,
                           failonly=args.failonly,
-                          enable_diff=args.enable_diff))
+                          enable_diff=args.enable_diff,
+                          abbreviate=args.abbreviate))
     summary = testtools.StreamSummary()
     result = testtools.CopyStreamResult([outcomes, summary])
     result = testtools.StreamResultRouter(result)
os_testr/tests/test_os_testr.py

@@ -19,10 +19,238 @@ test_os_testr
 Tests for `os_testr` module.
 """

+import mock
+import six
+
+from os_testr import os_testr
 from os_testr.tests import base


-class TestOs_testr(base.TestCase):
+class TestPathToRegex(base.TestCase):

-    def test_something(self):
-        pass
+    def test_file_name(self):
+        result = os_testr.path_to_regex("tests/network/v2/test_net.py")
+        self.assertEqual("tests.network.v2.test_net", result)
+        result = os_testr.path_to_regex("openstack/tests/network/v2")
+        self.assertEqual("openstack.tests.network.v2", result)
+
+
+class TestGetParser(base.TestCase):
+    def test_pretty(self):
+        namespace = os_testr.get_parser(['--pretty'])
+        self.assertEqual(True, namespace.pretty)
+        namespace = os_testr.get_parser(['--no-pretty'])
+        self.assertEqual(False, namespace.pretty)
+        self.assertRaises(SystemExit, os_testr.get_parser,
+                          ['--no-pretty', '--pretty'])
+
+    def test_slowest(self):
+        namespace = os_testr.get_parser(['--slowest'])
+        self.assertEqual(True, namespace.slowest)
+        namespace = os_testr.get_parser(['--no-slowest'])
+        self.assertEqual(False, namespace.slowest)
+        self.assertRaises(SystemExit, os_testr.get_parser,
+                          ['--no-slowest', '--slowest'])
+
+    def test_parallel(self):
+        namespace = os_testr.get_parser(['--parallel'])
+        self.assertEqual(True, namespace.parallel)
+        namespace = os_testr.get_parser(['--serial'])
+        self.assertEqual(False, namespace.parallel)
+        self.assertRaises(SystemExit, os_testr.get_parser,
+                          ['--parallel', '--serial'])
+
+
+class TestCallers(base.TestCase):
+    def test_no_discover(self):
+        namespace = os_testr.get_parser(['-n', 'project.tests.foo'])
+
+        def _fake_exit(arg):
+            self.assertTrue(arg)
+
+        def _fake_run(*args, **kwargs):
+            return 'project.tests.foo' in args
+
+        with mock.patch.object(os_testr, 'exit', side_effect=_fake_exit), \
+                mock.patch.object(os_testr, 'get_parser',
+                                  return_value=namespace), \
+                mock.patch.object(os_testr, 'call_subunit_run',
+                                  side_effect=_fake_run):
+            os_testr.main()
+
+    def test_no_discover_path(self):
+        namespace = os_testr.get_parser(['-n', 'project/tests/foo'])
+
+        def _fake_exit(arg):
+            self.assertTrue(arg)
+
+        def _fake_run(*args, **kwargs):
+            return 'project.tests.foo' in args
+
+        with mock.patch.object(os_testr, 'exit', side_effect=_fake_exit), \
+                mock.patch.object(os_testr, 'get_parser',
+                                  return_value=namespace), \
+                mock.patch.object(os_testr, 'call_subunit_run',
+                                  side_effect=_fake_run):
+            os_testr.main()
+
+    def test_pdb(self):
+        namespace = os_testr.get_parser(['--pdb', 'project.tests.foo'])
+
+        def _fake_exit(arg):
+            self.assertTrue(arg)
+
+        def _fake_run(*args, **kwargs):
+            return 'project.tests.foo' in args
+
+        with mock.patch.object(os_testr, 'exit', side_effect=_fake_exit), \
+                mock.patch.object(os_testr, 'get_parser',
+                                  return_value=namespace), \
+                mock.patch.object(os_testr, 'call_subunit_run',
+                                  side_effect=_fake_run):
+            os_testr.main()
+
+    def test_pdb_path(self):
+        namespace = os_testr.get_parser(['--pdb', 'project/tests/foo'])
+
+        def _fake_exit(arg):
+            self.assertTrue(arg)
+
+        def _fake_run(*args, **kwargs):
+            return 'project.tests.foo' in args
+
+        with mock.patch.object(os_testr, 'exit', side_effect=_fake_exit), \
+                mock.patch.object(os_testr, 'get_parser',
+                                  return_value=namespace), \
+                mock.patch.object(os_testr, 'call_subunit_run',
+                                  side_effect=_fake_run):
+            os_testr.main()
+
+
+class TestConstructRegex(base.TestCase):
+    def test_regex_passthrough(self):
+        result = os_testr.construct_regex(None, None, 'fake_regex', False)
+        self.assertEqual(result, 'fake_regex')
+
+    def test_blacklist_regex_with_comments(self):
+        blacklist_file = six.StringIO()
+        for i in range(4):
+            blacklist_file.write('fake_regex_%s # A Comment\n' % i)
+        blacklist_file.seek(0)
+        with mock.patch('six.moves.builtins.open',
+                        return_value=blacklist_file):
+            result = os_testr.construct_regex('fake_path', None, None, False)
+        self.assertEqual(
+            result,
+            "^((?!fake_regex_3|fake_regex_2|fake_regex_1|fake_regex_0).)*$")
+
+    def test_blacklist_regex_without_comments(self):
+        blacklist_file = six.StringIO()
+        for i in range(4):
+            blacklist_file.write('fake_regex_%s\n' % i)
+        blacklist_file.seek(0)
+        with mock.patch('six.moves.builtins.open',
+                        return_value=blacklist_file):
+            result = os_testr.construct_regex('fake_path', None, None, False)
+        self.assertEqual(
+            result,
+            "^((?!fake_regex_3|fake_regex_2|fake_regex_1|fake_regex_0).)*$")
+
+    def test_blacklist_regex_with_comments_and_regex(self):
+        blacklist_file = six.StringIO()
+        for i in range(4):
+            blacklist_file.write('fake_regex_%s # Comments\n' % i)
+        blacklist_file.seek(0)
+        with mock.patch('six.moves.builtins.open',
+                        return_value=blacklist_file):
+            result = os_testr.construct_regex('fake_path', None,
+                                              'fake_regex', False)
+
+        expected_regex = ("^((?!fake_regex_3|fake_regex_2|fake_regex_1|"
+                          "fake_regex_0).)*$fake_regex")
+        self.assertEqual(result, expected_regex)
+
+    def test_blacklist_regex_without_comments_and_regex(self):
+        blacklist_file = six.StringIO()
+        for i in range(4):
+            blacklist_file.write('fake_regex_%s\n' % i)
+        blacklist_file.seek(0)
+        with mock.patch('six.moves.builtins.open',
+                        return_value=blacklist_file):
+            result = os_testr.construct_regex('fake_path', None,
+                                              'fake_regex', False)
+
+        expected_regex = ("^((?!fake_regex_3|fake_regex_2|fake_regex_1|"
+                          "fake_regex_0).)*$fake_regex")
+        self.assertEqual(result, expected_regex)
+
+    @mock.patch.object(os_testr, 'print_skips')
+    def test_blacklist_regex_with_comment_print_skips(self, print_mock):
+        blacklist_file = six.StringIO()
+        for i in range(4):
+            blacklist_file.write('fake_regex_%s # Comment\n' % i)
+        blacklist_file.seek(0)
+        with mock.patch('six.moves.builtins.open',
+                        return_value=blacklist_file):
+            result = os_testr.construct_regex('fake_path', None,
+                                              None, True)
+
+        expected_regex = ("^((?!fake_regex_3|fake_regex_2|fake_regex_1|"
+                          "fake_regex_0).)*$")
+        self.assertEqual(result, expected_regex)
+        calls = print_mock.mock_calls
+        self.assertEqual(len(calls), 4)
+        args = list(map(lambda x: x[1], calls))
+        self.assertIn(('fake_regex_0', 'Comment'), args)
+        self.assertIn(('fake_regex_1', 'Comment'), args)
+        self.assertIn(('fake_regex_2', 'Comment'), args)
+        self.assertIn(('fake_regex_3', 'Comment'), args)
+
+    @mock.patch.object(os_testr, 'print_skips')
+    def test_blacklist_regex_without_comment_print_skips(self, print_mock):
+        blacklist_file = six.StringIO()
+        for i in range(4):
+            blacklist_file.write('fake_regex_%s\n' % i)
+        blacklist_file.seek(0)
+        with mock.patch('six.moves.builtins.open',
+                        return_value=blacklist_file):
+            result = os_testr.construct_regex('fake_path', None,
+                                              None, True)
+
+        expected_regex = ("^((?!fake_regex_3|fake_regex_2|"
+                          "fake_regex_1|fake_regex_0).)*$")
+        self.assertEqual(result, expected_regex)
+        calls = print_mock.mock_calls
+        self.assertEqual(len(calls), 4)
+        args = list(map(lambda x: x[1], calls))
+        self.assertIn(('fake_regex_0', ''), args)
+        self.assertIn(('fake_regex_1', ''), args)
+        self.assertIn(('fake_regex_2', ''), args)
+        self.assertIn(('fake_regex_3', ''), args)
+
+
+class TestWhitelistFile(base.TestCase):
+    def test_read_whitelist_file(self):
+        file_contents = """regex_a
+regex_b"""
+        whitelist_file = six.StringIO()
+        whitelist_file.write(file_contents)
+        whitelist_file.seek(0)
+        with mock.patch('six.moves.builtins.open',
+                        return_value=whitelist_file):
+            regex = os_testr.get_regex_from_whitelist_file('/path/to/not_used')
+        self.assertEqual('regex_a|regex_b', regex)
+
+    def test_whitelist_regex_without_comments_and_regex(self):
+        file_contents = """regex_a
+regex_b"""
+        whitelist_file = six.StringIO()
+        whitelist_file.write(file_contents)
+        whitelist_file.seek(0)
+        with mock.patch('six.moves.builtins.open',
+                        return_value=whitelist_file):
+            result = os_testr.construct_regex(None, 'fake_path',
+                                              None, False)
+
+        expected_regex = 'regex_a|regex_b'
+        self.assertEqual(result, expected_regex)
requirements.txt

@@ -2,8 +2,8 @@
 # of appearance. Changing the order has an impact on the overall integration
 # process, which may cause wedges in the gate later.

-pbr>=0.6,!=0.7,<1.0
+pbr>=1.3,<2.0
 Babel>=1.3
 testrepository>=0.0.18
 python-subunit>=0.0.18
-testtools>=0.9.36,!=1.2.0
+testtools>=1.4.0