Imported Upstream version 0.1.0
commit 5ce2590137
.coveragerc (new file, 7 lines)
@@ -0,0 +1,7 @@
[run]
branch = True
source = os_testr
omit = os_testr/tests/*,os_testr/openstack/*

[report]
ignore-errors = True
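For illustration, a minimal sketch of driving coverage.py from Python with the configuration above (the Coverage class needs coverage>=4.0; the discovery path is an assumption based on the test layout listed in SOURCES.txt):

    import coverage
    import unittest

    cov = coverage.Coverage(config_file='.coveragerc')  # reads [run] and [report]
    cov.start()
    suite = unittest.defaultTestLoader.discover('os_testr/tests')
    unittest.TextTestRunner().run(suite)
    cov.stop()
    cov.save()
    cov.report()  # honours the omit and ignore-errors settings above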
.mailmap (new file, 3 lines)
@@ -0,0 +1,3 @@
# Format is:
# <preferred e-mail> <other e-mail 1>
# <preferred e-mail> <other e-mail 2>
.testr.conf (new file, 7 lines)
@@ -0,0 +1,7 @@
[DEFAULT]
test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \
             OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \
             OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-60} \
             ${PYTHON:-python} -m subunit.run discover -t ./ . $LISTOPT $IDOPTION
test_id_option=--load-list $IDFILE
test_list_option=--list
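The ${VAR:-default} references in test_command are expanded by the shell before testr runs the command. A small Python sketch of the same resolution, purely for illustration (the defaults mirror the config above):

    import os

    defaults = {
        'OS_STDOUT_CAPTURE': '1',
        'OS_STDERR_CAPTURE': '1',
        'OS_TEST_TIMEOUT': '60',
        'PYTHON': 'python',
    }
    env = {k: os.environ.get(k, v) for k, v in defaults.items()}
    cmd = ('OS_STDOUT_CAPTURE={OS_STDOUT_CAPTURE} '
           'OS_STDERR_CAPTURE={OS_STDERR_CAPTURE} '
           'OS_TEST_TIMEOUT={OS_TEST_TIMEOUT} '
           '{PYTHON} -m subunit.run discover -t ./ .').format(**env)
    print(cmd)  # the command testr ends up invoking for each worker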
CONTRIBUTING.rst (new file, 16 lines)
@@ -0,0 +1,16 @@
If you would like to contribute to the development of OpenStack,
you must follow the steps on this page:

   http://docs.openstack.org/infra/manual/developers.html

Once those steps have been completed, changes to OpenStack
should be submitted for review via the Gerrit tool, following
the workflow documented at:

   http://docs.openstack.org/infra/manual/developers.html#development-workflow

Pull requests submitted through GitHub will be ignored.

Bugs should be filed on Launchpad, not GitHub:

   https://bugs.launchpad.net/os-testr
ChangeLog (new file, 22 lines)
@@ -0,0 +1,22 @@
CHANGES
=======

0.1.0
-----

* Fix pep8 issues and add apache header to subunit2html
* Flush out the readme in preparation for the first release
* Add subunit2html from jenkins slave scripts
* Ensure failure is printed with --until-failure in pretty mode
* Add --until-failure option to ostestr
* Add basic unit tests to check ostestr return codes
* Add percent change to duration on subunit-trace output
* Use python APIs to call run modules in ostestr
* Add ostestr options to control parallelism
* Fix the testr init subprocess call
* Improve the arguments for ostestr
* Fix pep8 issues
* Fix return code on test run failure
* Add --pdb flag and fix --no-discover flag
* Add subunit-trace and ostestr
* Initial Cookiecutter Commit
HACKING.rst (new file, 4 lines)
@@ -0,0 +1,4 @@
os-testr Style Commandments
===============================================

Read the OpenStack Style Commandments http://docs.openstack.org/developer/hacking/
LICENSE (new file, 176 lines)
@@ -0,0 +1,176 @@
|
||||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
MANIFEST.in (new file, 6 lines)
@@ -0,0 +1,6 @@
include AUTHORS
include ChangeLog
exclude .gitignore
exclude .gitreview

global-exclude *.pyc
PKG-INFO (new file, 49 lines)
@@ -0,0 +1,49 @@
Metadata-Version: 1.1
Name: os-testr
Version: 0.1.0
Summary: A testr wrapper to provide functionality for OpenStack projects
Home-page: http://www.openstack.org/
Author: OpenStack
Author-email: openstack-dev@lists.openstack.org
License: UNKNOWN
Description: ===============================
        os-testr
        ===============================

        A testr wrapper to provide functionality for OpenStack projects

        * Free software: Apache license
        * Documentation: http://docs.openstack.org/developer/os-testr
        * Source: http://git.openstack.org/cgit/openstack/os-testr
        * Bugs: http://bugs.launchpad.net/os-testr

        Features
        --------

        * ostestr: a testr wrapper that uses subunit-trace for output and builds some
          helpful extra functionality around testr
        * subunit-trace: an output filter for a subunit stream which provides useful
          information about the run
        * subunit2html: generates a test results html page from a subunit stream

        Release Notes
        =============

        0.1.0
        -----
        * First release which includes: ostestr, subunit-trace, and subunit2html


Platform: UNKNOWN
Classifier: Environment :: OpenStack
Classifier: Intended Audience :: Information Technology
Classifier: Intended Audience :: System Administrators
Classifier: License :: OSI Approved :: Apache Software License
Classifier: Operating System :: POSIX :: Linux
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 2
Classifier: Programming Language :: Python :: 2.7
Classifier: Programming Language :: Python :: 2.6
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.3
Classifier: Programming Language :: Python :: 3.4
README.rst (new file, 26 lines)
@@ -0,0 +1,26 @@
===============================
os-testr
===============================

A testr wrapper to provide functionality for OpenStack projects

* Free software: Apache license
* Documentation: http://docs.openstack.org/developer/os-testr
* Source: http://git.openstack.org/cgit/openstack/os-testr
* Bugs: http://bugs.launchpad.net/os-testr

Features
--------

* ostestr: a testr wrapper that uses subunit-trace for output and builds some
  helpful extra functionality around testr
* subunit-trace: an output filter for a subunit stream which provides useful
  information about the run
* subunit2html: generates a test results html page from a subunit stream

Release Notes
=============

0.1.0
-----
* First release which includes: ostestr, subunit-trace, and subunit2html
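As a quick tour of the three console scripts, here is an illustrative Python sketch using subprocess (the selection regex and file names are made-up examples, not files shipped in the repository):

    import subprocess

    # Run the suite through the testr wrapper with pretty subunit-trace output.
    subprocess.call(['ostestr', '--regex', 'os_testr.tests'])

    # Pipe a raw subunit v2 run through the subunit-trace filter by hand.
    run = subprocess.Popen(['python', '-m', 'subunit.run', 'discover', '.'],
                           stdout=subprocess.PIPE)
    subprocess.call(['subunit-trace', '--no-failure-debug', '-f'],
                    stdin=run.stdout)

    # Convert a previously captured subunit stream into an HTML report.
    subprocess.call(['subunit2html', 'testrepository.subunit', 'results.html'])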
doc/source/conf.py (new executable file, 75 lines)
@@ -0,0 +1,75 @@
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import sys

sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration ----------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
    'sphinx.ext.autodoc',
    #'sphinx.ext.intersphinx',
    'oslosphinx'
]

# autodoc generation is a bit aggressive and a nuisance when doing heavy
# text edit cycles.
# execute "export SPHINX_DEBUG=1" in your terminal to disable

# The suffix of source filenames.
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'os-testr'
copyright = u'2013, OpenStack Foundation'

# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# -- Options for HTML output --------------------------------------------------

# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme_path = ["."]
# html_theme = '_theme'
# html_static_path = ['static']

# Output file base name for HTML help builder.
htmlhelp_basename = '%sdoc' % project

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
    ('index',
     '%s.tex' % project,
     u'%s Documentation' % project,
     u'OpenStack Foundation', 'manual'),
]

# Example configuration for intersphinx: refer to the Python standard library.
#intersphinx_mapping = {'http://docs.python.org/': None}
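A sketch of building these docs programmatically with the Sphinx application API rather than the sphinx-build command (the output paths are assumptions, and oslosphinx must be installed for the extension list above):

    from sphinx.application import Sphinx

    app = Sphinx(srcdir='doc/source', confdir='doc/source',
                 outdir='doc/build/html', doctreedir='doc/build/doctrees',
                 buildername='html')
    app.build()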
doc/source/contributing.rst (new file, 4 lines)
@@ -0,0 +1,4 @@
============
Contributing
============
.. include:: ../../CONTRIBUTING.rst
doc/source/index.rst (new file, 25 lines)
@@ -0,0 +1,25 @@
.. os-testr documentation master file, created by
   sphinx-quickstart on Tue Jul 9 22:26:36 2013.
   You can adapt this file completely to your liking, but it should at least
   contain the root `toctree` directive.

Welcome to os-testr's documentation!
========================================================

Contents:

.. toctree::
   :maxdepth: 2

   readme
   installation
   usage
   contributing

Indices and tables
==================

* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
doc/source/installation.rst (new file, 12 lines)
@@ -0,0 +1,12 @@
============
Installation
============

At the command line::

    $ pip install os-testr

Or, if you have virtualenvwrapper installed::

    $ mkvirtualenv os-testr
    $ pip install os-testr
doc/source/readme.rst (new file, 1 line)
@@ -0,0 +1 @@
.. include:: ../../README.rst
doc/source/usage.rst (new file, 7 lines)
@@ -0,0 +1,7 @@
========
Usage
========

To use os-testr in a project::

    import os_testr
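A slightly fuller sketch of what the import gives you, assuming the package is installed (so pbr can resolve the version) and its requirements are available:

    import os_testr
    from os_testr import os_testr as ostestr_module

    print(os_testr.__version__)  # version string resolved through pbr

    # The helpers used by the ostestr command are plain functions; for example,
    # construct_regex() simply returns the selection regex when no blacklist
    # file is given.
    print(ostestr_module.construct_regex(None, 'os_testr.tests'))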
openstack-common.conf (new file, 6 lines)
@@ -0,0 +1,6 @@
[DEFAULT]

# The list of modules to copy from oslo-incubator.git

# The base module to hold the copy of openstack.common
base=os_testr
os_testr.egg-info/PKG-INFO (new file, 49 lines)
@@ -0,0 +1,49 @@
Metadata-Version: 1.1
Name: os-testr
Version: 0.1.0
Summary: A testr wrapper to provide functionality for OpenStack projects
Home-page: http://www.openstack.org/
Author: OpenStack
Author-email: openstack-dev@lists.openstack.org
License: UNKNOWN
Description: ===============================
        os-testr
        ===============================

        A testr wrapper to provide functionality for OpenStack projects

        * Free software: Apache license
        * Documentation: http://docs.openstack.org/developer/os-testr
        * Source: http://git.openstack.org/cgit/openstack/os-testr
        * Bugs: http://bugs.launchpad.net/os-testr

        Features
        --------

        * ostestr: a testr wrapper that uses subunit-trace for output and builds some
          helpful extra functionality around testr
        * subunit-trace: an output filter for a subunit stream which provides useful
          information about the run
        * subunit2html: generates a test results html page from a subunit stream

        Release Notes
        =============

        0.1.0
        -----
        * First release which includes: ostestr, subunit-trace, and subunit2html


Platform: UNKNOWN
Classifier: Environment :: OpenStack
Classifier: Intended Audience :: Information Technology
Classifier: Intended Audience :: System Administrators
Classifier: License :: OSI Approved :: Apache Software License
Classifier: Operating System :: POSIX :: Linux
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 2
Classifier: Programming Language :: Python :: 2.7
Classifier: Programming Language :: Python :: 2.6
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.3
Classifier: Programming Language :: Python :: 3.4
os_testr.egg-info/SOURCES.txt (new file, 44 lines)
@@ -0,0 +1,44 @@
.coveragerc
.mailmap
.testr.conf
AUTHORS
CONTRIBUTING.rst
ChangeLog
HACKING.rst
LICENSE
MANIFEST.in
README.rst
babel.cfg
openstack-common.conf
requirements.txt
setup.cfg
setup.py
test-requirements.txt
tox.ini
doc/source/conf.py
doc/source/contributing.rst
doc/source/index.rst
doc/source/installation.rst
doc/source/readme.rst
doc/source/usage.rst
os_testr/__init__.py
os_testr/os_testr.py
os_testr/subunit2html.py
os_testr/subunit_trace.py
os_testr.egg-info/PKG-INFO
os_testr.egg-info/SOURCES.txt
os_testr.egg-info/dependency_links.txt
os_testr.egg-info/entry_points.txt
os_testr.egg-info/not-zip-safe
os_testr.egg-info/pbr.json
os_testr.egg-info/requires.txt
os_testr.egg-info/top_level.txt
os_testr/tests/__init__.py
os_testr/tests/base.py
os_testr/tests/test_os_testr.py
os_testr/tests/test_return_codes.py
os_testr/tests/files/__init__.py
os_testr/tests/files/failing-tests
os_testr/tests/files/passing-tests
os_testr/tests/files/setup.cfg
os_testr/tests/files/testr-conf
os_testr.egg-info/dependency_links.txt (new file, 1 line)
@@ -0,0 +1 @@

os_testr.egg-info/entry_points.txt (new file, 5 lines)
@@ -0,0 +1,5 @@
[console_scripts]
ostestr = os_testr.os_testr:main
subunit-trace = os_testr.subunit_trace:main
subunit2html = os_testr.subunit2html:main

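Each console_scripts entry maps a command name to a module:function pair; at install time setuptools generates a wrapper roughly equivalent to this sketch (not the literal generated file):

    import sys

    from os_testr.os_testr import main

    if __name__ == '__main__':
        sys.exit(main())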
os_testr.egg-info/not-zip-safe (new file, 1 line)
@@ -0,0 +1 @@

os_testr.egg-info/pbr.json (new file, 1 line)
@@ -0,0 +1 @@
{"git_version": "78f9371", "is_release": true}
os_testr.egg-info/requires.txt (new file, 5 lines)
@@ -0,0 +1,5 @@
pbr>=0.6,!=0.7,<1.0
Babel>=1.3
testrepository>=0.0.18
python-subunit>=0.0.18
testtools>=0.9.36,!=1.2.0
os_testr.egg-info/top_level.txt (new file, 1 line)
@@ -0,0 +1 @@
os_testr
os_testr/__init__.py (new file, 19 lines)
@@ -0,0 +1,19 @@
# -*- coding: utf-8 -*-

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import pbr.version


__version__ = pbr.version.VersionInfo(
    'os_testr').version_string()
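For context, pbr derives this version from the installed package metadata (or from git when run from a checkout), so the attribute only resolves once os-testr is installed. A minimal sketch of the same lookup done directly:

    import pbr.version

    info = pbr.version.VersionInfo('os_testr')
    print(info.version_string())   # e.g. '0.1.0'
    print(info.release_string())   # may include a dev/git suffix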
os_testr/os_testr.py (new executable file, 233 lines)
@@ -0,0 +1,233 @@
#!/usr/bin/env python2
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import argparse
import copy
import os
import subprocess
import sys

from subunit import run as subunit_run
from testtools import run as testtools_run


def parse_args():
    parser = argparse.ArgumentParser(
        description='Tool to run openstack tests')
    parser.add_argument('--blacklist_file', '-b',
                        help='Path to a blacklist file; this file contains a'
                             ' separate regex exclude on each newline')
    parser.add_argument('--regex', '-r',
                        help='A normal testr selection regex. If a blacklist '
                             'file is specified, the regex will be appended '
                             'to the end of the generated regex from that '
                             'file')
    parser.add_argument('--pretty', '-p', dest='pretty', action='store_true',
                        help='Print pretty output from subunit-trace. This '
                             'is mutually exclusive with --subunit')
    parser.add_argument('--no-pretty', dest='pretty', action='store_false',
                        help='Disable the pretty output with subunit-trace')
    parser.add_argument('--subunit', '-s', action='store_true',
                        help='Output the raw subunit v2 stream from the test '
                             'run. This is mutually exclusive with --pretty')
    parser.add_argument('--list', '-l', action='store_true',
                        help='List all the tests which will be run.')
    parser.add_argument('--no-discover', '-n', metavar='TEST_ID',
                        help='Takes a single test ID to bypass test '
                             'discovery and just execute the specified test')
    parser.add_argument('--slowest', dest='slowest', action='store_true',
                        help='After the test run print the slowest tests')
    parser.add_argument('--no-slowest', dest='slowest', action='store_false',
                        help="After the test run don't print the slowest "
                             "tests")
    parser.add_argument('--pdb', metavar='TEST_ID',
                        help='Run a single test that has pdb traces added')
    parser.add_argument('--parallel', dest='parallel', action='store_true',
                        help='Run tests in parallel (this is the default)')
    parser.add_argument('--serial', dest='parallel', action='store_false',
                        help='Run tests serially')
    parser.add_argument('--concurrency', '-c', type=int, metavar='WORKERS',
                        help='The number of workers to use when running in '
                             'parallel. By default this is the number of cpus')
    parser.add_argument('--until-failure', action='store_true',
                        help='Run the tests in a loop until a failure is '
                             'encountered. Running with subunit or pretty '
                             'output enabled will force the loop to run the '
                             'tests serially')
    parser.set_defaults(pretty=True, slowest=True, parallel=True)
    opts = parser.parse_args()
    return opts


def construct_regex(blacklist_file, regex):
    if not blacklist_file:
        exclude_regex = ''
    else:
        black_file = open(blacklist_file, 'r')
        exclude_regex = ''
        for line in black_file:
            # Keep the blacklist entry in its own name so the user-supplied
            # selection regex is not clobbered by the last line of the file.
            black_regex = line.strip()
            exclude_regex = '|'.join([black_regex, exclude_regex])
        if exclude_regex:
            exclude_regex = "'(?!.*" + exclude_regex + ")"
    if regex:
        exclude_regex += regex
    return exclude_regex


def call_testr(regex, subunit, pretty, list_tests, slowest, parallel, concur,
               until_failure):
    if parallel:
        cmd = ['testr', 'run', '--parallel']
        if concur:
            cmd.append('--concurrency=%s' % concur)
    else:
        cmd = ['testr', 'run']
    if list_tests:
        cmd = ['testr', 'list-tests']
    elif (subunit or pretty) and not until_failure:
        cmd.append('--subunit')
    elif not (subunit or pretty) and until_failure:
        cmd.append('--until-failure')
    cmd.append(regex)
    env = copy.deepcopy(os.environ)
    # This workaround is necessary because of lp bug 1411804; it's super hacky
    # and makes tons of unfounded assumptions, but it works for the most part
    if (subunit or pretty) and until_failure:
        proc = subprocess.Popen(['testr', 'list-tests', regex], env=env,
                                stdout=subprocess.PIPE)
        out = proc.communicate()[0]
        raw_test_list = out.split('\n')
        bad = False
        test_list = []
        exclude_list = ['CAPTURE', 'TEST_TIMEOUT', 'PYTHON',
                        'subunit.run discover']
        for line in raw_test_list:
            for exclude in exclude_list:
                if exclude in line:
                    bad = True
                    break
                elif not line:
                    bad = True
                    break
            if not bad:
                test_list.append(line)
            bad = False
        count = 0
        failed = False
        if not test_list:
            print("No tests to run")
            exit(1)
        # If pretty or subunit output is desired, manually loop over the tests
        # individually and generate the desired output in a linear series;
        # this avoids lp bug 1411804 while retaining most of the desired
        # behavior.
        while True:
            for test in test_list:
                if pretty:
                    cmd = ['python', '-m', 'subunit.run', test]
                    ps = subprocess.Popen(cmd, env=env,
                                          stdout=subprocess.PIPE)
                    proc = subprocess.Popen(['subunit-trace',
                                             '--no-failure-debug', '-f',
                                             '--no-summary'], env=env,
                                            stdin=ps.stdout)
                    ps.stdout.close()
                    proc.communicate()
                    if proc.returncode > 0:
                        failed = True
                        break
                else:
                    try:
                        subunit_run.main([sys.argv[0], test], sys.stdout)
                    except SystemExit as e:
                        # subunit.run always exits via SystemExit; a truthy
                        # exit code means the test failed, so report how far
                        # we got and stop.
                        if e.code:
                            print("Ran %s tests without failure" % count)
                            exit(1)
                count = count + 1
            if failed:
                print("Ran %s tests without failure" % count)
                exit(0)
    # If not the until-failure special case, call testr like normal
    elif pretty and not list_tests:
        ps = subprocess.Popen(cmd, env=env, stdout=subprocess.PIPE)
        proc = subprocess.Popen(['subunit-trace', '--no-failure-debug', '-f'],
                                env=env, stdin=ps.stdout)
        ps.stdout.close()
    else:
        proc = subprocess.Popen(cmd, env=env)
    proc.communicate()
    return_code = proc.returncode
    if slowest and not list_tests:
        print("\nSlowest Tests:\n")
        slow_proc = subprocess.Popen(['testr', 'slowest'], env=env)
        slow_proc.communicate()
    return return_code


def call_subunit_run(test_id, pretty, subunit):
    if pretty:
        env = copy.deepcopy(os.environ)
        cmd = ['python', '-m', 'subunit.run', test_id]
        ps = subprocess.Popen(cmd, env=env, stdout=subprocess.PIPE)
        proc = subprocess.Popen(['subunit-trace', '--no-failure-debug', '-f'],
                                env=env, stdin=ps.stdout)
        ps.stdout.close()
        proc.communicate()
        return proc.returncode
    elif subunit:
        subunit_run.main([sys.argv[0], test_id], sys.stdout)
    else:
        testtools_run.main([sys.argv[0], test_id], sys.stdout)


def call_testtools_run(test_id):
    testtools_run.main([sys.argv[0], test_id], sys.stdout)


def main():
    opts = parse_args()
    if opts.pretty and opts.subunit:
        msg = ('Subunit output and pretty output cannot be specified at the '
               'same time')
        print(msg)
        exit(2)
    if opts.list and opts.no_discover:
        msg = ('You can not list tests when you are bypassing discovery to '
               'run a single test')
        print(msg)
        exit(3)
    if not opts.parallel and opts.concurrency:
        msg = "You can't specify a concurrency to use when running serially"
        print(msg)
        exit(4)
    if (opts.pdb or opts.no_discover) and opts.until_failure:
        msg = "You can not use until_failure mode with pdb or no-discover"
        print(msg)
        exit(5)
    exclude_regex = construct_regex(opts.blacklist_file, opts.regex)
    if not os.path.isdir('.testrepository'):
        subprocess.call(['testr', 'init'])
    if not opts.no_discover and not opts.pdb:
        exit(call_testr(exclude_regex, opts.subunit, opts.pretty, opts.list,
                        opts.slowest, opts.parallel, opts.concurrency,
                        opts.until_failure))
    elif opts.pdb:
        exit(call_testtools_run(opts.pdb))
    else:
        exit(call_subunit_run(opts.no_discover, opts.pretty, opts.subunit))


if __name__ == '__main__':
    main()
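A worked example of the blacklist handling in construct_regex() above; the file name and patterns are invented for illustration:

    from os_testr.os_testr import construct_regex

    with open('blacklist.txt', 'w') as f:
        f.write('.*slow_test.*\n')
        f.write('.*flaky_test.*\n')

    print(construct_regex('blacklist.txt', 'os_testr.tests'))
    # prints: '(?!.*.*flaky_test.*|.*slow_test.*|)os_testr.tests
    # (the leading quote character comes from the expression string in the
    # code above) - a negative look-ahead built from every blacklist line,
    # followed by the user-supplied selection regex, which call_testr() then
    # hands to `testr run --parallel <regex>`.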
os_testr/subunit2html.py (new executable file, 739 lines)
@@ -0,0 +1,739 @@
|
||||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright 2012-2013 Hewlett-Packard Development Company, L.P.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
"""
|
||||
Utility to convert a subunit stream to an html results file.
|
||||
Code is adapted from the pyunit Html test runner at
|
||||
http://tungwaiyip.info/software/HTMLTestRunner.html
|
||||
|
||||
Takes two arguments. First argument is path to subunit log file, second
|
||||
argument is path of desired output file. Second argument is optional,
|
||||
defaults to 'results.html'.
|
||||
|
||||
Original HTMLTestRunner License:
|
||||
------------------------------------------------------------------------
|
||||
Copyright (c) 2004-2007, Wai Yip Tung
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright notice,
|
||||
this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in the
|
||||
documentation and/or other materials provided with the distribution.
|
||||
* Neither the name Wai Yip Tung nor the names of its contributors may be
|
||||
used to endorse or promote products derived from this software without
|
||||
specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
|
||||
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
|
||||
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
|
||||
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
|
||||
OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
|
||||
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
||||
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||||
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
"""
|
||||
|
||||
import collections
|
||||
import datetime
|
||||
import io
|
||||
import sys
|
||||
import traceback
|
||||
from xml.sax import saxutils
|
||||
|
||||
import subunit
|
||||
import testtools
|
||||
|
||||
__version__ = '0.1'
|
||||
|
||||
|
||||
class TemplateData(object):
|
||||
"""Define a HTML template for report customerization and generation.
|
||||
|
||||
Overall structure of an HTML report
|
||||
|
||||
HTML
|
||||
+------------------------+
|
||||
|<html> |
|
||||
| <head> |
|
||||
| |
|
||||
| STYLESHEET |
|
||||
| +----------------+ |
|
||||
| | | |
|
||||
| +----------------+ |
|
||||
| |
|
||||
| </head> |
|
||||
| |
|
||||
| <body> |
|
||||
| |
|
||||
| HEADING |
|
||||
| +----------------+ |
|
||||
| | | |
|
||||
| +----------------+ |
|
||||
| |
|
||||
| REPORT |
|
||||
| +----------------+ |
|
||||
| | | |
|
||||
| +----------------+ |
|
||||
| |
|
||||
| ENDING |
|
||||
| +----------------+ |
|
||||
| | | |
|
||||
| +----------------+ |
|
||||
| |
|
||||
| </body> |
|
||||
|</html> |
|
||||
+------------------------+
|
||||
"""
|
||||
|
||||
STATUS = {
|
||||
0: 'pass',
|
||||
1: 'fail',
|
||||
2: 'error',
|
||||
3: 'skip',
|
||||
}
|
||||
|
||||
DEFAULT_TITLE = 'Unit Test Report'
|
||||
DEFAULT_DESCRIPTION = ''
|
||||
|
||||
# ------------------------------------------------------------------------
|
||||
# HTML Template
|
||||
|
||||
HTML_TMPL = r"""<?xml version="1.0" encoding="UTF-8"?>
|
||||
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
|
||||
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
|
||||
<html xmlns="http://www.w3.org/1999/xhtml">
|
||||
<head>
|
||||
<title>%(title)s</title>
|
||||
<meta name="generator" content="%(generator)s"/>
|
||||
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8"/>
|
||||
%(stylesheet)s
|
||||
</head>
|
||||
<body>
|
||||
<script language="javascript" type="text/javascript"><!--
|
||||
output_list = Array();
|
||||
|
||||
/* level - 0:Summary; 1:Failed; 2:All */
|
||||
function showCase(level) {
|
||||
trs = document.getElementsByTagName("tr");
|
||||
for (var i = 0; i < trs.length; i++) {
|
||||
tr = trs[i];
|
||||
id = tr.id;
|
||||
if (id.substr(0,2) == 'ft') {
|
||||
if (level < 1) {
|
||||
tr.className = 'hiddenRow';
|
||||
}
|
||||
else {
|
||||
tr.className = '';
|
||||
}
|
||||
}
|
||||
if (id.substr(0,2) == 'pt') {
|
||||
if (level > 1) {
|
||||
tr.className = '';
|
||||
}
|
||||
else {
|
||||
tr.className = 'hiddenRow';
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
function showClassDetail(cid, count) {
|
||||
var id_list = Array(count);
|
||||
var toHide = 1;
|
||||
for (var i = 0; i < count; i++) {
|
||||
tid0 = 't' + cid.substr(1) + '.' + (i+1);
|
||||
tid = 'f' + tid0;
|
||||
tr = document.getElementById(tid);
|
||||
if (!tr) {
|
||||
tid = 'p' + tid0;
|
||||
tr = document.getElementById(tid);
|
||||
}
|
||||
id_list[i] = tid;
|
||||
if (tr.className) {
|
||||
toHide = 0;
|
||||
}
|
||||
}
|
||||
for (var i = 0; i < count; i++) {
|
||||
tid = id_list[i];
|
||||
if (toHide) {
|
||||
document.getElementById('div_'+tid).style.display = 'none'
|
||||
document.getElementById(tid).className = 'hiddenRow';
|
||||
}
|
||||
else {
|
||||
document.getElementById(tid).className = '';
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
function showTestDetail(div_id){
|
||||
var details_div = document.getElementById(div_id)
|
||||
var displayState = details_div.style.display
|
||||
// alert(displayState)
|
||||
if (displayState != 'block' ) {
|
||||
displayState = 'block'
|
||||
details_div.style.display = 'block'
|
||||
}
|
||||
else {
|
||||
details_div.style.display = 'none'
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
function html_escape(s) {
|
||||
s = s.replace(/&/g,'&');
|
||||
s = s.replace(/</g,'<');
|
||||
s = s.replace(/>/g,'>');
|
||||
return s;
|
||||
}
|
||||
|
||||
/* obsoleted by detail in <div>
|
||||
function showOutput(id, name) {
|
||||
var w = window.open("", //url
|
||||
name,
|
||||
"resizable,scrollbars,status,width=800,height=450");
|
||||
d = w.document;
|
||||
d.write("<pre>");
|
||||
d.write(html_escape(output_list[id]));
|
||||
d.write("\n");
|
||||
d.write("<a href='javascript:window.close()'>close</a>\n");
|
||||
d.write("</pre>\n");
|
||||
d.close();
|
||||
}
|
||||
*/
|
||||
--></script>
|
||||
|
||||
%(heading)s
|
||||
%(report)s
|
||||
%(ending)s
|
||||
|
||||
</body>
|
||||
</html>
|
||||
"""
|
||||
# variables: (title, generator, stylesheet, heading, report, ending)
|
||||
|
||||
# ------------------------------------------------------------------------
|
||||
# Stylesheet
|
||||
#
|
||||
# alternatively use a <link> for external style sheet, e.g.
|
||||
# <link rel="stylesheet" href="$url" type="text/css">
|
||||
|
||||
STYLESHEET_TMPL = """
|
||||
<style type="text/css" media="screen">
|
||||
body { font-family: verdana, arial, helvetica, sans-serif;
|
||||
font-size: 80%; }
|
||||
table { font-size: 100%; width: 100%;}
|
||||
pre { font-size: 80%; }
|
||||
|
||||
/* -- heading -------------------------------------------------------------- */
|
||||
h1 {
|
||||
font-size: 16pt;
|
||||
color: gray;
|
||||
}
|
||||
.heading {
|
||||
margin-top: 0ex;
|
||||
margin-bottom: 1ex;
|
||||
}
|
||||
|
||||
.heading .attribute {
|
||||
margin-top: 1ex;
|
||||
margin-bottom: 0;
|
||||
}
|
||||
|
||||
.heading .description {
|
||||
margin-top: 4ex;
|
||||
margin-bottom: 6ex;
|
||||
}
|
||||
|
||||
/* -- css div popup -------------------------------------------------------- */
|
||||
a.popup_link {
|
||||
}
|
||||
|
||||
a.popup_link:hover {
|
||||
color: red;
|
||||
}
|
||||
|
||||
.popup_window {
|
||||
display: none;
|
||||
overflow-x: scroll;
|
||||
/*border: solid #627173 1px; */
|
||||
padding: 10px;
|
||||
background-color: #E6E6D6;
|
||||
font-family: "Ubuntu Mono", "Lucida Console", "Courier New", monospace;
|
||||
text-align: left;
|
||||
font-size: 8pt;
|
||||
}
|
||||
|
||||
}
|
||||
/* -- report --------------------------------------------------------------- */
|
||||
#show_detail_line {
|
||||
margin-top: 3ex;
|
||||
margin-bottom: 1ex;
|
||||
}
|
||||
#result_table {
|
||||
width: 100%;
|
||||
border-collapse: collapse;
|
||||
border: 1px solid #777;
|
||||
}
|
||||
#header_row {
|
||||
font-weight: bold;
|
||||
color: white;
|
||||
background-color: #777;
|
||||
}
|
||||
#result_table td {
|
||||
border: 1px solid #777;
|
||||
padding: 2px;
|
||||
}
|
||||
#total_row { font-weight: bold; }
|
||||
.passClass { background-color: #6c6; }
|
||||
.failClass { background-color: #c60; }
|
||||
.errorClass { background-color: #c00; }
|
||||
.passCase { color: #6c6; }
|
||||
.failCase { color: #c60; font-weight: bold; }
|
||||
.errorCase { color: #c00; font-weight: bold; }
|
||||
.hiddenRow { display: none; }
|
||||
.testcase { margin-left: 2em; }
|
||||
td.testname {width: 40%}
|
||||
td.small {width: 40px}
|
||||
|
||||
/* -- ending --------------------------------------------------------------- */
|
||||
#ending {
|
||||
}
|
||||
|
||||
</style>
|
||||
"""
|
||||
|
||||
# ------------------------------------------------------------------------
|
||||
# Heading
|
||||
#
|
||||
|
||||
HEADING_TMPL = """<div class='heading'>
|
||||
<h1>%(title)s</h1>
|
||||
%(parameters)s
|
||||
<p class='description'>%(description)s</p>
|
||||
</div>
|
||||
|
||||
""" # variables: (title, parameters, description)
|
||||
|
||||
HEADING_ATTRIBUTE_TMPL = """
|
||||
<p class='attribute'><strong>%(name)s:</strong> %(value)s</p>
|
||||
""" # variables: (name, value)
|
||||
|
||||
# ------------------------------------------------------------------------
|
||||
# Report
|
||||
#
|
||||
|
||||
REPORT_TMPL = """
|
||||
<p id='show_detail_line'>Show
|
||||
<a href='javascript:showCase(0)'>Summary</a>
|
||||
<a href='javascript:showCase(1)'>Failed</a>
|
||||
<a href='javascript:showCase(2)'>All</a>
|
||||
</p>
|
||||
<table id='result_table'>
|
||||
<colgroup>
|
||||
<col align='left' />
|
||||
<col align='right' />
|
||||
<col align='right' />
|
||||
<col align='right' />
|
||||
<col align='right' />
|
||||
<col align='right' />
|
||||
<col align='right' />
|
||||
<col align='right' />
|
||||
</colgroup>
|
||||
<tr id='header_row'>
|
||||
<td>Test Group/Test case</td>
|
||||
<td>Count</td>
|
||||
<td>Pass</td>
|
||||
<td>Fail</td>
|
||||
<td>Error</td>
|
||||
<td>Skip</td>
|
||||
<td>View</td>
|
||||
<td> </td>
|
||||
</tr>
|
||||
%(test_list)s
|
||||
<tr id='total_row'>
|
||||
<td>Total</td>
|
||||
<td>%(count)s</td>
|
||||
<td>%(Pass)s</td>
|
||||
<td>%(fail)s</td>
|
||||
<td>%(error)s</td>
|
||||
<td>%(skip)s</td>
|
||||
<td> </td>
|
||||
<td> </td>
|
||||
</tr>
|
||||
</table>
|
||||
""" # variables: (test_list, count, Pass, fail, error)
|
||||
|
||||
REPORT_CLASS_TMPL = r"""
|
||||
<tr class='%(style)s'>
|
||||
<td class="testname">%(desc)s</td>
|
||||
<td class="small">%(count)s</td>
|
||||
<td class="small">%(Pass)s</td>
|
||||
<td class="small">%(fail)s</td>
|
||||
<td class="small">%(error)s</td>
|
||||
<td class="small">%(skip)s</td>
|
||||
<td class="small"><a href="javascript:showClassDetail('%(cid)s',%(count)s)"
|
||||
>Detail</a></td>
|
||||
<td> </td>
|
||||
</tr>
|
||||
""" # variables: (style, desc, count, Pass, fail, error, cid)
|
||||
|
||||
REPORT_TEST_WITH_OUTPUT_TMPL = r"""
|
||||
<tr id='%(tid)s' class='%(Class)s'>
|
||||
<td class='%(style)s'><div class='testcase'>%(desc)s</div></td>
|
||||
<td colspan='7' align='left'>
|
||||
|
||||
<!--css div popup start-->
|
||||
<a class="popup_link" onfocus='this.blur();'
|
||||
href="javascript:showTestDetail('div_%(tid)s')" >
|
||||
%(status)s</a>
|
||||
|
||||
<div id='div_%(tid)s' class="popup_window">
|
||||
<div style='text-align: right; color:red;cursor:pointer'>
|
||||
<a onfocus='this.blur();'
|
||||
onclick="document.getElementById('div_%(tid)s').style.display = 'none' " >
|
||||
[x]</a>
|
||||
</div>
|
||||
<pre>
|
||||
%(script)s
|
||||
</pre>
|
||||
</div>
|
||||
<!--css div popup end-->
|
||||
|
||||
</td>
|
||||
</tr>
|
||||
""" # variables: (tid, Class, style, desc, status)
|
||||
|
||||
REPORT_TEST_NO_OUTPUT_TMPL = r"""
|
||||
<tr id='%(tid)s' class='%(Class)s'>
|
||||
<td class='%(style)s'><div class='testcase'>%(desc)s</div></td>
|
||||
<td colspan='6' align='center'>%(status)s</td>
|
||||
</tr>
|
||||
""" # variables: (tid, Class, style, desc, status)
|
||||
|
||||
REPORT_TEST_OUTPUT_TMPL = r"""
|
||||
%(id)s: %(output)s
|
||||
""" # variables: (id, output)
|
||||
|
||||
# ------------------------------------------------------------------------
|
||||
# ENDING
|
||||
#
|
||||
|
||||
ENDING_TMPL = """<div id='ending'> </div>"""
|
||||
|
||||
# -------------------- The end of the Template class -------------------
|
||||
|
||||
|
||||
class ClassInfoWrapper(object):
|
||||
def __init__(self, name, mod):
|
||||
self.name = name
|
||||
self.mod = mod
|
||||
|
||||
def __repr__(self):
|
||||
return "%s" % (self.name)
|
||||
|
||||
|
||||
class HtmlOutput(testtools.TestResult):
|
||||
"""Output test results in html."""
|
||||
|
||||
def __init__(self, html_file='result.html'):
|
||||
super(HtmlOutput, self).__init__()
|
||||
self.success_count = 0
|
||||
self.failure_count = 0
|
||||
self.error_count = 0
|
||||
self.skip_count = 0
|
||||
self.result = []
|
||||
self.html_file = html_file
|
||||
|
||||
def addSuccess(self, test):
|
||||
self.success_count += 1
|
||||
output = test.shortDescription()
|
||||
if output is None:
|
||||
output = test.id()
|
||||
self.result.append((0, test, output, ''))
|
||||
|
||||
def addSkip(self, test, err):
|
||||
output = test.shortDescription()
|
||||
if output is None:
|
||||
output = test.id()
|
||||
self.skip_count += 1
|
||||
self.result.append((3, test, output, ''))
|
||||
|
||||
def addError(self, test, err):
|
||||
output = test.shortDescription()
|
||||
if output is None:
|
||||
output = test.id()
|
||||
# Skipped tests are handled by SkipTest Exceptions.
|
||||
# if err[0] == SkipTest:
|
||||
# self.skip_count += 1
|
||||
# self.result.append((3, test, output, ''))
|
||||
else:
|
||||
self.error_count += 1
|
||||
_exc_str = self.formatErr(err)
|
||||
self.result.append((2, test, output, _exc_str))
|
||||
|
||||
def addFailure(self, test, err):
|
||||
print(test)
|
||||
self.failure_count += 1
|
||||
_exc_str = self.formatErr(err)
|
||||
output = test.shortDescription()
|
||||
if output is None:
|
||||
output = test.id()
|
||||
self.result.append((1, test, output, _exc_str))
|
||||
|
||||
def formatErr(self, err):
|
||||
exctype, value, tb = err
|
||||
return ''.join(traceback.format_exception(exctype, value, tb))
|
||||
|
||||
def stopTestRun(self):
|
||||
super(HtmlOutput, self).stopTestRun()
|
||||
self.stopTime = datetime.datetime.now()
|
||||
report_attrs = self._getReportAttributes()
|
||||
generator = 'subunit2html %s' % __version__
|
||||
heading = self._generate_heading(report_attrs)
|
||||
report = self._generate_report()
|
||||
ending = self._generate_ending()
|
||||
output = TemplateData.HTML_TMPL % dict(
|
||||
title=saxutils.escape(TemplateData.DEFAULT_TITLE),
|
||||
generator=generator,
|
||||
stylesheet=TemplateData.STYLESHEET_TMPL,
|
||||
heading=heading,
|
||||
report=report,
|
||||
ending=ending,
|
||||
)
|
||||
if self.html_file:
|
||||
with open(self.html_file, 'wb') as html_file:
|
||||
html_file.write(output.encode('utf8'))
|
||||
|
||||
def _getReportAttributes(self):
|
||||
"""Return report attributes as a list of (name, value)."""
|
||||
status = []
|
||||
if self.success_count:
|
||||
status.append('Pass %s' % self.success_count)
|
||||
if self.failure_count:
|
||||
status.append('Failure %s' % self.failure_count)
|
||||
if self.error_count:
|
||||
status.append('Error %s' % self.error_count)
|
||||
if self.skip_count:
|
||||
status.append('Skip %s' % self.skip_count)
|
||||
if status:
|
||||
status = ' '.join(status)
|
||||
else:
|
||||
status = 'none'
|
||||
return [
|
||||
('Status', status),
|
||||
]
|
||||
|
||||
def _generate_heading(self, report_attrs):
|
||||
a_lines = []
|
||||
for name, value in report_attrs:
|
||||
line = TemplateData.HEADING_ATTRIBUTE_TMPL % dict(
|
||||
name=saxutils.escape(name),
|
||||
value=saxutils.escape(value),
|
||||
)
|
||||
a_lines.append(line)
|
||||
heading = TemplateData.HEADING_TMPL % dict(
|
||||
title=saxutils.escape(TemplateData.DEFAULT_TITLE),
|
||||
parameters=''.join(a_lines),
|
||||
description=saxutils.escape(TemplateData.DEFAULT_DESCRIPTION),
|
||||
)
|
||||
return heading
|
||||
|
||||
def _generate_report(self):
|
||||
rows = []
|
||||
sortedResult = self._sortResult(self.result)
|
||||
for cid, (cls, cls_results) in enumerate(sortedResult):
|
||||
# subtotal for a class
|
||||
np = nf = ne = ns = 0
|
||||
for n, t, o, e in cls_results:
|
||||
if n == 0:
|
||||
np += 1
|
||||
elif n == 1:
|
||||
nf += 1
|
||||
elif n == 2:
|
||||
ne += 1
|
||||
else:
|
||||
ns += 1
|
||||
|
||||
# format class description
|
||||
if cls.mod == "__main__":
|
||||
name = cls.name
|
||||
else:
|
||||
name = "%s" % (cls.name)
|
||||
doc = cls.__doc__ and cls.__doc__.split("\n")[0] or ""
|
||||
desc = doc and '%s: %s' % (name, doc) or name
|
||||
|
||||
row = TemplateData.REPORT_CLASS_TMPL % dict(
|
||||
style=(ne > 0 and 'errorClass' or nf > 0
|
||||
and 'failClass' or 'passClass'),
|
||||
desc = desc,
|
||||
count = np + nf + ne + ns,
|
||||
Pass = np,
|
||||
fail = nf,
|
||||
error = ne,
|
||||
skip = ns,
|
||||
cid = 'c%s' % (cid + 1),
|
||||
)
|
||||
rows.append(row)
|
||||
|
||||
for tid, (n, t, o, e) in enumerate(cls_results):
|
||||
self._generate_report_test(rows, cid, tid, n, t, o, e)
|
||||
|
||||
report = TemplateData.REPORT_TMPL % dict(
|
||||
test_list=''.join(rows),
|
||||
count=str(self.success_count + self.failure_count +
|
||||
self.error_count + self.skip_count),
|
||||
Pass=str(self.success_count),
|
||||
fail=str(self.failure_count),
|
||||
error=str(self.error_count),
|
||||
skip=str(self.skip_count),
|
||||
)
|
||||
return report
|
||||
|
||||
def _sortResult(self, result_list):
|
||||
# unittest does not seem to run in any particular order.
|
||||
# Here at least we want to group them together by class.
|
||||
rmap = {}
|
||||
classes = []
|
||||
for n, t, o, e in result_list:
|
||||
if hasattr(t, '_tests'):
|
||||
for inner_test in t._tests:
|
||||
self._add_cls(rmap, classes, inner_test,
|
||||
(n, inner_test, o, e))
|
||||
else:
|
||||
self._add_cls(rmap, classes, t, (n, t, o, e))
|
||||
classort = lambda s: str(s)
|
||||
sortedclasses = sorted(classes, key=classort)
|
||||
r = [(cls, rmap[str(cls)]) for cls in sortedclasses]
|
||||
return r
|
||||
|
||||
def _add_cls(self, rmap, classes, test, data_tuple):
|
||||
if hasattr(test, 'test'):
|
||||
test = test.test
|
||||
if test.__class__ == subunit.RemotedTestCase:
|
||||
cl = test._RemotedTestCase__description.rsplit('.', 1)[0]
|
||||
mod = cl.rsplit('.', 1)[0]
|
||||
cls = ClassInfoWrapper(cl, mod)
|
||||
else:
|
||||
cls = ClassInfoWrapper(str(test.__class__), str(test.__module__))
|
||||
if not str(cls) in rmap:
|
||||
rmap[str(cls)] = []
|
||||
classes.append(cls)
|
||||
rmap[str(cls)].append(data_tuple)
|
||||
|
||||
def _generate_report_test(self, rows, cid, tid, n, t, o, e):
|
||||
# e.g. 'pt1.1', 'ft1.1', etc
|
||||
# ptx.x for passed/skipped tests and ftx.x for failed/errored tests.
|
||||
has_output = bool(o or e)
|
||||
tid = ((n == 0 or n == 3) and
|
||||
'p' or 'f') + 't%s.%s' % (cid + 1, tid + 1)
|
||||
name = t.id().split('.')[-1]
|
||||
# if shortDescription is not the function name, use it
|
||||
if t.shortDescription().find(name) == -1:
|
||||
doc = t.shortDescription()
|
||||
else:
|
||||
doc = None
|
||||
desc = doc and ('%s: %s' % (name, doc)) or name
|
||||
tmpl = (has_output and TemplateData.REPORT_TEST_WITH_OUTPUT_TMPL
|
||||
or TemplateData.REPORT_TEST_NO_OUTPUT_TMPL)
|
||||
|
||||
script = TemplateData.REPORT_TEST_OUTPUT_TMPL % dict(
|
||||
id=tid,
|
||||
output=saxutils.escape(o + e),
|
||||
)
|
||||
|
||||
row = tmpl % dict(
|
||||
tid=tid,
|
||||
Class=((n == 0 or n == 3) and 'hiddenRow' or 'none'),
|
||||
style=(n == 2 and 'errorCase' or
|
||||
(n == 1 and 'failCase' or 'none')),
|
||||
desc=desc,
|
||||
script=script,
|
||||
status=TemplateData.STATUS[n],
|
||||
)
|
||||
rows.append(row)
|
||||
if not has_output:
|
||||
return
|
||||
|
||||
def _generate_ending(self):
|
||||
return TemplateData.ENDING_TMPL
|
||||
|
||||
def startTestRun(self):
|
||||
super(HtmlOutput, self).startTestRun()
|
||||
|
||||
|
||||
class FileAccumulator(testtools.StreamResult):
|
||||
|
||||
def __init__(self):
|
||||
super(FileAccumulator, self).__init__()
|
||||
self.route_codes = collections.defaultdict(io.BytesIO)
|
||||
|
||||
def status(self, **kwargs):
|
||||
if kwargs.get('file_name') != 'stdout':
|
||||
return
|
||||
file_bytes = kwargs.get('file_bytes')
|
||||
if not file_bytes:
|
||||
return
|
||||
route_code = kwargs.get('route_code')
|
||||
stream = self.route_codes[route_code]
|
||||
stream.write(file_bytes)
|
||||
|
||||
|
||||
def main():
|
||||
if len(sys.argv) < 2:
|
||||
print("Need at least one argument: path to subunit log.")
|
||||
exit(1)
|
||||
subunit_file = sys.argv[1]
|
||||
if len(sys.argv) > 2:
|
||||
html_file = sys.argv[2]
|
||||
else:
|
||||
html_file = 'results.html'
|
||||
|
||||
html_result = HtmlOutput(html_file)
|
||||
stream = open(subunit_file, 'rb')
|
||||
|
||||
# Feed the subunit stream through both a V1 and V2 parser.
|
||||
# Depends on having the v2 capable libraries installed.
|
||||
# First V2.
|
||||
# Non-v2 content and captured non-test output will be presented as file
|
||||
# segments called stdout.
|
||||
suite = subunit.ByteStreamToStreamResult(stream, non_subunit_name='stdout')
|
||||
# The HTML output code is in legacy mode.
|
||||
result = testtools.StreamToExtendedDecorator(html_result)
|
||||
# Divert non-test output
|
||||
accumulator = FileAccumulator()
|
||||
result = testtools.StreamResultRouter(result)
|
||||
result.add_rule(accumulator, 'test_id', test_id=None)
|
||||
result.startTestRun()
|
||||
suite.run(result)
|
||||
# Now reprocess any found stdout content as V1 subunit
|
||||
for bytes_io in accumulator.route_codes.values():
|
||||
bytes_io.seek(0)
|
||||
suite = subunit.ProtocolTestCase(bytes_io)
|
||||
suite.run(html_result)
|
||||
result.stopTestRun()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
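For reference, a minimal invocation of the converter above, sketched on the assumption that the package has been installed so the subunit2html console script declared in setup.cfg is on PATH, and that a captured stream exists at the illustrative path ./subunit.log:

    subunit2html ./subunit.log results.html

The second argument is optional; main() falls back to results.html when it is omitted.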
317
os_testr/subunit_trace.py
Executable file
@ -0,0 +1,317 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
# Copyright 2014 Hewlett-Packard Development Company, L.P.
|
||||
# Copyright 2014 Samsung Electronics
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""Trace a subunit stream in reasonable detail and high accuracy."""
|
||||
|
||||
import argparse
|
||||
import datetime
|
||||
import functools
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
|
||||
import subunit
|
||||
import testtools
|
||||
|
||||
# NOTE(mtreinish) on python3 anydbm was renamed dbm and the python2 dbm module
|
||||
# was renamed to dbm.ndbm, this block takes that into account
|
||||
try:
|
||||
import anydbm as dbm
|
||||
except ImportError:
|
||||
import dbm
|
||||
|
||||
DAY_SECONDS = 60 * 60 * 24
|
||||
FAILS = []
|
||||
RESULTS = {}
|
||||
|
||||
|
||||
def total_seconds(timedelta):
|
||||
# NOTE(mtreinish): This method is built in to the timedelta class in
# python >= 2.7; it is here to enable its use on older versions
|
||||
return ((timedelta.days * DAY_SECONDS + timedelta.seconds) * 10 ** 6 +
|
||||
timedelta.microseconds) / 10 ** 6
|
||||
|
||||
|
||||
def cleanup_test_name(name, strip_tags=True, strip_scenarios=False):
|
||||
"""Clean up the test name for display.
|
||||
|
||||
By default we strip out the tags in the test because they don't help us
in matching the test that was run to its result.
|
||||
|
||||
Make it possible to strip out the testscenarios information (not to
be confused with tempest scenarios); however, that's often needed to
identify generated negative tests.
|
||||
"""
|
||||
if strip_tags:
|
||||
tags_start = name.find('[')
|
||||
tags_end = name.find(']')
|
||||
if tags_start > 0 and tags_end > tags_start:
|
||||
newname = name[:tags_start]
|
||||
newname += name[tags_end + 1:]
|
||||
name = newname
|
||||
|
||||
if strip_scenarios:
|
||||
tags_start = name.find('(')
|
||||
tags_end = name.find(')')
|
||||
if tags_start > 0 and tags_end > tags_start:
|
||||
newname = name[:tags_start]
|
||||
newname += name[tags_end + 1:]
|
||||
name = newname
|
||||
|
||||
return name
|
||||
|
||||
|
||||
def get_duration(timestamps):
|
||||
start, end = timestamps
|
||||
if not start or not end:
|
||||
duration = ''
|
||||
else:
|
||||
delta = end - start
|
||||
duration = '%d.%06ds' % (
|
||||
delta.days * DAY_SECONDS + delta.seconds, delta.microseconds)
|
||||
return duration
|
||||
|
||||
|
||||
def find_worker(test):
|
||||
"""Get the worker number.
|
||||
|
||||
If there are no workers because we aren't in a concurrent environment,
|
||||
assume the worker number is 0.
|
||||
"""
|
||||
for tag in test['tags']:
|
||||
if tag.startswith('worker-'):
|
||||
return int(tag[7:])
|
||||
return 0
|
||||
|
||||
|
||||
# Print out stdout/stderr if it exists, always
|
||||
def print_attachments(stream, test, all_channels=False):
|
||||
"""Print out subunit attachments.
|
||||
|
||||
Print out subunit attachments that contain content. This
|
||||
runs in 2 modes, one for successes where we print out just stdout
|
||||
and stderr, and an override that dumps all the attachments.
|
||||
"""
|
||||
channels = ('stdout', 'stderr')
|
||||
for name, detail in test['details'].items():
|
||||
# NOTE(sdague): the subunit names are a little crazy, and actually
|
||||
# are in the form pythonlogging:'' (with the colon and quotes)
|
||||
name = name.split(':')[0]
|
||||
if detail.content_type.type == 'test':
|
||||
detail.content_type.type = 'text'
|
||||
if (all_channels or name in channels) and detail.as_text():
|
||||
title = "Captured %s:" % name
|
||||
stream.write("\n%s\n%s\n" % (title, ('~' * len(title))))
|
||||
# indent attachment lines 4 spaces to make them visually
|
||||
# offset
|
||||
for line in detail.as_text().split('\n'):
|
||||
stream.write(" %s\n" % line)
|
||||
|
||||
|
||||
def find_test_run_time_diff(test_id, run_time):
|
||||
times_db_path = os.path.join(os.path.join(os.getcwd(), '.testrepository'),
|
||||
'times.dbm')
|
||||
if os.path.isfile(times_db_path):
|
||||
try:
|
||||
test_times = dbm.open(times_db_path)
|
||||
except Exception:
|
||||
return False
|
||||
avg_runtime = float(test_times.get(str(test_id), False))
|
||||
if avg_runtime and avg_runtime > 0:
|
||||
run_time = float(run_time.rstrip('s'))
|
||||
perc_diff = ((run_time - avg_runtime) / avg_runtime) * 100
|
||||
return perc_diff
|
||||
return False
|
||||
|
||||
|
||||
def show_outcome(stream, test, print_failures=False, failonly=False,
|
||||
threshold='0'):
|
||||
global RESULTS
|
||||
status = test['status']
|
||||
# TODO(sdague): ask lifeless why on this?
|
||||
if status == 'exists':
|
||||
return
|
||||
|
||||
worker = find_worker(test)
|
||||
name = cleanup_test_name(test['id'])
|
||||
duration = get_duration(test['timestamps'])
|
||||
|
||||
if worker not in RESULTS:
|
||||
RESULTS[worker] = []
|
||||
RESULTS[worker].append(test)
|
||||
|
||||
# don't count the process-returncode pseudo-test as a failure
|
||||
if name == 'process-returncode':
|
||||
return
|
||||
|
||||
if status == 'fail':
|
||||
FAILS.append(test)
|
||||
stream.write('{%s} %s [%s] ... FAILED\n' % (
|
||||
worker, name, duration))
|
||||
if not print_failures:
|
||||
print_attachments(stream, test, all_channels=True)
|
||||
elif not failonly:
|
||||
if status == 'success':
|
||||
out_string = '{%s} %s [%s' % (worker, name, duration)
|
||||
perc_diff = find_test_run_time_diff(test['id'], duration)
|
||||
if perc_diff and abs(perc_diff) >= abs(float(threshold)):
|
||||
if perc_diff > 0:
|
||||
out_string = out_string + ' +%.2f%%' % perc_diff
|
||||
else:
|
||||
out_string = out_string + ' %.2f%%' % perc_diff
|
||||
stream.write(out_string + '] ... ok\n')
|
||||
print_attachments(stream, test)
|
||||
elif status == 'skip':
|
||||
stream.write('{%s} %s ... SKIPPED: %s\n' % (
|
||||
worker, name, test['details']['reason'].as_text()))
|
||||
else:
|
||||
stream.write('{%s} %s [%s] ... %s\n' % (
|
||||
worker, name, duration, test['status']))
|
||||
if not print_failures:
|
||||
print_attachments(stream, test, all_channels=True)
|
||||
|
||||
stream.flush()
|
||||
|
||||
|
||||
def print_fails(stream):
|
||||
"""Print summary failure report.
|
||||
|
||||
Only used when --fails is passed; there remains debate on inline vs.
at-end reporting, so keep the utility function around.
|
||||
"""
|
||||
if not FAILS:
|
||||
return
|
||||
stream.write("\n==============================\n")
|
||||
stream.write("Failed %s tests - output below:" % len(FAILS))
|
||||
stream.write("\n==============================\n")
|
||||
for f in FAILS:
|
||||
stream.write("\n%s\n" % f['id'])
|
||||
stream.write("%s\n" % ('-' * len(f['id'])))
|
||||
print_attachments(stream, f, all_channels=True)
|
||||
stream.write('\n')
|
||||
|
||||
|
||||
def count_tests(key, value):
|
||||
count = 0
|
||||
for k, v in RESULTS.items():
|
||||
for item in v:
|
||||
if key in item:
|
||||
if re.search(value, item[key]):
|
||||
count += 1
|
||||
return count
|
||||
|
||||
|
||||
def run_time():
|
||||
runtime = 0.0
|
||||
for k, v in RESULTS.items():
|
||||
for test in v:
|
||||
runtime += float(get_duration(test['timestamps']).strip('s'))
|
||||
return runtime
|
||||
|
||||
|
||||
def worker_stats(worker):
|
||||
tests = RESULTS[worker]
|
||||
num_tests = len(tests)
|
||||
delta = tests[-1]['timestamps'][1] - tests[0]['timestamps'][0]
|
||||
return num_tests, delta
|
||||
|
||||
|
||||
def print_summary(stream, elapsed_time):
|
||||
stream.write("\n======\nTotals\n======\n")
|
||||
stream.write("Ran: %s tests in %.4f sec.\n" % (
|
||||
count_tests('status', '.*'), total_seconds(elapsed_time)))
|
||||
stream.write(" - Passed: %s\n" % count_tests('status', '^success$'))
|
||||
stream.write(" - Skipped: %s\n" % count_tests('status', '^skip$'))
|
||||
stream.write(" - Expected Fail: %s\n" % count_tests('status', '^xfail$'))
|
||||
stream.write(" - Unexpected Success: %s\n" % count_tests('status',
|
||||
'^uxsuccess$'))
|
||||
stream.write(" - Failed: %s\n" % count_tests('status', '^fail$'))
|
||||
stream.write("Sum of execute time for each test: %.4f sec.\n" % run_time())
|
||||
|
||||
# we could have no results, especially as we filter out the process-codes
|
||||
if RESULTS:
|
||||
stream.write("\n==============\nWorker Balance\n==============\n")
|
||||
|
||||
for w in range(max(RESULTS.keys()) + 1):
|
||||
if w not in RESULTS:
|
||||
stream.write(
|
||||
" - WARNING: missing Worker %s! "
|
||||
"Race in testr accounting.\n" % w)
|
||||
else:
|
||||
num, time = worker_stats(w)
|
||||
stream.write(" - Worker %s (%s tests) => %ss\n" %
|
||||
(w, num, time))
|
||||
|
||||
|
||||
def parse_args():
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument('--no-failure-debug', '-n', action='store_true',
|
||||
dest='print_failures', help='Disable printing failure '
|
||||
'debug information in realtime')
|
||||
parser.add_argument('--fails', '-f', action='store_true',
|
||||
dest='post_fails', help='Print failure debug '
|
||||
'information after the stream is processed')
|
||||
parser.add_argument('--failonly', action='store_true',
|
||||
dest='failonly', help="Don't print success items",
|
||||
default=(
|
||||
os.environ.get('TRACE_FAILONLY', False)
|
||||
is not False))
|
||||
parser.add_argument('--diff-threshold', '-t', dest='threshold',
|
||||
help="Threshold to use for displaying percent change "
|
||||
"from the avg run time. If one is not specified "
|
||||
"the percent change will always be displayed")
|
||||
parser.add_argument('--no-summary', action='store_true',
|
||||
help="Don't print the summary of the test run after "
|
||||
" completes")
|
||||
return parser.parse_args()
|
||||
|
||||
|
||||
def main():
|
||||
args = parse_args()
|
||||
stream = subunit.ByteStreamToStreamResult(
|
||||
sys.stdin, non_subunit_name='stdout')
|
||||
outcomes = testtools.StreamToDict(
|
||||
functools.partial(show_outcome, sys.stdout,
print_failures=args.print_failures,
failonly=args.failonly,
# default '0' keeps the percent change always displayed
threshold=args.threshold or '0'))
|
||||
summary = testtools.StreamSummary()
|
||||
result = testtools.CopyStreamResult([outcomes, summary])
|
||||
result = testtools.StreamResultRouter(result)
|
||||
cat = subunit.test_results.CatFiles(sys.stdout)
|
||||
result.add_rule(cat, 'test_id', test_id=None)
|
||||
start_time = datetime.datetime.utcnow()
|
||||
result.startTestRun()
|
||||
try:
|
||||
stream.run(result)
|
||||
finally:
|
||||
result.stopTestRun()
|
||||
stop_time = datetime.datetime.utcnow()
|
||||
elapsed_time = stop_time - start_time
|
||||
|
||||
if count_tests('status', '.*') == 0:
|
||||
print("The test run didn't actually run any tests")
|
||||
exit(1)
|
||||
if args.post_fails:
|
||||
print_fails(sys.stdout)
|
||||
if not args.no_summary:
|
||||
print_summary(sys.stdout, elapsed_time)
|
||||
exit(0 if summary.wasSuccessful() else 1)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
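A hedged usage sketch for the tracer above: with testrepository and python-subunit installed (see requirements.txt) and a .testr.conf present, the runner's subunit v2 stream can be piped into subunit-trace on stdin; the flags shown are the ones defined in parse_args:

    testr run --subunit | subunit-trace --no-failure-debug --fails

This combination suppresses the inline failure dumps and prints them once at the end of the stream instead.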
0
os_testr/tests/__init__.py
Normal file
23
os_testr/tests/base.py
Normal file
@ -0,0 +1,23 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright 2010-2011 OpenStack Foundation
|
||||
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from oslotest import base
|
||||
|
||||
|
||||
class TestCase(base.BaseTestCase):
|
||||
|
||||
"""Test case base class for all unit tests."""
|
0
os_testr/tests/files/__init__.py
Normal file
23
os_testr/tests/files/failing-tests
Normal file
@ -0,0 +1,23 @@
|
||||
# Copyright 2013 IBM Corp.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import testtools
|
||||
|
||||
class FakeTestClass(testtools.TestCase):
|
||||
def test_pass(self):
|
||||
self.assertTrue(False)
|
||||
|
||||
def test_pass_list(self):
|
||||
test_list = ['test', 'a', 'b']
|
||||
self.assertIn('fail', test_list)
|
23
os_testr/tests/files/passing-tests
Normal file
@ -0,0 +1,23 @@
|
||||
# Copyright 2013 IBM Corp.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import testtools
|
||||
|
||||
class FakeTestClass(testtools.TestCase):
|
||||
def test_pass(self):
|
||||
self.assertTrue(True)
|
||||
|
||||
def test_pass_list(self):
|
||||
test_list = ['test', 'a', 'b']
|
||||
self.assertIn('test', test_list)
|
20
os_testr/tests/files/setup.cfg
Normal file
@ -0,0 +1,20 @@
|
||||
[metadata]
|
||||
name = tempest_unit_tests
|
||||
version = 1
|
||||
summary = Fake Project for testing wrapper scripts
|
||||
author = OpenStack
|
||||
author-email = openstack-dev@lists.openstack.org
|
||||
home-page = http://www.openstack.org/
|
||||
classifier =
|
||||
Intended Audience :: Information Technology
|
||||
Intended Audience :: System Administrators
|
||||
Intended Audience :: Developers
|
||||
License :: OSI Approved :: Apache Software License
|
||||
Operating System :: POSIX :: Linux
|
||||
Programming Language :: Python
|
||||
Programming Language :: Python :: 2
|
||||
Programming Language :: Python :: 2.7
|
||||
|
||||
[global]
|
||||
setup-hooks =
|
||||
pbr.hooks.setup_hook
|
5
os_testr/tests/files/testr-conf
Normal file
@ -0,0 +1,5 @@
|
||||
[DEFAULT]
|
||||
test_command=${PYTHON:-python} -m subunit.run discover -t ./ ./tests $LISTOPT $IDOPTION
|
||||
test_id_option=--load-list $IDFILE
|
||||
test_list_option=--list
|
||||
group_regex=([^\.]*\.)*
|
28
os_testr/tests/test_os_testr.py
Normal file
@ -0,0 +1,28 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
test_os_testr
|
||||
----------------------------------
|
||||
|
||||
Tests for `os_testr` module.
|
||||
"""
|
||||
|
||||
from os_testr.tests import base
|
||||
|
||||
|
||||
class TestOs_testr(base.TestCase):
|
||||
|
||||
def test_something(self):
|
||||
pass
|
104
os_testr/tests/test_return_codes.py
Normal file
@ -0,0 +1,104 @@
|
||||
# Copyright 2015 Hewlett-Packard Development Company, L.P.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import os
|
||||
import shutil
|
||||
import StringIO
|
||||
import subprocess
|
||||
import tempfile
|
||||
|
||||
import testtools
|
||||
|
||||
from os_testr.tests import base
|
||||
|
||||
DEVNULL = open(os.devnull, 'wb')
|
||||
|
||||
|
||||
class TestReturnCodes(base.TestCase):
|
||||
def setUp(self):
|
||||
super(TestReturnCodes, self).setUp()
|
||||
# Setup test dirs
|
||||
self.directory = tempfile.mkdtemp(prefix='ostestr-unit')
|
||||
self.addCleanup(shutil.rmtree, self.directory)
|
||||
self.test_dir = os.path.join(self.directory, 'tests')
|
||||
os.mkdir(self.test_dir)
|
||||
# Setup Test files
|
||||
self.testr_conf_file = os.path.join(self.directory, '.testr.conf')
|
||||
self.setup_cfg_file = os.path.join(self.directory, 'setup.cfg')
|
||||
self.passing_file = os.path.join(self.test_dir, 'test_passing.py')
|
||||
self.failing_file = os.path.join(self.test_dir, 'test_failing.py')
|
||||
self.init_file = os.path.join(self.test_dir, '__init__.py')
|
||||
self.setup_py = os.path.join(self.directory, 'setup.py')
|
||||
shutil.copy('os_testr/tests/files/testr-conf', self.testr_conf_file)
|
||||
shutil.copy('os_testr/tests/files/passing-tests', self.passing_file)
|
||||
shutil.copy('os_testr/tests/files/failing-tests', self.failing_file)
|
||||
shutil.copy('setup.py', self.setup_py)
|
||||
shutil.copy('os_testr/tests/files/setup.cfg', self.setup_cfg_file)
|
||||
shutil.copy('os_testr/tests/files/__init__.py', self.init_file)
|
||||
|
||||
self.stdout = StringIO.StringIO()
|
||||
self.stderr = StringIO.StringIO()
|
||||
# Change directory, run wrapper and check result
|
||||
self.addCleanup(os.chdir, os.path.abspath(os.curdir))
|
||||
os.chdir(self.directory)
|
||||
|
||||
def assertRunExit(self, cmd, expected, subunit=False):
|
||||
p = subprocess.Popen(
|
||||
"%s" % cmd, shell=True,
|
||||
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||
out, err = p.communicate()
|
||||
|
||||
if not subunit:
|
||||
self.assertEqual(
|
||||
p.returncode, expected,
|
||||
"Stdout: %s; Stderr: %s" % (out, err))
|
||||
else:
|
||||
self.assertEqual(p.returncode, expected,
|
||||
"Expected return code: %s doesn't match actual "
|
||||
"return code of: %s" % (expected, p.returncode))
|
||||
|
||||
def test_default_passing(self):
|
||||
self.assertRunExit('ostestr --regex passing', 0)
|
||||
|
||||
def test_default_fails(self):
|
||||
self.assertRunExit('ostestr', 1)
|
||||
|
||||
def test_default_passing_no_slowest(self):
|
||||
self.assertRunExit('ostestr --no-slowest --regex passing', 0)
|
||||
|
||||
def test_default_fails_no_slowest(self):
|
||||
self.assertRunExit('ostestr --no-slowest', 1)
|
||||
|
||||
def test_default_serial_passing(self):
|
||||
self.assertRunExit('ostestr --serial --regex passing', 0)
|
||||
|
||||
def test_default_serial_fails(self):
|
||||
self.assertRunExit('ostestr --serial', 1)
|
||||
|
||||
def test_testr_subunit_passing(self):
|
||||
self.assertRunExit('ostestr --no-pretty --subunit --regex passing', 0,
|
||||
subunit=True)
|
||||
|
||||
@testtools.skip('Skipped because of testrepository lp bug #1411804')
|
||||
def test_testr_subunit_fails(self):
|
||||
self.assertRunExit('ostestr --no-pretty --subunit', 1, subunit=True)
|
||||
|
||||
def test_testr_no_pretty_passing(self):
|
||||
self.assertRunExit('ostestr --no-pretty --regex passing', 0)
|
||||
|
||||
def test_testr_no_pretty_fails(self):
|
||||
self.assertRunExit('ostestr --no-pretty', 1)
|
||||
|
||||
def test_list(self):
|
||||
self.assertRunExit('ostestr --list', 0)
|
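The return-code tests above drive the ostestr wrapper end to end; as an illustrative sketch, the equivalent manual invocations inside such a project would be:

    ostestr --regex passing
    ostestr --serial --no-slowest
    ostestr --no-pretty --subunit
    ostestr --list

Here 'passing' is just the regex used by the fixture tests; any testr-compatible selection regex works in its place.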
9
requirements.txt
Normal file
@ -0,0 +1,9 @@
|
||||
# The order of packages is significant, because pip processes them in the order
|
||||
# of appearance. Changing the order has an impact on the overall integration
|
||||
# process, which may cause wedges in the gate later.
|
||||
|
||||
pbr>=0.6,!=0.7,<1.0
|
||||
Babel>=1.3
|
||||
testrepository>=0.0.18
|
||||
python-subunit>=0.0.18
|
||||
testtools>=0.9.36,!=1.2.0
|
59
setup.cfg
Normal file
@ -0,0 +1,59 @@
|
||||
[metadata]
|
||||
name = os-testr
|
||||
summary = A testr wrapper to provide functionality for OpenStack projects
|
||||
description-file =
|
||||
README.rst
|
||||
author = OpenStack
|
||||
author-email = openstack-dev@lists.openstack.org
|
||||
home-page = http://www.openstack.org/
|
||||
classifier =
|
||||
Environment :: OpenStack
|
||||
Intended Audience :: Information Technology
|
||||
Intended Audience :: System Administrators
|
||||
License :: OSI Approved :: Apache Software License
|
||||
Operating System :: POSIX :: Linux
|
||||
Programming Language :: Python
|
||||
Programming Language :: Python :: 2
|
||||
Programming Language :: Python :: 2.7
|
||||
Programming Language :: Python :: 2.6
|
||||
Programming Language :: Python :: 3
|
||||
Programming Language :: Python :: 3.3
|
||||
Programming Language :: Python :: 3.4
|
||||
|
||||
[files]
|
||||
packages =
|
||||
os_testr
|
||||
|
||||
[entry_points]
|
||||
console_scripts =
|
||||
subunit-trace = os_testr.subunit_trace:main
|
||||
ostestr = os_testr.os_testr:main
|
||||
subunit2html = os_testr.subunit2html:main
|
||||
|
||||
[build_sphinx]
|
||||
source-dir = doc/source
|
||||
build-dir = doc/build
|
||||
all_files = 1
|
||||
|
||||
[upload_sphinx]
|
||||
upload-dir = doc/build/html
|
||||
|
||||
[compile_catalog]
|
||||
directory = os_testr/locale
|
||||
domain = os-testr
|
||||
|
||||
[update_catalog]
|
||||
domain = os-testr
|
||||
output_dir = os_testr/locale
|
||||
input_file = os_testr/locale/os-testr.pot
|
||||
|
||||
[extract_messages]
|
||||
keywords = _ gettext ngettext l_ lazy_gettext
|
||||
mapping_file = babel.cfg
|
||||
output_file = os_testr/locale/os-testr.pot
|
||||
|
||||
[egg_info]
|
||||
tag_date = 0
|
||||
tag_svn_revision = 0
|
||||
tag_build =
|
||||
|
30
setup.py
Executable file
@ -0,0 +1,30 @@
|
||||
#!/usr/bin/env python
|
||||
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
|
||||
import setuptools
|
||||
|
||||
# In python < 2.7.4, a lazy loading of package `pbr` will break
|
||||
# setuptools if some other modules registered functions in `atexit`.
|
||||
# solution from: http://bugs.python.org/issue15881#msg170215
|
||||
try:
|
||||
import multiprocessing # noqa
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
setuptools.setup(
|
||||
setup_requires=['pbr'],
|
||||
pbr=True)
|
12
test-requirements.txt
Normal file
@ -0,0 +1,12 @@
|
||||
# The order of packages is significant, because pip processes them in the order
|
||||
# of appearance. Changing the order has an impact on the overall integration
|
||||
# process, which may cause wedges in the gate later.
|
||||
|
||||
hacking<0.11,>=0.10.0
|
||||
|
||||
coverage>=3.6
|
||||
discover
|
||||
sphinx>=1.1.2,!=1.2.0,!=1.3b1,<1.3
|
||||
oslosphinx>=2.2.0 # Apache-2.0
|
||||
oslotest>=1.2.0 # Apache-2.0
|
||||
testscenarios>=0.4
|
36
tox.ini
Normal file
@ -0,0 +1,36 @@
|
||||
[tox]
|
||||
minversion = 1.6
|
||||
envlist = py33,py34,py26,py27,pypy,pep8
|
||||
skipsdist = True
|
||||
|
||||
[testenv]
|
||||
usedevelop = True
|
||||
install_command = pip install -U {opts} {packages}
|
||||
setenv =
|
||||
VIRTUAL_ENV={envdir}
|
||||
deps = -r{toxinidir}/requirements.txt
|
||||
-r{toxinidir}/test-requirements.txt
|
||||
commands = python setup.py testr --slowest --testr-args='{posargs}'
|
||||
|
||||
[testenv:pep8]
|
||||
commands = flake8
|
||||
|
||||
[testenv:venv]
|
||||
commands = {posargs}
|
||||
|
||||
[testenv:cover]
|
||||
commands = python setup.py testr --coverage --testr-args='{posargs}'
|
||||
|
||||
[testenv:docs]
|
||||
commands = python setup.py build_sphinx
|
||||
|
||||
[testenv:debug]
|
||||
commands = oslo_debug_helper {posargs}
|
||||
|
||||
[flake8]
|
||||
# E123, E125 skipped as they are invalid PEP-8.
|
||||
|
||||
show-source = True
|
||||
ignore = E123,E125
|
||||
builtins = _
|
||||
exclude=.venv,.git,.tox,dist,doc,*openstack/common*,*lib/python*,*egg,build
|