From d1049b1ad614f52815a9d0c9ab3dc6a899442840 Mon Sep 17 00:00:00 2001 From: Matthew Treinish <mtreinish@kortar.org> Date: Tue, 17 Mar 2015 14:58:46 -0400 Subject: [PATCH 01/23] Initial Cookiecutter Commit. --- .coveragerc | 7 ++ .gitignore | 53 ++++++++++ .gitreview | 4 + .mailmap | 3 + .testr.conf | 7 ++ CONTRIBUTING.rst | 16 +++ HACKING.rst | 4 + LICENSE | 176 ++++++++++++++++++++++++++++++++ MANIFEST.in | 6 ++ README.rst | 15 +++ babel.cfg | 2 + doc/source/conf.py | 75 ++++++++++++++ doc/source/contributing.rst | 4 + doc/source/index.rst | 25 +++++ doc/source/installation.rst | 12 +++ doc/source/readme.rst | 1 + doc/source/usage.rst | 7 ++ openstack-common.conf | 6 ++ os_testr/__init__.py | 19 ++++ os_testr/tests/__init__.py | 0 os_testr/tests/base.py | 23 +++++ os_testr/tests/test_os_testr.py | 28 +++++ requirements.txt | 6 ++ setup.cfg | 47 +++++++++ setup.py | 30 ++++++ test-requirements.txt | 15 +++ tox.ini | 36 +++++++ 27 files changed, 627 insertions(+) create mode 100644 .coveragerc create mode 100644 .gitignore create mode 100644 .gitreview create mode 100644 .mailmap create mode 100644 .testr.conf create mode 100644 CONTRIBUTING.rst create mode 100644 HACKING.rst create mode 100644 LICENSE create mode 100644 MANIFEST.in create mode 100644 README.rst create mode 100644 babel.cfg create mode 100755 doc/source/conf.py create mode 100644 doc/source/contributing.rst create mode 100644 doc/source/index.rst create mode 100644 doc/source/installation.rst create mode 100644 doc/source/readme.rst create mode 100644 doc/source/usage.rst create mode 100644 openstack-common.conf create mode 100644 os_testr/__init__.py create mode 100644 os_testr/tests/__init__.py create mode 100644 os_testr/tests/base.py create mode 100644 os_testr/tests/test_os_testr.py create mode 100644 requirements.txt create mode 100644 setup.cfg create mode 100755 setup.py create mode 100644 test-requirements.txt create mode 100644 tox.ini diff --git a/.coveragerc b/.coveragerc new file mode 
100644 index 0000000..b97ce41 --- /dev/null +++ b/.coveragerc @@ -0,0 +1,7 @@ +[run] +branch = True +source = os_testr +omit = os_testr/tests/*,os_testr/openstack/* + +[report] +ignore-errors = True diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..8a3c704 --- /dev/null +++ b/.gitignore @@ -0,0 +1,53 @@ +*.py[cod] + +# C extensions +*.so + +# Packages +*.egg +*.egg-info +dist +build +eggs +parts +bin +var +sdist +develop-eggs +.installed.cfg +lib +lib64 + +# Installer logs +pip-log.txt + +# Unit test / coverage reports +.coverage +.tox +nosetests.xml +.testrepository +.venv + +# Translations +*.mo + +# Mr Developer +.mr.developer.cfg +.project +.pydevproject + +# Complexity +output/*.html +output/*/index.html + +# Sphinx +doc/build + +# pbr generates these +AUTHORS +ChangeLog + +# Editors +*~ +.*.swp +.*sw? diff --git a/.gitreview b/.gitreview new file mode 100644 index 0000000..fb67c90 --- /dev/null +++ b/.gitreview @@ -0,0 +1,4 @@ +[gerrit] +host=review.openstack.org +port=29418 +project=openstack/os-testr.git diff --git a/.mailmap b/.mailmap new file mode 100644 index 0000000..516ae6f --- /dev/null +++ b/.mailmap @@ -0,0 +1,3 @@ +# Format is: +# <preferred e-mail> <other e-mail 1> +# <preferred e-mail> <other e-mail 2> diff --git a/.testr.conf b/.testr.conf new file mode 100644 index 0000000..6d83b3c --- /dev/null +++ b/.testr.conf @@ -0,0 +1,7 @@ +[DEFAULT] +test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \ + OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \ + OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-60} \ + ${PYTHON:-python} -m subunit.run discover -t ./ . 
$LISTOPT $IDOPTION +test_id_option=--load-list $IDFILE +test_list_option=--list diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst new file mode 100644 index 0000000..863c087 --- /dev/null +++ b/CONTRIBUTING.rst @@ -0,0 +1,16 @@ +If you would like to contribute to the development of OpenStack, +you must follow the steps in this page: + + http://docs.openstack.org/infra/manual/developers.html + +Once those steps have been completed, changes to OpenStack +should be submitted for review via the Gerrit tool, following +the workflow documented at: + + http://docs.openstack.org/infra/manual/developers.html#development-workflow + +Pull requests submitted through GitHub will be ignored. + +Bugs should be filed on Launchpad, not GitHub: + + https://bugs.launchpad.net/os-testr diff --git a/HACKING.rst b/HACKING.rst new file mode 100644 index 0000000..921d4d6 --- /dev/null +++ b/HACKING.rst @@ -0,0 +1,4 @@ +os-testr Style Commandments +=============================================== + +Read the OpenStack Style Commandments http://docs.openstack.org/developer/hacking/ diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..68c771a --- /dev/null +++ b/LICENSE @@ -0,0 +1,176 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ diff --git a/MANIFEST.in b/MANIFEST.in new file mode 100644 index 0000000..c978a52 --- /dev/null +++ b/MANIFEST.in @@ -0,0 +1,6 @@ +include AUTHORS +include ChangeLog +exclude .gitignore +exclude .gitreview + +global-exclude *.pyc diff --git a/README.rst b/README.rst new file mode 100644 index 0000000..eeb27cf --- /dev/null +++ b/README.rst @@ -0,0 +1,15 @@ +=============================== +os-testr +=============================== + +A testr wrapper to provide functionality for OpenStack projects + +* Free software: Apache license +* Documentation: http://docs.openstack.org/developer/os-testr +* Source: http://git.openstack.org/cgit/openstack/os-testr +* Bugs: http://bugs.launchpad.net/os-testr + +Features +-------- + +* TODO diff --git a/babel.cfg b/babel.cfg new file mode 100644 index 0000000..15cd6cb --- /dev/null +++ b/babel.cfg @@ -0,0 +1,2 @@ +[python: **.py] + diff --git a/doc/source/conf.py b/doc/source/conf.py new file mode 100755 index 0000000..251d672 --- /dev/null +++ b/doc/source/conf.py @@ -0,0 +1,75 @@ +# -*- coding: utf-8 -*- +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import sys + +sys.path.insert(0, os.path.abspath('../..')) +# -- General configuration ---------------------------------------------------- + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. 
+extensions = [ + 'sphinx.ext.autodoc', + #'sphinx.ext.intersphinx', + 'oslosphinx' +] + +# autodoc generation is a bit aggressive and a nuisance when doing heavy +# text edit cycles. +# execute "export SPHINX_DEBUG=1" in your terminal to disable + +# The suffix of source filenames. +source_suffix = '.rst' + +# The master toctree document. +master_doc = 'index' + +# General information about the project. +project = u'os-testr' +copyright = u'2013, OpenStack Foundation' + +# If true, '()' will be appended to :func: etc. cross-reference text. +add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +add_module_names = True + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'sphinx' + +# -- Options for HTML output -------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. Major themes that come with +# Sphinx are currently 'default' and 'sphinxdoc'. +# html_theme_path = ["."] +# html_theme = '_theme' +# html_static_path = ['static'] + +# Output file base name for HTML help builder. +htmlhelp_basename = '%sdoc' % project + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, author, documentclass +# [howto/manual]). +latex_documents = [ + ('index', + '%s.tex' % project, + u'%s Documentation' % project, + u'OpenStack Foundation', 'manual'), +] + +# Example configuration for intersphinx: refer to the Python standard library. +#intersphinx_mapping = {'http://docs.python.org/': None} diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst new file mode 100644 index 0000000..1728a61 --- /dev/null +++ b/doc/source/contributing.rst @@ -0,0 +1,4 @@ +============ +Contributing +============ +.. 
include:: ../../CONTRIBUTING.rst diff --git a/doc/source/index.rst b/doc/source/index.rst new file mode 100644 index 0000000..feb274b --- /dev/null +++ b/doc/source/index.rst @@ -0,0 +1,25 @@ +.. os-testr documentation master file, created by + sphinx-quickstart on Tue Jul 9 22:26:36 2013. + You can adapt this file completely to your liking, but it should at least + contain the root `toctree` directive. + +Welcome to os-testr's documentation! +======================================================== + +Contents: + +.. toctree:: + :maxdepth: 2 + + readme + installation + usage + contributing + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`modindex` +* :ref:`search` + diff --git a/doc/source/installation.rst b/doc/source/installation.rst new file mode 100644 index 0000000..8264927 --- /dev/null +++ b/doc/source/installation.rst @@ -0,0 +1,12 @@ +============ +Installation +============ + +At the command line:: + + $ pip install os-testr + +Or, if you have virtualenvwrapper installed:: + + $ mkvirtualenv os-testr + $ pip install os-testr diff --git a/doc/source/readme.rst b/doc/source/readme.rst new file mode 100644 index 0000000..a6210d3 --- /dev/null +++ b/doc/source/readme.rst @@ -0,0 +1 @@ +.. 
include:: ../../README.rst diff --git a/doc/source/usage.rst b/doc/source/usage.rst new file mode 100644 index 0000000..a5b21da --- /dev/null +++ b/doc/source/usage.rst @@ -0,0 +1,7 @@ +======== +Usage +======== + +To use os-testr in a project:: + + import os_testr diff --git a/openstack-common.conf b/openstack-common.conf new file mode 100644 index 0000000..29ef5fa --- /dev/null +++ b/openstack-common.conf @@ -0,0 +1,6 @@ +[DEFAULT] + +# The list of modules to copy from oslo-incubator.git + +# The base module to hold the copy of openstack.common +base=os_testr diff --git a/os_testr/__init__.py b/os_testr/__init__.py new file mode 100644 index 0000000..f8a2a30 --- /dev/null +++ b/os_testr/__init__.py @@ -0,0 +1,19 @@ +# -*- coding: utf-8 -*- + +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import pbr.version + + +__version__ = pbr.version.VersionInfo( + 'os_testr').version_string() diff --git a/os_testr/tests/__init__.py b/os_testr/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/os_testr/tests/base.py b/os_testr/tests/base.py new file mode 100644 index 0000000..1c30cdb --- /dev/null +++ b/os_testr/tests/base.py @@ -0,0 +1,23 @@ +# -*- coding: utf-8 -*- + +# Copyright 2010-2011 OpenStack Foundation +# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslotest import base + + +class TestCase(base.BaseTestCase): + + """Test case base class for all unit tests.""" diff --git a/os_testr/tests/test_os_testr.py b/os_testr/tests/test_os_testr.py new file mode 100644 index 0000000..3f64916 --- /dev/null +++ b/os_testr/tests/test_os_testr.py @@ -0,0 +1,28 @@ +# -*- coding: utf-8 -*- + +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +test_os_testr +---------------------------------- + +Tests for `os_testr` module. +""" + +from os_testr.tests import base + + +class TestOs_testr(base.TestCase): + + def test_something(self): + pass diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..95137a6 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,6 @@ +# The order of packages is significant, because pip processes them in the order +# of appearance. Changing the order has an impact on the overall integration +# process, which may cause wedges in the gate later. 
+ +pbr>=0.6,!=0.7,<1.0 +Babel>=1.3 diff --git a/setup.cfg b/setup.cfg new file mode 100644 index 0000000..57f7046 --- /dev/null +++ b/setup.cfg @@ -0,0 +1,47 @@ +[metadata] +name = os-testr +summary = A testr wrapper to provide functionality for OpenStack projects +description-file = + README.rst +author = OpenStack +author-email = openstack-dev@lists.openstack.org +home-page = http://www.openstack.org/ +classifier = + Environment :: OpenStack + Intended Audience :: Information Technology + Intended Audience :: System Administrators + License :: OSI Approved :: Apache Software License + Operating System :: POSIX :: Linux + Programming Language :: Python + Programming Language :: Python :: 2 + Programming Language :: Python :: 2.7 + Programming Language :: Python :: 2.6 + Programming Language :: Python :: 3 + Programming Language :: Python :: 3.3 + Programming Language :: Python :: 3.4 + +[files] +packages = + os_testr + +[build_sphinx] +source-dir = doc/source +build-dir = doc/build +all_files = 1 + +[upload_sphinx] +upload-dir = doc/build/html + +[compile_catalog] +directory = os_testr/locale +domain = os-testr + +[update_catalog] +domain = os-testr +output_dir = os_testr/locale +input_file = os_testr/locale/os-testr.pot + +[extract_messages] +keywords = _ gettext ngettext l_ lazy_gettext +mapping_file = babel.cfg +output_file = os_testr/locale/os-testr.pot diff --git a/setup.py b/setup.py new file mode 100755 index 0000000..7363757 --- /dev/null +++ b/setup.py @@ -0,0 +1,30 @@ +#!/usr/bin/env python +# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT +import setuptools + +# In python < 2.7.4, a lazy loading of package `pbr` will break +# setuptools if some other modules registered functions in `atexit`. +# solution from: http://bugs.python.org/issue15881#msg170215 +try: + import multiprocessing # noqa +except ImportError: + pass + +setuptools.setup( + setup_requires=['pbr'], + pbr=True) diff --git a/test-requirements.txt b/test-requirements.txt new file mode 100644 index 0000000..8592bde --- /dev/null +++ b/test-requirements.txt @@ -0,0 +1,15 @@ +# The order of packages is significant, because pip processes them in the order +# of appearance. Changing the order has an impact on the overall integration +# process, which may cause wedges in the gate later. 
+ +hacking<0.11,>=0.10.0 + +coverage>=3.6 +discover +python-subunit>=0.0.18 +sphinx>=1.1.2,!=1.2.0,!=1.3b1,<1.3 +oslosphinx>=2.2.0 # Apache-2.0 +oslotest>=1.2.0 # Apache-2.0 +testrepository>=0.0.18 +testscenarios>=0.4 +testtools>=0.9.36,!=1.2.0 diff --git a/tox.ini b/tox.ini new file mode 100644 index 0000000..4657203 --- /dev/null +++ b/tox.ini @@ -0,0 +1,36 @@ +[tox] +minversion = 1.6 +envlist = py33,py34,py26,py27,pypy,pep8 +skipsdist = True + +[testenv] +usedevelop = True +install_command = pip install -U {opts} {packages} +setenv = + VIRTUAL_ENV={envdir} +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt +commands = python setup.py testr --slowest --testr-args='{posargs}' + +[testenv:pep8] +commands = flake8 + +[testenv:venv] +commands = {posargs} + +[testenv:cover] +commands = python setup.py testr --coverage --testr-args='{posargs}' + +[testenv:docs] +commands = python setup.py build_sphinx + +[testenv:debug] +commands = oslo_debug_helper {posargs} + +[flake8] +# E123, E125 skipped as they are invalid PEP-8. + +show-source = True +ignore = E123,E125 +builtins = _ +exclude=.venv,.git,.tox,dist,doc,*openstack/common*,*lib/python*,*egg,build From 898cd1eab58b53901b3d12103b410ac1f24da365 Mon Sep 17 00:00:00 2001 From: Matthew Treinish <mtreinish@kortar.org> Date: Tue, 17 Mar 2015 16:23:32 -0400 Subject: [PATCH 02/23] Add subunit-trace and ostestr This commit migrates the current subunit trace commit from tempest-lib with the commits: d7c3f6b Merge "Summarize expected failures" e29ec71 Summarize expected failures 21e3f6a Enable stdout passthrough for subunit-trace d588748 Default the worker number to 0 not NaN 87c1442 Fix subunit-trace on python < 2.7 b73b9eb bring over fail only functionality from nova 5715fd6 Switch to elapsed time in subunit-trace summary d2e4040 Setup subunit-trace as an entry point and also adds the start of the ostestr command to wrap testr. 
--- os_testr/os_testr.py | 129 ++++++++++++++++++ os_testr/subunit_trace.py | 279 ++++++++++++++++++++++++++++++++++++++ requirements.txt | 2 + setup.cfg | 5 + 4 files changed, 415 insertions(+) create mode 100755 os_testr/os_testr.py create mode 100755 os_testr/subunit_trace.py diff --git a/os_testr/os_testr.py b/os_testr/os_testr.py new file mode 100755 index 0000000..ff9918a --- /dev/null +++ b/os_testr/os_testr.py @@ -0,0 +1,129 @@ +#!/usr/bin/env python2 +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import copy +import os +import subprocess +import sys + +import argparse + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Tool to run openstack tests') + parser.add_argument('--blacklist_file', '-b', + help='Path to a blacklist file, this file contains a' + ' separate regex exclude on each newline') + parser.add_argument('--regex', '-r', + help='A normal testr selection regex. If a blacklist ' + 'file is specified, the regex will be appended ' + 'to the end of the generated regex from that ' + 'file') + parser.add_argument('--pretty', '-p', default=True, + help='Print pretty output from subunit-trace. 
This is ' + 'mutually exclusive with --subunit') + parser.add_argument('--subunit', '-s', action='store_true', + help='output the raw subunit v2 from the test run ' + 'this is mutuall exclusive with --pretty') + parser.add_argument('--list', '-l', action='store_true', + help='List all the tests which will be run.') + parser.add_argument('--no-discover', '-n', + help="Takes in a single test to bypasses test " + "discover and just excute the test specified") + parser.add_argument('--slowest', default=True, + help="after the test run print the slowest tests") + opts = parser.parse_args() + return opts + + +def construct_regex(blacklist_file, regex): + if not blacklist_file: + exclude_regex = '' + else: + black_file = open(blacklist_file, 'r') + exclude_regex = '' + for line in black_file: + regex = line.strip() + exclude_regex = '|'.join([regex, exclude_regex]) + if exclude_regex: + exclude_regex = "'(?!.*" + exclude_regex + ")" + if regex: + exclude_regex += regex + return exclude_regex + + +def call_testr(regex, subunit, pretty, list_tests, slowest): + cmd = ['testr', 'run', '--parallel'] + + if list_tests: + cmd = ['testr', 'list-tests'] + elif subunit or pretty: + cmd.append('--subunit') + cmd.append(regex) + env = copy.deepcopy(os.environ) + if pretty and not list_tests: + ps = subprocess.Popen(cmd, env=env, stdout=subprocess.PIPE) + proc = subprocess.Popen(['subunit-trace', '--no-failure-debug', '-f'], + env=env, stdin=ps.stdout) + ps.stdout.close() + else: + proc = subprocess.Popen(cmd, env=env) + return_code = proc.communicate()[0] + if slowest and not list_tests: + print("\nSlowest Tests:\n") + slow_proc = subprocess.Popen(['testr', 'slowest'], env=env) + slow_proc.communicate() + return return_code + + +def call_subunit_run(test_id, pretty): + cmd = ['python', '-m', 'subunit.run', test_id] + env = copy.deepcopy(os.environ) + if pretty: + ps = subprocess.Popen(cmd, env=env, stdout=subprocess.PIPE) + proc = subprocess.Popen(['subunit-trace', 
'--no-failure-debug', '-f'], + env=env, stdin=ps.stdout) + ps.stdout.close() + else: + proc = subprocess.Popen(cmd, env=env) + proc = subprocess.Popen(cmd) + return_code = proc.communicate()[0] + return return_code + + +def main(): + opts = parse_args() + if opts.pretty and opts.subunit: + msg = ('Subunit output and pretty output cannot be specified at the ' + 'same time') + print(msg) + exit(2) + if opts.list and opts.no_discover: + msg = ('you can not list tests when you are bypassing discovery to ' + 'run a single test') + print(msg) + exit(3) + exclude_regex = construct_regex(opts.blacklist_file, opts.regex) + if not os.path.isdir('.testrepository'): + subprocess.call('testr init') + if not opts.no_discover: + exit(call_testr(exclude_regex, opts.subunit, opts.pretty, opts.list, + opts.slowest)) + else: + exit(call_subunit_run(opts.no_discover, opts.pretty)) + +if __name__ == '__main__': + main() diff --git a/os_testr/subunit_trace.py b/os_testr/subunit_trace.py new file mode 100755 index 0000000..5b69fb6 --- /dev/null +++ b/os_testr/subunit_trace.py @@ -0,0 +1,279 @@ +#!/usr/bin/env python + +# Copyright 2014 Hewlett-Packard Development Company, L.P. +# Copyright 2014 Samsung Electronics +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +"""Trace a subunit stream in reasonable detail and high accuracy.""" + +import argparse +import datetime +import functools +import os +import re +import sys + +import subunit +import testtools + +DAY_SECONDS = 60 * 60 * 24 +FAILS = [] +RESULTS = {} + + +def total_seconds(timedelta): + # NOTE(mtreinish): This method is built-in to the timedelta class in + # python >= 2.7 it is here to enable it's use on older versions + return ((timedelta.days * DAY_SECONDS + timedelta.seconds) * 10 ** 6 + + timedelta.microseconds) / 10 ** 6 + + +def cleanup_test_name(name, strip_tags=True, strip_scenarios=False): + """Clean up the test name for display. + + By default we strip out the tags in the test because they don't help us + in identifying the test that is run to it's result. + + Make it possible to strip out the testscenarios information (not to + be confused with tempest scenarios) however that's often needed to + indentify generated negative tests. + """ + if strip_tags: + tags_start = name.find('[') + tags_end = name.find(']') + if tags_start > 0 and tags_end > tags_start: + newname = name[:tags_start] + newname += name[tags_end + 1:] + name = newname + + if strip_scenarios: + tags_start = name.find('(') + tags_end = name.find(')') + if tags_start > 0 and tags_end > tags_start: + newname = name[:tags_start] + newname += name[tags_end + 1:] + name = newname + + return name + + +def get_duration(timestamps): + start, end = timestamps + if not start or not end: + duration = '' + else: + delta = end - start + duration = '%d.%06ds' % ( + delta.days * DAY_SECONDS + delta.seconds, delta.microseconds) + return duration + + +def find_worker(test): + """Get the worker number. + + If there are no workers because we aren't in a concurrent environment, + assume the worker number is 0. 
+ """ + for tag in test['tags']: + if tag.startswith('worker-'): + return int(tag[7:]) + return 0 + + +# Print out stdout/stderr if it exists, always +def print_attachments(stream, test, all_channels=False): + """Print out subunit attachments. + + Print out subunit attachments that contain content. This + runs in 2 modes, one for successes where we print out just stdout + and stderr, and an override that dumps all the attachments. + """ + channels = ('stdout', 'stderr') + for name, detail in test['details'].items(): + # NOTE(sdague): the subunit names are a little crazy, and actually + # are in the form pythonlogging:'' (with the colon and quotes) + name = name.split(':')[0] + if detail.content_type.type == 'test': + detail.content_type.type = 'text' + if (all_channels or name in channels) and detail.as_text(): + title = "Captured %s:" % name + stream.write("\n%s\n%s\n" % (title, ('~' * len(title)))) + # indent attachment lines 4 spaces to make them visually + # offset + for line in detail.as_text().split('\n'): + stream.write(" %s\n" % line) + + +def show_outcome(stream, test, print_failures=False, failonly=False): + global RESULTS + status = test['status'] + # TODO(sdague): ask lifeless why on this? + if status == 'exists': + return + + worker = find_worker(test) + name = cleanup_test_name(test['id']) + duration = get_duration(test['timestamps']) + + if worker not in RESULTS: + RESULTS[worker] = [] + RESULTS[worker].append(test) + + # don't count the end of the return code as a fail + if name == 'process-returncode': + return + + if status == 'fail': + FAILS.append(test) + stream.write('{%s} %s [%s] ... FAILED\n' % ( + worker, name, duration)) + if not print_failures: + print_attachments(stream, test, all_channels=True) + elif not failonly: + if status == 'success': + stream.write('{%s} %s [%s] ... ok\n' % ( + worker, name, duration)) + print_attachments(stream, test) + elif status == 'skip': + stream.write('{%s} %s ... 
SKIPPED: %s\n' % ( + worker, name, test['details']['reason'].as_text())) + else: + stream.write('{%s} %s [%s] ... %s\n' % ( + worker, name, duration, test['status'])) + if not print_failures: + print_attachments(stream, test, all_channels=True) + + stream.flush() + + +def print_fails(stream): + """Print summary failure report. + + Currently unused, however there remains debate on inline vs. at end + reporting, so leave the utility function for later use. + """ + if not FAILS: + return + stream.write("\n==============================\n") + stream.write("Failed %s tests - output below:" % len(FAILS)) + stream.write("\n==============================\n") + for f in FAILS: + stream.write("\n%s\n" % f['id']) + stream.write("%s\n" % ('-' * len(f['id']))) + print_attachments(stream, f, all_channels=True) + stream.write('\n') + + +def count_tests(key, value): + count = 0 + for k, v in RESULTS.items(): + for item in v: + if key in item: + if re.search(value, item[key]): + count += 1 + return count + + +def run_time(): + runtime = 0.0 + for k, v in RESULTS.items(): + for test in v: + runtime += float(get_duration(test['timestamps']).strip('s')) + return runtime + + +def worker_stats(worker): + tests = RESULTS[worker] + num_tests = len(tests) + delta = tests[-1]['timestamps'][1] - tests[0]['timestamps'][0] + return num_tests, delta + + +def print_summary(stream, elapsed_time): + stream.write("\n======\nTotals\n======\n") + stream.write("Ran: %s tests in %.4f sec.\n" % ( + count_tests('status', '.*'), total_seconds(elapsed_time))) + stream.write(" - Passed: %s\n" % count_tests('status', '^success$')) + stream.write(" - Skipped: %s\n" % count_tests('status', '^skip$')) + stream.write(" - Expected Fail: %s\n" % count_tests('status', '^xfail$')) + stream.write(" - Unexpected Success: %s\n" % count_tests('status', + '^uxsuccess$')) + stream.write(" - Failed: %s\n" % count_tests('status', '^fail$')) + stream.write("Sum of execute time for each test: %.4f sec.\n" % run_time()) + + # 
we could have no results, especially as we filter out the process-codes + if RESULTS: + stream.write("\n==============\nWorker Balance\n==============\n") + + for w in range(max(RESULTS.keys()) + 1): + if w not in RESULTS: + stream.write( + " - WARNING: missing Worker %s! " + "Race in testr accounting.\n" % w) + else: + num, time = worker_stats(w) + stream.write(" - Worker %s (%s tests) => %ss\n" % + (w, num, time)) + + +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument('--no-failure-debug', '-n', action='store_true', + dest='print_failures', help='Disable printing failure ' + 'debug information in realtime') + parser.add_argument('--fails', '-f', action='store_true', + dest='post_fails', help='Print failure debug ' + 'information after the stream is proccesed') + parser.add_argument('--failonly', action='store_true', + dest='failonly', help="Don't print success items", + default=( + os.environ.get('TRACE_FAILONLY', False) + is not False)) + return parser.parse_args() + + +def main(): + args = parse_args() + stream = subunit.ByteStreamToStreamResult( + sys.stdin, non_subunit_name='stdout') + outcomes = testtools.StreamToDict( + functools.partial(show_outcome, sys.stdout, + print_failures=args.print_failures, + failonly=args.failonly)) + summary = testtools.StreamSummary() + result = testtools.CopyStreamResult([outcomes, summary]) + result = testtools.StreamResultRouter(result) + cat = subunit.test_results.CatFiles(sys.stdout) + result.add_rule(cat, 'test_id', test_id=None) + start_time = datetime.datetime.utcnow() + result.startTestRun() + try: + stream.run(result) + finally: + result.stopTestRun() + stop_time = datetime.datetime.utcnow() + elapsed_time = stop_time - start_time + + if count_tests('status', '.*') == 0: + print("The test run didn't actually run any tests") + exit(1) + if args.post_fails: + print_fails(sys.stdout) + print_summary(sys.stdout, elapsed_time) + exit(0 if summary.wasSuccessful() else 1) + + +if __name__ == 
'__main__': + main() diff --git a/requirements.txt b/requirements.txt index 95137a6..9b50a0b 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,3 +4,5 @@ pbr>=0.6,!=0.7,<1.0 Babel>=1.3 +testrepository>=0.0.18 +python-subunit>=0.0.18 diff --git a/setup.cfg b/setup.cfg index 57f7046..0f883c5 100644 --- a/setup.cfg +++ b/setup.cfg @@ -24,6 +24,11 @@ classifier = packages = os_testr +[entry_points] +console_scripts = + subunit-trace = os_testr.subunit_trace:main + ostestr = os_testr.os_testr:main + [build_sphinx] source-dir = doc/source build-dir = doc/build From 898dbbbda66b842b9477d473788e929bd18f8572 Mon Sep 17 00:00:00 2001 From: Matthew Treinish <mtreinish@kortar.org> Date: Tue, 17 Mar 2015 16:42:27 -0400 Subject: [PATCH 03/23] Add --pdb flag and fix --no-discover flag This commit adds a --pdb flag which is similar to --no-discover except that instead of running subunit.run test_id to bypass test discovery, testtools.run test_id is used so that pdb traces set in the code are useable. As part of this several fixes in the --no-discover path were added since the codes is similar. 
--- os_testr/os_testr.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/os_testr/os_testr.py b/os_testr/os_testr.py index ff9918a..ca7f50b 100755 --- a/os_testr/os_testr.py +++ b/os_testr/os_testr.py @@ -45,6 +45,8 @@ def parse_args(): "discover and just excute the test specified") parser.add_argument('--slowest', default=True, help="after the test run print the slowest tests") + parser.add_argument('--pdb', + help='Run a single test that has pdb traces added') opts = parser.parse_args() return opts @@ -99,10 +101,15 @@ def call_subunit_run(test_id, pretty): ps.stdout.close() else: proc = subprocess.Popen(cmd, env=env) - proc = subprocess.Popen(cmd) return_code = proc.communicate()[0] return return_code +def call_testtools_run(test_id): + cmd = ['python', '-m', 'testtools.run', test_id] + env = copy.deepcopy(os.environ) + proc = subprocess.Popen(cmd, env=env) + return_code = proc.communicate()[0] + return return_code def main(): opts = parse_args() @@ -119,9 +126,11 @@ def main(): exclude_regex = construct_regex(opts.blacklist_file, opts.regex) if not os.path.isdir('.testrepository'): subprocess.call('testr init') - if not opts.no_discover: + if not opts.no_discover and not opts.pdb: exit(call_testr(exclude_regex, opts.subunit, opts.pretty, opts.list, opts.slowest)) + elif opts.pdb: + exit(call_testtools_run(opts.pdb)) else: exit(call_subunit_run(opts.no_discover, opts.pretty)) From b914b96d4f8f034f3882234f1505e7c28e6a61f5 Mon Sep 17 00:00:00 2001 From: Matthew Treinish <mtreinish@kortar.org> Date: Tue, 17 Mar 2015 18:55:17 -0400 Subject: [PATCH 04/23] Fix return code on test run failure This commit fixes the return code handling to ensure we return the same return code as the called test runner. 
--- os_testr/os_testr.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/os_testr/os_testr.py b/os_testr/os_testr.py index ca7f50b..d77f902 100755 --- a/os_testr/os_testr.py +++ b/os_testr/os_testr.py @@ -83,7 +83,8 @@ def call_testr(regex, subunit, pretty, list_tests, slowest): ps.stdout.close() else: proc = subprocess.Popen(cmd, env=env) - return_code = proc.communicate()[0] + proc.communicate() + return_code = proc.returncode if slowest and not list_tests: print("\nSlowest Tests:\n") slow_proc = subprocess.Popen(['testr', 'slowest'], env=env) @@ -101,14 +102,16 @@ def call_subunit_run(test_id, pretty): ps.stdout.close() else: proc = subprocess.Popen(cmd, env=env) - return_code = proc.communicate()[0] + proc.communicate() + return_code = proc.returncode return return_code def call_testtools_run(test_id): cmd = ['python', '-m', 'testtools.run', test_id] env = copy.deepcopy(os.environ) proc = subprocess.Popen(cmd, env=env) - return_code = proc.communicate()[0] + proc.communicate() + return_code = proc.returncode return return_code def main(): From f67bff41a1aca2b20383742334f3ed955f46a3f5 Mon Sep 17 00:00:00 2001 From: Matthew Treinish <mtreinish@kortar.org> Date: Tue, 17 Mar 2015 18:59:38 -0400 Subject: [PATCH 05/23] Fix pep8 issues This commit fixes a couple of issues from running flake8 for the first time. Mostly minor whitespace fixes, but we'll need this for when we move this repo over into openstack's ci system. 
--- os_testr/os_testr.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/os_testr/os_testr.py b/os_testr/os_testr.py index d77f902..9bd8914 100755 --- a/os_testr/os_testr.py +++ b/os_testr/os_testr.py @@ -16,7 +16,6 @@ import copy import os import subprocess -import sys import argparse @@ -106,6 +105,7 @@ def call_subunit_run(test_id, pretty): return_code = proc.returncode return return_code + def call_testtools_run(test_id): cmd = ['python', '-m', 'testtools.run', test_id] env = copy.deepcopy(os.environ) @@ -114,6 +114,7 @@ def call_testtools_run(test_id): return_code = proc.returncode return return_code + def main(): opts = parse_args() if opts.pretty and opts.subunit: From eb6195b16efc943fc53e3c0c78771204dffedda9 Mon Sep 17 00:00:00 2001 From: Matthew Treinish <mtreinish@kortar.org> Date: Tue, 17 Mar 2015 19:13:06 -0400 Subject: [PATCH 06/23] Improve the arguments for ostestr This commit adds a few more details and missing negative options to the ostestr arguments. --- os_testr/os_testr.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/os_testr/os_testr.py b/os_testr/os_testr.py index 9bd8914..bcf640d 100755 --- a/os_testr/os_testr.py +++ b/os_testr/os_testr.py @@ -31,9 +31,11 @@ def parse_args(): 'file is specified, the regex will be appended ' 'to the end of the generated regex from that ' 'file') - parser.add_argument('--pretty', '-p', default=True, + parser.add_argument('--pretty', '-p', dest='pretty', action='store_true', help='Print pretty output from subunit-trace. 
This is ' 'mutually exclusive with --subunit') + parser.add_argument('--no-pretty', dest='pretty', action='store_false', + help='Disable the pretty output with subunit-trace') parser.add_argument('--subunit', '-s', action='store_true', help='output the raw subunit v2 from the test run ' 'this is mutuall exclusive with --pretty') @@ -42,10 +44,14 @@ def parse_args(): parser.add_argument('--no-discover', '-n', help="Takes in a single test to bypasses test " "discover and just excute the test specified") - parser.add_argument('--slowest', default=True, + parser.add_argument('--slowest', dest='slowest', action='store_true', help="after the test run print the slowest tests") + parser.add_argument('--no-slowest', dest='slowest', action='store_false', + help="after the test run don't print the slowest " + "tests") parser.add_argument('--pdb', help='Run a single test that has pdb traces added') + parser.set_defaults(pretty=True, slowest=True) opts = parser.parse_args() return opts From 51e4fb6b9f43c267e9ea24ea4a3978d9bc9fd1e0 Mon Sep 17 00:00:00 2001 From: Matthew Treinish <mtreinish@kortar.org> Date: Tue, 17 Mar 2015 19:18:48 -0400 Subject: [PATCH 07/23] Fix the testr init subprocess call The subprocess.call() to run testr init if the .testrepository dir hasn't been created yet was setting passing the actual call assuming shell=True, which it wasn't. This commit fixes this oversight to make the call actually work. 
--- os_testr/os_testr.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/os_testr/os_testr.py b/os_testr/os_testr.py index bcf640d..8c74d2e 100755 --- a/os_testr/os_testr.py +++ b/os_testr/os_testr.py @@ -135,7 +135,7 @@ def main(): exit(3) exclude_regex = construct_regex(opts.blacklist_file, opts.regex) if not os.path.isdir('.testrepository'): - subprocess.call('testr init') + subprocess.call(['testr', 'init']) if not opts.no_discover and not opts.pdb: exit(call_testr(exclude_regex, opts.subunit, opts.pretty, opts.list, opts.slowest)) From 8448bd574e06d68d290a02b8b3b9c79a1720cc1a Mon Sep 17 00:00:00 2001 From: Matthew Treinish <mtreinish@kortar.org> Date: Tue, 17 Mar 2015 19:40:35 -0400 Subject: [PATCH 08/23] Add ostestr options to control parallelism This commit adds options to ostestr to control the parallelism and concurrency of testr. By default it will run in parallel with concurrency equal to the number of cpus on the system. This commit also modifies the metavar values on the no-discover and pdb options to make them more descriptive. 
--- os_testr/os_testr.py | 29 ++++++++++++++++++++++------- 1 file changed, 22 insertions(+), 7 deletions(-) diff --git a/os_testr/os_testr.py b/os_testr/os_testr.py index 8c74d2e..3aa365d 100755 --- a/os_testr/os_testr.py +++ b/os_testr/os_testr.py @@ -41,7 +41,7 @@ def parse_args(): 'this is mutuall exclusive with --pretty') parser.add_argument('--list', '-l', action='store_true', help='List all the tests which will be run.') - parser.add_argument('--no-discover', '-n', + parser.add_argument('--no-discover', '-n', metavar='TEST_ID', help="Takes in a single test to bypasses test " "discover and just excute the test specified") parser.add_argument('--slowest', dest='slowest', action='store_true', @@ -49,9 +49,16 @@ def parse_args(): parser.add_argument('--no-slowest', dest='slowest', action='store_false', help="after the test run don't print the slowest " "tests") - parser.add_argument('--pdb', + parser.add_argument('--pdb', metavar='TEST_ID', help='Run a single test that has pdb traces added') - parser.set_defaults(pretty=True, slowest=True) + parser.add_argument('--parallel', dest='parallel', action='store_true', + help='Run tests in parallel (this is the default)') + parser.add_argument('--serial', dest='parallel', action='store_false', + help='Run tests serially') + parser.add_argument('--concurrency', '-c', type=int, metavar='WORKERS', + help='The number of workers to use when running in ' + 'parallel. 
By default this is the number of cpus') + parser.set_defaults(pretty=True, slowest=True, parallel=True) opts = parser.parse_args() return opts @@ -72,9 +79,13 @@ def construct_regex(blacklist_file, regex): return exclude_regex -def call_testr(regex, subunit, pretty, list_tests, slowest): - cmd = ['testr', 'run', '--parallel'] - +def call_testr(regex, subunit, pretty, list_tests, slowest, parallel, concur): + if parallel: + cmd = ['testr', 'run', '--parallel'] + if concur: + cmd.append('--concurrency=%s' % concur) + else: + cmd = ['testr', 'run'] if list_tests: cmd = ['testr', 'list-tests'] elif subunit or pretty: @@ -133,12 +144,16 @@ def main(): 'run a single test') print(msg) exit(3) + if not opts.parallel and opts.concurrency: + msg = "You can't specify a concurrency to use when running serially" + print(msg) + exit(4) exclude_regex = construct_regex(opts.blacklist_file, opts.regex) if not os.path.isdir('.testrepository'): subprocess.call(['testr', 'init']) if not opts.no_discover and not opts.pdb: exit(call_testr(exclude_regex, opts.subunit, opts.pretty, opts.list, - opts.slowest)) + opts.slowest, opts.parallel, opts.concurrency)) elif opts.pdb: exit(call_testtools_run(opts.pdb)) else: From 6e1bb16cabd8af76fb0d63035da02c4c05d04eb4 Mon Sep 17 00:00:00 2001 From: Matthew Treinish <mtreinish@kortar.org> Date: Tue, 17 Mar 2015 19:55:43 -0400 Subject: [PATCH 09/23] Use python APIs to call run modules in ostestr This commit switches from using subprocess to call testtools.run and subunit.run with python -m to directly calling the methods being run. This should make the non-default cases when using subunit.run and testtools.run faster, and it simplifies the code. As part of this, the code around call_subunit is fixed to make sure the it works as expected given different argument combinations. ostestr will still call subprocess to run testr, because the interface is more complex, and when subunit.run is used with subunit-trace, because the stdin handling is tricky. 
The subunit.run with subunit-trace case will be handled in a later patch. --- os_testr/os_testr.py | 29 ++++++++++++++--------------- 1 file changed, 14 insertions(+), 15 deletions(-) diff --git a/os_testr/os_testr.py b/os_testr/os_testr.py index 3aa365d..bdebf1d 100755 --- a/os_testr/os_testr.py +++ b/os_testr/os_testr.py @@ -13,11 +13,14 @@ # License for the specific language governing permissions and limitations # under the License. +import argparse import copy import os import subprocess +import sys -import argparse +from subunit import run as subunit_run +from testtools import run as testtools_run def parse_args(): @@ -108,28 +111,24 @@ def call_testr(regex, subunit, pretty, list_tests, slowest, parallel, concur): return return_code -def call_subunit_run(test_id, pretty): - cmd = ['python', '-m', 'subunit.run', test_id] - env = copy.deepcopy(os.environ) +def call_subunit_run(test_id, pretty, subunit): if pretty: + env = copy.deepcopy(os.environ) + cmd = ['python', '-m', 'subunit.run', test_id] ps = subprocess.Popen(cmd, env=env, stdout=subprocess.PIPE) proc = subprocess.Popen(['subunit-trace', '--no-failure-debug', '-f'], env=env, stdin=ps.stdout) ps.stdout.close() + proc.communicate() + return proc.returncode + elif subunit: + subunit_run.main([sys.argv[0], test_id], sys.stdout) else: - proc = subprocess.Popen(cmd, env=env) - proc.communicate() - return_code = proc.returncode - return return_code + testtools_run.main([sys.argv[0], test_id], sys.stdout) def call_testtools_run(test_id): - cmd = ['python', '-m', 'testtools.run', test_id] - env = copy.deepcopy(os.environ) - proc = subprocess.Popen(cmd, env=env) - proc.communicate() - return_code = proc.returncode - return return_code + testtools_run.main([sys.argv[0], test_id], sys.stdout) def main(): @@ -157,7 +156,7 @@ def main(): elif opts.pdb: exit(call_testtools_run(opts.pdb)) else: - exit(call_subunit_run(opts.no_discover, opts.pretty)) + exit(call_subunit_run(opts.no_discover, opts.pretty, opts.subunit)) 
if __name__ == '__main__': main() From bbc8b8f54197f2c820c8f9207c0aa413e8bc566e Mon Sep 17 00:00:00 2001 From: Matthew Treinish <mtreinish@kortar.org> Date: Wed, 18 Feb 2015 17:43:45 -0500 Subject: [PATCH 10/23] Add percent change to duration on subunit-trace output This commit adds a percent change to the duration on individual test output lines. This is conditionally displayed based on the presence of a testrepository times.dbm file and data in that file for the test being run. If there is useable data subunit-trace will now use the runtimes from there to display how the current run has changed from the dbm file. A new threshold option is added to optionally specify a minimum percent change to be used to determine whether to display the value or not. Change-Id: I3d68425f48114531a78cab08c353111648ce3911 --- os_testr/subunit_trace.py | 40 ++++++++++++++++++++++++++++++++++++--- 1 file changed, 37 insertions(+), 3 deletions(-) diff --git a/os_testr/subunit_trace.py b/os_testr/subunit_trace.py index 5b69fb6..304c633 100755 --- a/os_testr/subunit_trace.py +++ b/os_testr/subunit_trace.py @@ -28,6 +28,13 @@ import sys import subunit import testtools +# NOTE(mtreinish) on python3 anydbm was renamed dbm and the python2 dbm module +# was renamed to dbm.ndbm, this block takes that into account +try: + import anydbm as dbm +except ImportError: + import dbm + DAY_SECONDS = 60 * 60 * 24 FAILS = [] RESULTS = {} @@ -116,7 +123,24 @@ def print_attachments(stream, test, all_channels=False): stream.write(" %s\n" % line) -def show_outcome(stream, test, print_failures=False, failonly=False): +def find_test_run_time_diff(test_id, run_time): + times_db_path = os.path.join(os.path.join(os.getcwd(), '.testrepository'), + 'times.dbm') + if os.path.isfile(times_db_path): + try: + test_times = dbm.open(times_db_path) + except Exception: + return False + avg_runtime = float(test_times.get(str(test_id), False)) + if avg_runtime and avg_runtime > 0: + run_time = float(run_time.rstrip('s')) + 
perc_diff = ((run_time - avg_runtime) / avg_runtime) * 100 + return perc_diff + return False + + +def show_outcome(stream, test, print_failures=False, failonly=False, + threshold='0'): global RESULTS status = test['status'] # TODO(sdague): ask lifeless why on this? @@ -143,8 +167,14 @@ def show_outcome(stream, test, print_failures=False, failonly=False): print_attachments(stream, test, all_channels=True) elif not failonly: if status == 'success': - stream.write('{%s} %s [%s] ... ok\n' % ( - worker, name, duration)) + out_string = '{%s} %s [%s' % (worker, name, duration) + perc_diff = find_test_run_time_diff(test['id'], duration) + if perc_diff and abs(perc_diff) >= abs(float(threshold)): + if perc_diff > 0: + out_string = out_string + ' +%.2f%%' % perc_diff + else: + out_string = out_string + ' %.2f%%' % perc_diff + stream.write(out_string + '] ... ok\n') print_attachments(stream, test) elif status == 'skip': stream.write('{%s} %s ... SKIPPED: %s\n' % ( @@ -241,6 +271,10 @@ def parse_args(): default=( os.environ.get('TRACE_FAILONLY', False) is not False)) + parser.add_argument('--diff-threshold', '-t', dest='threshold', + help="Threshold to use for displaying percent change " + "from the avg run time. If one is not specified " + "the percent change will always be displayed") return parser.parse_args() From f0e5175420ee22ccb1d3fa7757b9a5d0915a2d45 Mon Sep 17 00:00:00 2001 From: Matthew Treinish <mtreinish@kortar.org> Date: Wed, 18 Mar 2015 22:47:31 -0400 Subject: [PATCH 11/23] Add basic unit tests to check ostestr return codes This commit adds some basic unit tests which runs ostestr in some basic configurations against a fake test suite to ensure that the ostestr always exits with a 0 on success and 1 on errors when running tests. This is invaluable for using ostestr in ci systems. 
--- os_testr/tests/files/__init__.py | 0 os_testr/tests/files/failing-tests | 23 ++++++ os_testr/tests/files/passing-tests | 23 ++++++ os_testr/tests/files/setup.cfg | 20 ++++++ os_testr/tests/files/testr-conf | 5 ++ os_testr/tests/test_return_codes.py | 104 ++++++++++++++++++++++++++++ requirements.txt | 1 + test-requirements.txt | 3 - 8 files changed, 176 insertions(+), 3 deletions(-) create mode 100644 os_testr/tests/files/__init__.py create mode 100644 os_testr/tests/files/failing-tests create mode 100644 os_testr/tests/files/passing-tests create mode 100644 os_testr/tests/files/setup.cfg create mode 100644 os_testr/tests/files/testr-conf create mode 100644 os_testr/tests/test_return_codes.py diff --git a/os_testr/tests/files/__init__.py b/os_testr/tests/files/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/os_testr/tests/files/failing-tests b/os_testr/tests/files/failing-tests new file mode 100644 index 0000000..78efc93 --- /dev/null +++ b/os_testr/tests/files/failing-tests @@ -0,0 +1,23 @@ +# Copyright 2013 IBM Corp. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import testtools + +class FakeTestClass(testtools.TestCase): + def test_pass(self): + self.assertTrue(False) + + def test_pass_list(self): + test_list = ['test', 'a', 'b'] + self.assertIn('fail', test_list) diff --git a/os_testr/tests/files/passing-tests b/os_testr/tests/files/passing-tests new file mode 100644 index 0000000..a55cb1b --- /dev/null +++ b/os_testr/tests/files/passing-tests @@ -0,0 +1,23 @@ +# Copyright 2013 IBM Corp. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import testtools + +class FakeTestClass(testtools.TestCase): + def test_pass(self): + self.assertTrue(True) + + def test_pass_list(self): + test_list = ['test', 'a', 'b'] + self.assertIn('test', test_list) diff --git a/os_testr/tests/files/setup.cfg b/os_testr/tests/files/setup.cfg new file mode 100644 index 0000000..f6f9f73 --- /dev/null +++ b/os_testr/tests/files/setup.cfg @@ -0,0 +1,20 @@ +[metadata] +name = tempest_unit_tests +version = 1 +summary = Fake Project for testing wrapper scripts +author = OpenStack +author-email = openstack-dev@lists.openstack.org +home-page = http://www.openstack.org/ +classifier = + Intended Audience :: Information Technology + Intended Audience :: System Administrators + Intended Audience :: Developers + License :: OSI Approved :: Apache Software License + Operating System :: POSIX :: Linux + Programming Language :: Python + Programming Language :: Python :: 2 + Programming Language :: Python :: 2.7 + +[global] +setup-hooks = + pbr.hooks.setup_hook diff 
--git a/os_testr/tests/files/testr-conf b/os_testr/tests/files/testr-conf new file mode 100644 index 0000000..d5ad083 --- /dev/null +++ b/os_testr/tests/files/testr-conf @@ -0,0 +1,5 @@ +[DEFAULT] +test_command=${PYTHON:-python} -m subunit.run discover -t ./ ./tests $LISTOPT $IDOPTION +test_id_option=--load-list $IDFILE +test_list_option=--list +group_regex=([^\.]*\.)* diff --git a/os_testr/tests/test_return_codes.py b/os_testr/tests/test_return_codes.py new file mode 100644 index 0000000..591e4dd --- /dev/null +++ b/os_testr/tests/test_return_codes.py @@ -0,0 +1,104 @@ +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
import os
import shutil
import StringIO
import subprocess
import tempfile

import testtools

from os_testr.tests import base

DEVNULL = open(os.devnull, 'wb')


class TestReturnCodes(base.TestCase):
    """Functional checks of the exit codes returned by the ostestr CLI.

    Each test builds a throwaway project tree containing a testr config,
    one passing and one failing test module, then invokes the real
    ``ostestr`` entry point in a shell and asserts on its return code.
    """

    def setUp(self):
        super(TestReturnCodes, self).setUp()
        # Build a scratch project tree for ostestr to run against.
        self.directory = tempfile.mkdtemp(prefix='ostestr-unit')
        self.addCleanup(shutil.rmtree, self.directory)
        self.test_dir = os.path.join(self.directory, 'tests')
        os.mkdir(self.test_dir)
        # Destination paths for the canned fixture files.
        self.testr_conf_file = os.path.join(self.directory, '.testr.conf')
        self.setup_cfg_file = os.path.join(self.directory, 'setup.cfg')
        self.passing_file = os.path.join(self.test_dir, 'test_passing.py')
        self.failing_file = os.path.join(self.test_dir, 'test_failing.py')
        self.init_file = os.path.join(self.test_dir, '__init__.py')
        self.setup_py = os.path.join(self.directory, 'setup.py')
        # Copy the fixtures into place (same order as the originals).
        fixtures = [
            ('os_testr/tests/files/testr-conf', self.testr_conf_file),
            ('os_testr/tests/files/passing-tests', self.passing_file),
            ('os_testr/tests/files/failing-tests', self.failing_file),
            ('setup.py', self.setup_py),
            ('os_testr/tests/files/setup.cfg', self.setup_cfg_file),
            ('os_testr/tests/files/__init__.py', self.init_file),
        ]
        for source, destination in fixtures:
            shutil.copy(source, destination)

        self.stdout = StringIO.StringIO()
        self.stderr = StringIO.StringIO()
        # Run from inside the scratch tree; cleanup restores the cwd
        # captured here before we chdir away.
        self.addCleanup(os.chdir, os.path.abspath(os.curdir))
        os.chdir(self.directory)

    def assertRunExit(self, cmd, expected, subunit=False):
        """Run *cmd* in a shell and assert its exit status equals *expected*.

        When *subunit* is true the captured output is a raw subunit stream,
        so the failure message reports only the return codes instead of
        echoing the (binary) stdout/stderr.
        """
        proc = subprocess.Popen(
            "%s" % cmd, shell=True,
            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        out, err = proc.communicate()

        if subunit:
            self.assertEqual(proc.returncode, expected,
                             "Expected return code: %s doesn't match actual "
                             "return code of: %s" % (expected, proc.returncode))
        else:
            self.assertEqual(
                proc.returncode, expected,
                "Stdout: %s; Stderr: %s" % (out, err))

    def test_default_passing(self):
        self.assertRunExit('ostestr --regex passing', 0)

    def test_default_fails(self):
        self.assertRunExit('ostestr', 1)

    def test_default_passing_no_slowest(self):
        self.assertRunExit('ostestr --no-slowest --regex passing', 0)

    def test_default_fails_no_slowest(self):
        self.assertRunExit('ostestr --no-slowest', 1)

    def test_default_serial_passing(self):
        self.assertRunExit('ostestr --serial --regex passing', 0)

    def test_default_serial_fails(self):
        self.assertRunExit('ostestr --serial', 1)

    def test_testr_subunit_passing(self):
        self.assertRunExit('ostestr --no-pretty --subunit --regex passing', 0,
                           subunit=True)

    @testtools.skip('Skipped because of testrepository lp bug #1411804')
    def test_testr_subunit_fails(self):
        self.assertRunExit('ostestr --no-pretty --subunit', 1, subunit=True)

    def test_testr_no_pretty_passing(self):
        self.assertRunExit('ostestr --no-pretty --regex passing', 0)

    def test_testr_no_pretty_fails(self):
        self.assertRunExit('ostestr --no-pretty', 1)

    def test_list(self):
        self.assertRunExit('ostestr --list', 0)
which basically calls through to --until-failure on testr. The key difference is because of open issues with testr you can't use --until-failure with subunit output enabled. This would break just a straight passthrough if either pretty output or subunit output are enabled. This works around this by manually reimplementing this functionality by generating a test list and looping over it and running the tests with the desired output forever until a failure is encountered. The tradeoff here is that to do this the test operations are serialized. As part of this to make the pretty output not overly verbose an option is added to subunit-trace to disable the summary view from being printed. --- os_testr/os_testr.py | 79 +++++++++++++++++++++++++++++++++++++-- os_testr/subunit_trace.py | 6 ++- 2 files changed, 80 insertions(+), 5 deletions(-) diff --git a/os_testr/os_testr.py b/os_testr/os_testr.py index bdebf1d..819414d 100755 --- a/os_testr/os_testr.py +++ b/os_testr/os_testr.py @@ -61,6 +61,11 @@ def parse_args(): parser.add_argument('--concurrency', '-c', type=int, metavar='WORKERS', help='The number of workers to use when running in ' 'parallel. By default this is the number of cpus') + parser.add_argument('--until-failure', action='store_true', + help='Run the tests in a loop until a failure is ' + 'encountered. 
Running with subunit or pretty' + 'output enable will force the loop to run tests' + 'serially') parser.set_defaults(pretty=True, slowest=True, parallel=True) opts = parser.parse_args() return opts @@ -82,7 +87,8 @@ def construct_regex(blacklist_file, regex): return exclude_regex -def call_testr(regex, subunit, pretty, list_tests, slowest, parallel, concur): +def call_testr(regex, subunit, pretty, list_tests, slowest, parallel, concur, + until_failure): if parallel: cmd = ['testr', 'run', '--parallel'] if concur: @@ -91,11 +97,71 @@ def call_testr(regex, subunit, pretty, list_tests, slowest, parallel, concur): cmd = ['testr', 'run'] if list_tests: cmd = ['testr', 'list-tests'] - elif subunit or pretty: + elif (subunit or pretty) and not until_failure: cmd.append('--subunit') + elif not (subunit or pretty) and until_failure: + cmd.append('--until-failure') cmd.append(regex) env = copy.deepcopy(os.environ) - if pretty and not list_tests: + # This workaround is necessary because of lp bug 1411804 it's super hacky + # and makes tons of unfounded assumptions, but it works for the most part + if (subunit or pretty) and until_failure: + proc = subprocess.Popen(['testr', 'list-tests', regex], env=env, + stdout=subprocess.PIPE) + out = proc.communicate()[0] + raw_test_list = out.split('\n') + bad = False + test_list = [] + exclude_list = ['CAPTURE', 'TEST_TIMEOUT', 'PYTHON', + 'subunit.run discover'] + for line in raw_test_list: + for exclude in exclude_list: + if exclude in line: + bad = True + break + elif not line: + bad = True + break + if not bad: + test_list.append(line) + bad = False + count = 0 + failed = False + if not test_list: + print("No tests to run") + exit(1) + # If pretty or subunit output is desired manually loop forever over + # test individually and generate the desired output in a linear series + # this avoids 1411804 while retaining most of the desired behavior + while True: + for test in test_list: + if pretty: + cmd = ['python', '-m', 'subunit.run', 
test] + ps = subprocess.Popen(cmd, env=env, stdout=subprocess.PIPE) + proc = subprocess.Popen(['subunit-trace', + '--no-failure-debug', + '--no-summary'], env=env, + stdin=ps.stdout) + ps.stdout.close() + proc.communicate() + if proc.returncode > 0: + failed = True + break + else: + try: + subunit_run.main([sys.argv[0], test], sys.stdout) + except SystemExit as e: + if e > 0: + print("Ran %s tests without failure" % count) + exit(1) + else: + raise + count = count + 1 + if failed: + print("Ran %s tests without failure" % count) + exit(0) + # If not until-failure special case call testr like normal + elif pretty and not list_tests: ps = subprocess.Popen(cmd, env=env, stdout=subprocess.PIPE) proc = subprocess.Popen(['subunit-trace', '--no-failure-debug', '-f'], env=env, stdin=ps.stdout) @@ -147,12 +213,17 @@ def main(): msg = "You can't specify a concurrency to use when running serially" print(msg) exit(4) + if (opts.pdb or opts.no_discover) and opts.until_failure: + msg = "You can not use until_failure mode with pdb or no-discover" + print(msg) + exit(5) exclude_regex = construct_regex(opts.blacklist_file, opts.regex) if not os.path.isdir('.testrepository'): subprocess.call(['testr', 'init']) if not opts.no_discover and not opts.pdb: exit(call_testr(exclude_regex, opts.subunit, opts.pretty, opts.list, - opts.slowest, opts.parallel, opts.concurrency)) + opts.slowest, opts.parallel, opts.concurrency, + opts.until_failure)) elif opts.pdb: exit(call_testtools_run(opts.pdb)) else: diff --git a/os_testr/subunit_trace.py b/os_testr/subunit_trace.py index 304c633..91c2ff7 100755 --- a/os_testr/subunit_trace.py +++ b/os_testr/subunit_trace.py @@ -275,6 +275,9 @@ def parse_args(): help="Threshold to use for displaying percent change " "from the avg run time. 
If one is not specified " "the percent change will always be displayed") + parser.add_argument('--no-summary', action='store_true', + help="Don't print the summary of the test run after " + " completes") return parser.parse_args() @@ -305,7 +308,8 @@ def main(): exit(1) if args.post_fails: print_fails(sys.stdout) - print_summary(sys.stdout, elapsed_time) + if not args.no_summary: + print_summary(sys.stdout, elapsed_time) exit(0 if summary.wasSuccessful() else 1) From ef2e83d3967eb42e25c1ae026a653da78c129493 Mon Sep 17 00:00:00 2001 From: Matthew Treinish <mtreinish@kortar.org> Date: Thu, 19 Mar 2015 00:10:37 -0400 Subject: [PATCH 13/23] Ensure failure is printed with --until-failure in pretty mode This commit adds a missing option to calling subunit trace inside the until-failure loop. Previously, when a test failed it would not print the failure output which would make it impossible to debug why it had failed. This fixes that oversight. --- os_testr/os_testr.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/os_testr/os_testr.py b/os_testr/os_testr.py index 819414d..149d5ce 100755 --- a/os_testr/os_testr.py +++ b/os_testr/os_testr.py @@ -139,7 +139,7 @@ def call_testr(regex, subunit, pretty, list_tests, slowest, parallel, concur, cmd = ['python', '-m', 'subunit.run', test] ps = subprocess.Popen(cmd, env=env, stdout=subprocess.PIPE) proc = subprocess.Popen(['subunit-trace', - '--no-failure-debug', + '--no-failure-debug', '-f', '--no-summary'], env=env, stdin=ps.stdout) ps.stdout.close() From 42d450eb00c9fb205e8778e4335e8d48488ab8d3 Mon Sep 17 00:00:00 2001 From: Matthew Treinish <mtreinish@kortar.org> Date: Tue, 31 Mar 2015 10:42:25 -0400 Subject: [PATCH 14/23] Add subunit2html from jenkins slave scripts This commit adds another utility to os-testr, subunit2html, which is used to generate html output from a subunit stream. This utility is currently being used after OpenStack test jobs to generate the testr_results.html page. 
The previous home for this file was in the openstack-infra project config repo as a jenkins slave script. --- os_testr/subunit2html.py | 727 +++++++++++++++++++++++++++++++++++++++ setup.cfg | 1 + 2 files changed, 728 insertions(+) create mode 100755 os_testr/subunit2html.py diff --git a/os_testr/subunit2html.py b/os_testr/subunit2html.py new file mode 100755 index 0000000..96c289f --- /dev/null +++ b/os_testr/subunit2html.py @@ -0,0 +1,727 @@ +#!/usr/bin/python +""" +Utility to convert a subunit stream to an html results file. +Code is adapted from the pyunit Html test runner at +http://tungwaiyip.info/software/HTMLTestRunner.html + +Takes two arguments. First argument is path to subunit log file, second +argument is path of desired output file. Second argument is optional, +defaults to 'results.html'. + +Original HTMLTestRunner License: +------------------------------------------------------------------------ +Copyright (c) 2004-2007, Wai Yip Tung +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +* Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. +* Neither the name Wai Yip Tung nor the names of its contributors may be + used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER +OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +""" + +import collections +import datetime +import io +import sys +import traceback +from xml.sax import saxutils + +import subunit +import testtools + +__version__ = '0.1' + + +class TemplateData(object): + """ + Define a HTML template for report customerization and generation. + + Overall structure of an HTML report + + HTML + +------------------------+ + |<html> | + | <head> | + | | + | STYLESHEET | + | +----------------+ | + | | | | + | +----------------+ | + | | + | </head> | + | | + | <body> | + | | + | HEADING | + | +----------------+ | + | | | | + | +----------------+ | + | | + | REPORT | + | +----------------+ | + | | | | + | +----------------+ | + | | + | ENDING | + | +----------------+ | + | | | | + | +----------------+ | + | | + | </body> | + |</html> | + +------------------------+ + """ + + STATUS = { + 0: 'pass', + 1: 'fail', + 2: 'error', + 3: 'skip', + } + + DEFAULT_TITLE = 'Unit Test Report' + DEFAULT_DESCRIPTION = '' + + # ------------------------------------------------------------------------ + # HTML Template + + HTML_TMPL = r"""<?xml version="1.0" encoding="UTF-8"?> +<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" + "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd"> +<html xmlns="http://www.w3.org/1999/xhtml"> +<head> + <title>%(title)s</title> + <meta name="generator" content="%(generator)s"/> + <meta http-equiv="Content-Type" content="text/html; charset=UTF-8"/> + %(stylesheet)s +</head> +<body> +<script 
language="javascript" type="text/javascript"><!-- +output_list = Array(); + +/* level - 0:Summary; 1:Failed; 2:All */ +function showCase(level) { + trs = document.getElementsByTagName("tr"); + for (var i = 0; i < trs.length; i++) { + tr = trs[i]; + id = tr.id; + if (id.substr(0,2) == 'ft') { + if (level < 1) { + tr.className = 'hiddenRow'; + } + else { + tr.className = ''; + } + } + if (id.substr(0,2) == 'pt') { + if (level > 1) { + tr.className = ''; + } + else { + tr.className = 'hiddenRow'; + } + } + } +} + + +function showClassDetail(cid, count) { + var id_list = Array(count); + var toHide = 1; + for (var i = 0; i < count; i++) { + tid0 = 't' + cid.substr(1) + '.' + (i+1); + tid = 'f' + tid0; + tr = document.getElementById(tid); + if (!tr) { + tid = 'p' + tid0; + tr = document.getElementById(tid); + } + id_list[i] = tid; + if (tr.className) { + toHide = 0; + } + } + for (var i = 0; i < count; i++) { + tid = id_list[i]; + if (toHide) { + document.getElementById('div_'+tid).style.display = 'none' + document.getElementById(tid).className = 'hiddenRow'; + } + else { + document.getElementById(tid).className = ''; + } + } +} + + +function showTestDetail(div_id){ + var details_div = document.getElementById(div_id) + var displayState = details_div.style.display + // alert(displayState) + if (displayState != 'block' ) { + displayState = 'block' + details_div.style.display = 'block' + } + else { + details_div.style.display = 'none' + } +} + + +function html_escape(s) { + s = s.replace(/&/g,'&'); + s = s.replace(/</g,'<'); + s = s.replace(/>/g,'>'); + return s; +} + +/* obsoleted by detail in <div> +function showOutput(id, name) { + var w = window.open("", //url + name, + "resizable,scrollbars,status,width=800,height=450"); + d = w.document; + d.write("<pre>"); + d.write(html_escape(output_list[id])); + d.write("\n"); + d.write("<a href='javascript:window.close()'>close</a>\n"); + d.write("</pre>\n"); + d.close(); +} +*/ +--></script> + +%(heading)s +%(report)s 
+%(ending)s + +</body> +</html> +""" + # variables: (title, generator, stylesheet, heading, report, ending) + + # ------------------------------------------------------------------------ + # Stylesheet + # + # alternatively use a <link> for external style sheet, e.g. + # <link rel="stylesheet" href="$url" type="text/css"> + + STYLESHEET_TMPL = """ +<style type="text/css" media="screen"> +body { font-family: verdana, arial, helvetica, sans-serif; + font-size: 80%; } +table { font-size: 100%; width: 100%;} +pre { font-size: 80%; } + +/* -- heading -------------------------------------------------------------- */ +h1 { + font-size: 16pt; + color: gray; +} +.heading { + margin-top: 0ex; + margin-bottom: 1ex; +} + +.heading .attribute { + margin-top: 1ex; + margin-bottom: 0; +} + +.heading .description { + margin-top: 4ex; + margin-bottom: 6ex; +} + +/* -- css div popup -------------------------------------------------------- */ +a.popup_link { +} + +a.popup_link:hover { + color: red; +} + +.popup_window { + display: none; + overflow-x: scroll; + /*border: solid #627173 1px; */ + padding: 10px; + background-color: #E6E6D6; + font-family: "Ubuntu Mono", "Lucida Console", "Courier New", monospace; + text-align: left; + font-size: 8pt; +} + +} +/* -- report --------------------------------------------------------------- */ +#show_detail_line { + margin-top: 3ex; + margin-bottom: 1ex; +} +#result_table { + width: 100%; + border-collapse: collapse; + border: 1px solid #777; +} +#header_row { + font-weight: bold; + color: white; + background-color: #777; +} +#result_table td { + border: 1px solid #777; + padding: 2px; +} +#total_row { font-weight: bold; } +.passClass { background-color: #6c6; } +.failClass { background-color: #c60; } +.errorClass { background-color: #c00; } +.passCase { color: #6c6; } +.failCase { color: #c60; font-weight: bold; } +.errorCase { color: #c00; font-weight: bold; } +.hiddenRow { display: none; } +.testcase { margin-left: 2em; } +td.testname 
{width: 40%} +td.small {width: 40px} + +/* -- ending --------------------------------------------------------------- */ +#ending { +} + +</style> +""" + + # ------------------------------------------------------------------------ + # Heading + # + + HEADING_TMPL = """<div class='heading'> +<h1>%(title)s</h1> +%(parameters)s +<p class='description'>%(description)s</p> +</div> + +""" # variables: (title, parameters, description) + + HEADING_ATTRIBUTE_TMPL = """ +<p class='attribute'><strong>%(name)s:</strong> %(value)s</p> +""" # variables: (name, value) + + # ------------------------------------------------------------------------ + # Report + # + + REPORT_TMPL = """ +<p id='show_detail_line'>Show +<a href='javascript:showCase(0)'>Summary</a> +<a href='javascript:showCase(1)'>Failed</a> +<a href='javascript:showCase(2)'>All</a> +</p> +<table id='result_table'> +<colgroup> +<col align='left' /> +<col align='right' /> +<col align='right' /> +<col align='right' /> +<col align='right' /> +<col align='right' /> +<col align='right' /> +<col align='right' /> +</colgroup> +<tr id='header_row'> + <td>Test Group/Test case</td> + <td>Count</td> + <td>Pass</td> + <td>Fail</td> + <td>Error</td> + <td>Skip</td> + <td>View</td> + <td> </td> +</tr> +%(test_list)s +<tr id='total_row'> + <td>Total</td> + <td>%(count)s</td> + <td>%(Pass)s</td> + <td>%(fail)s</td> + <td>%(error)s</td> + <td>%(skip)s</td> + <td> </td> + <td> </td> +</tr> +</table> +""" # variables: (test_list, count, Pass, fail, error) + + REPORT_CLASS_TMPL = r""" +<tr class='%(style)s'> + <td class="testname">%(desc)s</td> + <td class="small">%(count)s</td> + <td class="small">%(Pass)s</td> + <td class="small">%(fail)s</td> + <td class="small">%(error)s</td> + <td class="small">%(skip)s</td> + <td class="small"><a href="javascript:showClassDetail('%(cid)s',%(count)s)" +>Detail</a></td> + <td> </td> +</tr> +""" # variables: (style, desc, count, Pass, fail, error, cid) + + REPORT_TEST_WITH_OUTPUT_TMPL = r""" +<tr 
id='%(tid)s' class='%(Class)s'> + <td class='%(style)s'><div class='testcase'>%(desc)s</div></td> + <td colspan='7' align='left'> + + <!--css div popup start--> + <a class="popup_link" onfocus='this.blur();' + href="javascript:showTestDetail('div_%(tid)s')" > + %(status)s</a> + + <div id='div_%(tid)s' class="popup_window"> + <div style='text-align: right; color:red;cursor:pointer'> + <a onfocus='this.blur();' +onclick="document.getElementById('div_%(tid)s').style.display = 'none' " > + [x]</a> + </div> + <pre> + %(script)s + </pre> + </div> + <!--css div popup end--> + + </td> +</tr> +""" # variables: (tid, Class, style, desc, status) + + REPORT_TEST_NO_OUTPUT_TMPL = r""" +<tr id='%(tid)s' class='%(Class)s'> + <td class='%(style)s'><div class='testcase'>%(desc)s</div></td> + <td colspan='6' align='center'>%(status)s</td> +</tr> +""" # variables: (tid, Class, style, desc, status) + + REPORT_TEST_OUTPUT_TMPL = r""" +%(id)s: %(output)s +""" # variables: (id, output) + + # ------------------------------------------------------------------------ + # ENDING + # + + ENDING_TMPL = """<div id='ending'> </div>""" + +# -------------------- The end of the Template class ------------------- + + +class ClassInfoWrapper(object): + def __init__(self, name, mod): + self.name = name + self.mod = mod + + def __repr__(self): + return "%s" % (self.name) + + +class HtmlOutput(testtools.TestResult): + """Output test results in html.""" + + def __init__(self, html_file='result.html'): + super(HtmlOutput, self).__init__() + self.success_count = 0 + self.failure_count = 0 + self.error_count = 0 + self.skip_count = 0 + self.result = [] + self.html_file = html_file + + def addSuccess(self, test): + self.success_count += 1 + output = test.shortDescription() + if output is None: + output = test.id() + self.result.append((0, test, output, '')) + + def addSkip(self, test, err): + output = test.shortDescription() + if output is None: + output = test.id() + self.skip_count += 1 + 
self.result.append((3, test, output, '')) + + def addError(self, test, err): + output = test.shortDescription() + if output is None: + output = test.id() + # Skipped tests are handled by SkipTest Exceptions. + #if err[0] == SkipTest: + # self.skip_count += 1 + # self.result.append((3, test, output, '')) + else: + self.error_count += 1 + _exc_str = self.formatErr(err) + self.result.append((2, test, output, _exc_str)) + + def addFailure(self, test, err): + print(test) + self.failure_count += 1 + _exc_str = self.formatErr(err) + output = test.shortDescription() + if output is None: + output = test.id() + self.result.append((1, test, output, _exc_str)) + + def formatErr(self, err): + exctype, value, tb = err + return ''.join(traceback.format_exception(exctype, value, tb)) + + def stopTestRun(self): + super(HtmlOutput, self).stopTestRun() + self.stopTime = datetime.datetime.now() + report_attrs = self._getReportAttributes() + generator = 'subunit2html %s' % __version__ + heading = self._generate_heading(report_attrs) + report = self._generate_report() + ending = self._generate_ending() + output = TemplateData.HTML_TMPL % dict( + title=saxutils.escape(TemplateData.DEFAULT_TITLE), + generator=generator, + stylesheet=TemplateData.STYLESHEET_TMPL, + heading=heading, + report=report, + ending=ending, + ) + if self.html_file: + with open(self.html_file, 'wb') as html_file: + html_file.write(output.encode('utf8')) + + def _getReportAttributes(self): + """Return report attributes as a list of (name, value).""" + status = [] + if self.success_count: + status.append('Pass %s' % self.success_count) + if self.failure_count: + status.append('Failure %s' % self.failure_count) + if self.error_count: + status.append('Error %s' % self.error_count) + if self.skip_count: + status.append('Skip %s' % self.skip_count) + if status: + status = ' '.join(status) + else: + status = 'none' + return [ + ('Status', status), + ] + + def _generate_heading(self, report_attrs): + a_lines = [] + for 
name, value in report_attrs: + line = TemplateData.HEADING_ATTRIBUTE_TMPL % dict( + name=saxutils.escape(name), + value=saxutils.escape(value), + ) + a_lines.append(line) + heading = TemplateData.HEADING_TMPL % dict( + title=saxutils.escape(TemplateData.DEFAULT_TITLE), + parameters=''.join(a_lines), + description=saxutils.escape(TemplateData.DEFAULT_DESCRIPTION), + ) + return heading + + def _generate_report(self): + rows = [] + sortedResult = self._sortResult(self.result) + for cid, (cls, cls_results) in enumerate(sortedResult): + # subtotal for a class + np = nf = ne = ns = 0 + for n, t, o, e in cls_results: + if n == 0: + np += 1 + elif n == 1: + nf += 1 + elif n == 2: + ne += 1 + else: + ns += 1 + + # format class description + if cls.mod == "__main__": + name = cls.name + else: + name = "%s" % (cls.name) + doc = cls.__doc__ and cls.__doc__.split("\n")[0] or "" + desc = doc and '%s: %s' % (name, doc) or name + + row = TemplateData.REPORT_CLASS_TMPL % dict( + style=(ne > 0 and 'errorClass' or nf > 0 + and 'failClass' or 'passClass'), + desc = desc, + count = np + nf + ne + ns, + Pass = np, + fail = nf, + error = ne, + skip = ns, + cid = 'c%s' % (cid + 1), + ) + rows.append(row) + + for tid, (n, t, o, e) in enumerate(cls_results): + self._generate_report_test(rows, cid, tid, n, t, o, e) + + report = TemplateData.REPORT_TMPL % dict( + test_list=''.join(rows), + count=str(self.success_count + self.failure_count + + self.error_count + self.skip_count), + Pass=str(self.success_count), + fail=str(self.failure_count), + error=str(self.error_count), + skip=str(self.skip_count), + ) + return report + + def _sortResult(self, result_list): + # unittest does not seems to run in any particular order. + # Here at least we want to group them together by class. 
+ rmap = {} + classes = [] + for n, t, o, e in result_list: + if hasattr(t, '_tests'): + for inner_test in t._tests: + self._add_cls(rmap, classes, inner_test, + (n, inner_test, o, e)) + else: + self._add_cls(rmap, classes, t, (n, t, o, e)) + classort = lambda s: str(s) + sortedclasses = sorted(classes, key=classort) + r = [(cls, rmap[str(cls)]) for cls in sortedclasses] + return r + + def _add_cls(self, rmap, classes, test, data_tuple): + if hasattr(test, 'test'): + test = test.test + if test.__class__ == subunit.RemotedTestCase: + #print(test._RemotedTestCase__description.rsplit('.', 1)[0]) + cl = test._RemotedTestCase__description.rsplit('.', 1)[0] + mod = cl.rsplit('.', 1)[0] + cls = ClassInfoWrapper(cl, mod) + else: + cls = ClassInfoWrapper(str(test.__class__), str(test.__module__)) + if not str(cls) in rmap: + rmap[str(cls)] = [] + classes.append(cls) + rmap[str(cls)].append(data_tuple) + + def _generate_report_test(self, rows, cid, tid, n, t, o, e): + # e.g. 'pt1.1', 'ft1.1', etc + # ptx.x for passed/skipped tests and ftx.x for failed/errored tests. 
+ has_output = bool(o or e) + tid = ((n == 0 or n == 3) and + 'p' or 'f') + 't%s.%s' % (cid + 1, tid + 1) + name = t.id().split('.')[-1] + # if shortDescription is not the function name, use it + if t.shortDescription().find(name) == -1: + doc = t.shortDescription() + else: + doc = None + desc = doc and ('%s: %s' % (name, doc)) or name + tmpl = (has_output and TemplateData.REPORT_TEST_WITH_OUTPUT_TMPL + or TemplateData.REPORT_TEST_NO_OUTPUT_TMPL) + + script = TemplateData.REPORT_TEST_OUTPUT_TMPL % dict( + id=tid, + output=saxutils.escape(o + e), + ) + + row = tmpl % dict( + tid=tid, + Class=((n == 0 or n == 3) and 'hiddenRow' or 'none'), + style=(n == 2 and 'errorCase' or + (n == 1 and 'failCase' or 'none')), + desc=desc, + script=script, + status=TemplateData.STATUS[n], + ) + rows.append(row) + if not has_output: + return + + def _generate_ending(self): + return TemplateData.ENDING_TMPL + + def startTestRun(self): + super(HtmlOutput, self).startTestRun() + + +class FileAccumulator(testtools.StreamResult): + + def __init__(self): + super(FileAccumulator, self).__init__() + self.route_codes = collections.defaultdict(io.BytesIO) + + def status(self, **kwargs): + if kwargs.get('file_name') != 'stdout': + return + file_bytes = kwargs.get('file_bytes') + if not file_bytes: + return + route_code = kwargs.get('route_code') + stream = self.route_codes[route_code] + stream.write(file_bytes) + + +def main(): + if len(sys.argv) < 2: + print("Need at least one argument: path to subunit log.") + exit(1) + subunit_file = sys.argv[1] + if len(sys.argv) > 2: + html_file = sys.argv[2] + else: + html_file = 'results.html' + + html_result = HtmlOutput(html_file) + stream = open(subunit_file, 'rb') + + # Feed the subunit stream through both a V1 and V2 parser. + # Depends on having the v2 capable libraries installed. + # First V2. + # Non-v2 content and captured non-test output will be presented as file + # segments called stdout. 
+ suite = subunit.ByteStreamToStreamResult(stream, non_subunit_name='stdout') + # The HTML output code is in legacy mode. + result = testtools.StreamToExtendedDecorator(html_result) + # Divert non-test output + accumulator = FileAccumulator() + result = testtools.StreamResultRouter(result) + result.add_rule(accumulator, 'test_id', test_id=None) + result.startTestRun() + suite.run(result) + # Now reprocess any found stdout content as V1 subunit + for bytes_io in accumulator.route_codes.values(): + bytes_io.seek(0) + suite = subunit.ProtocolTestCase(bytes_io) + suite.run(html_result) + result.stopTestRun() + + +if __name__ == '__main__': + main() diff --git a/setup.cfg b/setup.cfg index 0f883c5..ab1151a 100644 --- a/setup.cfg +++ b/setup.cfg @@ -28,6 +28,7 @@ packages = console_scripts = subunit-trace = os_testr.subunit_trace:main ostestr = os_testr.os_testr:main + subunit2html = os_testr.subunit2html:main [build_sphinx] source-dir = doc/source From f5c5aabbc5dbd0b3624d2e264959b956748c3cbc Mon Sep 17 00:00:00 2001 From: Matthew Treinish <mtreinish@kortar.org> Date: Wed, 1 Apr 2015 17:58:42 -0400 Subject: [PATCH 15/23] Flush out the readme in preparation for the first release This commit adds some details the README about the included utils and starts the Release Notes section in prepartion for pushing the first release. 
Change-Id: I45dd057bdfc6b5d8810b2f15fedabbecccdd7478 --- README.rst | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/README.rst b/README.rst index eeb27cf..ebfd8f7 100644 --- a/README.rst +++ b/README.rst @@ -12,4 +12,15 @@ A testr wrapper to provide functionality for OpenStack projects Features -------- -* TODO +* ostestr: a testr wrapper that uses subunit-trace for output and builds some + helpful extra functionality around testr +* subunit-trace: an output filter for a subunit stream which provides useful + information about the run +* subunit2html: generates a test results html page from a subunit stream + +Release Notes +============= + +0.1.0 +----- + * First release which includes: ostestr, subunit-trace, and subunit2html From 12ca9dd582141aeb2f6268fdc68780836e8544d7 Mon Sep 17 00:00:00 2001 From: Matthew Treinish <mtreinish@kortar.org> Date: Wed, 1 Apr 2015 19:12:37 -0400 Subject: [PATCH 16/23] Fix pep8 issues and add apache header to subunit2html This commit fixes pep8 issues that were added when subunit2html was copied into the repo. As part of this the subunit2html was missing a license header for the additions made to it on top of the original HTMLTestRunner script it was based on. The addition of the copyright header mirrors project-config change: I6fc16d316012fd3e1c196f74df25d725a310f6dc Change-Id: Ieb42d5baddb3e1446fcd50197136f605324323aa --- os_testr/subunit2html.py | 24 ++++++++++++++++++------ 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/os_testr/subunit2html.py b/os_testr/subunit2html.py index 96c289f..fd93ac7 100755 --- a/os_testr/subunit2html.py +++ b/os_testr/subunit2html.py @@ -1,4 +1,18 @@ #!/usr/bin/python +# +# Copyright 2012-2013 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. """ Utility to convert a subunit stream to an html results file. Code is adapted from the pyunit Html test runner at @@ -53,8 +67,7 @@ __version__ = '0.1' class TemplateData(object): - """ - Define a HTML template for report customerization and generation. + """Define a HTML template for report customerization and generation. Overall structure of an HTML report @@ -472,9 +485,9 @@ class HtmlOutput(testtools.TestResult): if output is None: output = test.id() # Skipped tests are handled by SkipTest Exceptions. - #if err[0] == SkipTest: - # self.skip_count += 1 - # self.result.append((3, test, output, '')) + # if err[0] == SkipTest: + # self.skip_count += 1 + # self.result.append((3, test, output, '')) else: self.error_count += 1 _exc_str = self.formatErr(err) @@ -619,7 +632,6 @@ class HtmlOutput(testtools.TestResult): if hasattr(test, 'test'): test = test.test if test.__class__ == subunit.RemotedTestCase: - #print(test._RemotedTestCase__description.rsplit('.', 1)[0]) cl = test._RemotedTestCase__description.rsplit('.', 1)[0] mod = cl.rsplit('.', 1)[0] cls = ClassInfoWrapper(cl, mod) From 131b5353a7b738443d46dd90deb3623b2cc60490 Mon Sep 17 00:00:00 2001 From: Matthew Treinish <mtreinish@kortar.org> Date: Mon, 6 Apr 2015 11:37:12 -0400 Subject: [PATCH 17/23] Add TODO file to os-testr This commit adds a TODO file to the project to indicate both short and long term work items for the project. 
Change-Id: I90c19330eb5d8216a88f40411ed883e6f64c750d --- TODO.rst | 17 +++++++++++++++++ doc/source/index.rst | 1 + doc/source/todo.rst | 1 + 3 files changed, 19 insertions(+) create mode 100644 TODO.rst create mode 100644 doc/source/todo.rst diff --git a/TODO.rst b/TODO.rst new file mode 100644 index 0000000..135bcd6 --- /dev/null +++ b/TODO.rst @@ -0,0 +1,17 @@ +Work Items for os-testr +======================= + +Short Term +---------- + * Expose all subunit-trace options through ostestr + * Add --html option to ostestr to run testr with subunit2html output + * Add unit tests + * For ostestr test selection api + * Response code validation on more argument permutations +Long Term +--------- + * Lock down test selection CLI + * When this is done it will become release 1.0.0 + * Add subunit-trace functional tests + ** Sample subunit streams and test output from subunit-trace + * Add testing for subunit2html diff --git a/doc/source/index.rst b/doc/source/index.rst index feb274b..448d655 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -15,6 +15,7 @@ Contents: installation usage contributing + todo Indices and tables ================== diff --git a/doc/source/todo.rst b/doc/source/todo.rst new file mode 100644 index 0000000..0a7409f --- /dev/null +++ b/doc/source/todo.rst @@ -0,0 +1 @@ +.. include:: ../../TODO.rst From ff3dc87d6b496af625652838936288e0c3882853 Mon Sep 17 00:00:00 2001 From: Matthew Treinish <mtreinish@kortar.org> Date: Mon, 13 Apr 2015 11:32:50 -0400 Subject: [PATCH 18/23] Add support for having comments in the exclude file This commit adds the support to have comments in the exclude file. After this commit anything after a '#' will be treated as a comment and ignored for matching. A new option --print-exclude is added which will print out what is being skipped by the exclude file with any comments in the file. 
Change-Id: Ia357730f918e0a57cf2ac51cd8195e2721667511 --- os_testr/os_testr.py | 78 +++++++++++++++++++++++++++++++------------- 1 file changed, 55 insertions(+), 23 deletions(-) diff --git a/os_testr/os_testr.py b/os_testr/os_testr.py index 149d5ce..353868c 100755 --- a/os_testr/os_testr.py +++ b/os_testr/os_testr.py @@ -66,20 +66,69 @@ def parse_args(): 'encountered. Running with subunit or pretty' 'output enable will force the loop to run tests' 'serially') + parser.add_argument('--print-exclude', action='store_true', + help='If an exclude file is used this option will ' + 'prints the comment from the same line and all ' + 'skipped tests before the test run') parser.set_defaults(pretty=True, slowest=True, parallel=True) opts = parser.parse_args() return opts -def construct_regex(blacklist_file, regex): +def _get_test_list(regex, env=None): + env = env or copy.deepcopy(os.environ) + proc = subprocess.Popen(['testr', 'list-tests', regex], env=env, + stdout=subprocess.PIPE) + out = proc.communicate()[0] + raw_test_list = out.split('\n') + bad = False + test_list = [] + exclude_list = ['OS_', 'CAPTURE', 'TEST_TIMEOUT', 'PYTHON', + 'subunit.run discover'] + for line in raw_test_list: + for exclude in exclude_list: + if exclude in line: + bad = True + break + elif not line: + bad = True + break + if not bad: + test_list.append(line) + bad = False + return test_list + + +def print_skips(regex, message): + test_list = _get_test_list(regex) + if test_list: + if message: + print(message) + else: + print('Skipped because of regex %s:' % regex) + for test in test_list: + print(test) + # Extra whitespace to separate + print('\n') + + +def construct_regex(blacklist_file, regex, print_exclude): if not blacklist_file: exclude_regex = '' else: black_file = open(blacklist_file, 'r') exclude_regex = '' for line in black_file: - regex = line.strip() - exclude_regex = '|'.join([regex, exclude_regex]) + raw_line = line.strip() + split_line = raw_line.split('#') + # Before the # is the 
regex + regex = split_line[0].strip() + # After the # is a comment + comment = split_line[1].strip() + if regex: + if print_exclude: + print_skips(regex, comment) + exclude_regex = '|'.join([regex, exclude_regex]) if exclude_regex: exclude_regex = "'(?!.*" + exclude_regex + ")" if regex: @@ -106,25 +155,7 @@ def call_testr(regex, subunit, pretty, list_tests, slowest, parallel, concur, # This workaround is necessary because of lp bug 1411804 it's super hacky # and makes tons of unfounded assumptions, but it works for the most part if (subunit or pretty) and until_failure: - proc = subprocess.Popen(['testr', 'list-tests', regex], env=env, - stdout=subprocess.PIPE) - out = proc.communicate()[0] - raw_test_list = out.split('\n') - bad = False - test_list = [] - exclude_list = ['CAPTURE', 'TEST_TIMEOUT', 'PYTHON', - 'subunit.run discover'] - for line in raw_test_list: - for exclude in exclude_list: - if exclude in line: - bad = True - break - elif not line: - bad = True - break - if not bad: - test_list.append(line) - bad = False + test_list = _get_test_list(regex, env) count = 0 failed = False if not test_list: @@ -217,7 +248,8 @@ def main(): msg = "You can not use until_failure mode with pdb or no-discover" print(msg) exit(5) - exclude_regex = construct_regex(opts.blacklist_file, opts.regex) + exclude_regex = construct_regex(opts.blacklist_file, opts.regex, + opts.print_exclude) if not os.path.isdir('.testrepository'): subprocess.call(['testr', 'init']) if not opts.no_discover and not opts.pdb: From 9832648353432fa0e05a3003bbf67052453e9d3f Mon Sep 17 00:00:00 2001 From: Thomas Bechtold <tbechtold@suse.com> Date: Sat, 18 Apr 2015 13:39:25 +0200 Subject: [PATCH 19/23] Fix ValueError in subunit_trace When a subunit stream for a testcase doesn't contain start end enddate, the duration can't be calculated which leads to a: ValueError: could not convert string to float Check now if the duration is an empty string and add basic test coverage based on ddt for the 
subunit_trace command. Change-Id: I9953019794ba53fcfcb20e32fecbe94da22c9565 --- os_testr/subunit_trace.py | 6 ++- os_testr/tests/test_subunit_trace.py | 61 ++++++++++++++++++++++++++++ test-requirements.txt | 1 + 3 files changed, 67 insertions(+), 1 deletion(-) create mode 100644 os_testr/tests/test_subunit_trace.py diff --git a/os_testr/subunit_trace.py b/os_testr/subunit_trace.py index 91c2ff7..c509e43 100755 --- a/os_testr/subunit_trace.py +++ b/os_testr/subunit_trace.py @@ -220,7 +220,11 @@ def run_time(): runtime = 0.0 for k, v in RESULTS.items(): for test in v: - runtime += float(get_duration(test['timestamps']).strip('s')) + test_dur = get_duration(test['timestamps']).strip('s') + # NOTE(toabctl): get_duration() can return an empty string + # which leads to a ValueError when casting to float + if test_dur: + runtime += float(test_dur) return runtime diff --git a/os_testr/tests/test_subunit_trace.py b/os_testr/tests/test_subunit_trace.py new file mode 100644 index 0000000..5544636 --- /dev/null +++ b/os_testr/tests/test_subunit_trace.py @@ -0,0 +1,61 @@ +# Copyright 2015 SUSE Linux GmbH +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from datetime import datetime as dt + +from ddt import data +from ddt import ddt +from ddt import unpack +from mock import patch + +from os_testr import subunit_trace +from os_testr.tests import base + + +@ddt +class TestSubunitTrace(base.TestCase): + + @data(([dt(2015, 4, 17, 22, 23, 14, 111111), + dt(2015, 4, 17, 22, 23, 14, 111111)], + "0.000000s"), + ([dt(2015, 4, 17, 22, 23, 14, 111111), + dt(2015, 4, 17, 22, 23, 15, 111111)], + "1.000000s"), + ([dt(2015, 4, 17, 22, 23, 14, 111111), + None], + "")) + @unpack + def test_get_durating(self, timestamps, expected_result): + self.assertEqual(subunit_trace.get_duration(timestamps), + expected_result) + + @data(([dt(2015, 4, 17, 22, 23, 14, 111111), + dt(2015, 4, 17, 22, 23, 14, 111111)], + 0.0), + ([dt(2015, 4, 17, 22, 23, 14, 111111), + dt(2015, 4, 17, 22, 23, 15, 111111)], + 1.0), + ([dt(2015, 4, 17, 22, 23, 14, 111111), + None], + 0.0)) + @unpack + def test_run_time(self, timestamps, expected_result): + patched_res = { + 0: [ + {'timestamps': timestamps} + ] + } + with patch.dict(subunit_trace.RESULTS, patched_res, clear=True): + self.assertEqual(subunit_trace.run_time(), expected_result) diff --git a/test-requirements.txt b/test-requirements.txt index c7208f0..d0ca195 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -10,3 +10,4 @@ sphinx>=1.1.2,!=1.2.0,!=1.3b1,<1.3 oslosphinx>=2.2.0 # Apache-2.0 oslotest>=1.2.0 # Apache-2.0 testscenarios>=0.4 +ddt>=0.4.0 From de604832c3ece2aaeb56cc7a427dcb1dd833c0e1 Mon Sep 17 00:00:00 2001 From: TerryHowe <terrylhowe@gmail.com> Date: Mon, 1 Jun 2015 13:28:53 -0600 Subject: [PATCH 20/23] Catch exception trying to extract test time The database I am getting back is a gdm database and it does not have a get method. Catch the exception and try something else. If that blows up, ignore because we'd rather see the results of our tests. 
Change-Id: I2882e19d49f2fb3471669f5eb8a017c5d5ac98c2 --- os_testr/subunit_trace.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/os_testr/subunit_trace.py b/os_testr/subunit_trace.py index c509e43..960b476 100755 --- a/os_testr/subunit_trace.py +++ b/os_testr/subunit_trace.py @@ -131,7 +131,14 @@ def find_test_run_time_diff(test_id, run_time): test_times = dbm.open(times_db_path) except Exception: return False - avg_runtime = float(test_times.get(str(test_id), False)) + try: + avg_runtime = float(test_times.get(str(test_id), False)) + except Exception: + try: + avg_runtime = float(test_times[str(test_id)]) + except Exception: + avg_runtime = False + if avg_runtime and avg_runtime > 0: run_time = float(run_time.rstrip('s')) perc_diff = ((run_time - avg_runtime) / avg_runtime) * 100 From 4292155e3e546064d10d56a5a0cd1dedc7eb25bd Mon Sep 17 00:00:00 2001 From: James Page <james.page@ubuntu.com> Date: Tue, 23 Jun 2015 09:47:44 +0100 Subject: [PATCH 21/23] Misc Python 3 compatibility fixes Python 3 renames StringIO -> io. Use six to deal with this change. Introduces new test dependency on six for StringIO. 
Change-Id: Ia875b7fcbb976599053970ef79ed3f3474626bad --- os_testr/tests/test_return_codes.py | 6 +++--- test-requirements.txt | 1 + 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/os_testr/tests/test_return_codes.py b/os_testr/tests/test_return_codes.py index 591e4dd..082a74a 100644 --- a/os_testr/tests/test_return_codes.py +++ b/os_testr/tests/test_return_codes.py @@ -14,13 +14,13 @@ import os import shutil -import StringIO import subprocess import tempfile import testtools from os_testr.tests import base +from six import StringIO DEVNULL = open(os.devnull, 'wb') @@ -47,8 +47,8 @@ class TestReturnCodes(base.TestCase): shutil.copy('os_testr/tests/files/setup.cfg', self.setup_cfg_file) shutil.copy('os_testr/tests/files/__init__.py', self.init_file) - self.stdout = StringIO.StringIO() - self.stderr = StringIO.StringIO() + self.stdout = StringIO() + self.stderr = StringIO() # Change directory, run wrapper and check result self.addCleanup(os.chdir, os.path.abspath(os.curdir)) os.chdir(self.directory) diff --git a/test-requirements.txt b/test-requirements.txt index d0ca195..608d49e 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -11,3 +11,4 @@ oslosphinx>=2.2.0 # Apache-2.0 oslotest>=1.2.0 # Apache-2.0 testscenarios>=0.4 ddt>=0.4.0 +six>=1.9.0 From 7376e96ab315cee8b25816c5a632ffbce56ee836 Mon Sep 17 00:00:00 2001 From: Matthew Treinish <mtreinish@kortar.org> Date: Tue, 23 Jun 2015 19:23:13 -0400 Subject: [PATCH 22/23] Disable printing percent change on run time by default This commit disables printing the percent change in run time by default. This was a cool experiment, but isn't really useful in practice. Especially in the gate, things are so noisy and there is so much variance looking at the change for a single doesn't actually mean anything. It makes more sense to make this feature opt-in because it's not generally useful. 
Change-Id: Iecb153452edfe1d7b55757d022ae0331ac563b35 --- os_testr/subunit_trace.py | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/os_testr/subunit_trace.py b/os_testr/subunit_trace.py index 960b476..7c00825 100755 --- a/os_testr/subunit_trace.py +++ b/os_testr/subunit_trace.py @@ -147,7 +147,7 @@ def find_test_run_time_diff(test_id, run_time): def show_outcome(stream, test, print_failures=False, failonly=False, - threshold='0'): + enable_diff=False, threshold='0'): global RESULTS status = test['status'] # TODO(sdague): ask lifeless why on this? @@ -176,11 +176,12 @@ def show_outcome(stream, test, print_failures=False, failonly=False, if status == 'success': out_string = '{%s} %s [%s' % (worker, name, duration) perc_diff = find_test_run_time_diff(test['id'], duration) - if perc_diff and abs(perc_diff) >= abs(float(threshold)): - if perc_diff > 0: - out_string = out_string + ' +%.2f%%' % perc_diff - else: - out_string = out_string + ' %.2f%%' % perc_diff + if enable_diff: + if perc_diff and abs(perc_diff) >= abs(float(threshold)): + if perc_diff > 0: + out_string = out_string + ' +%.2f%%' % perc_diff + else: + out_string = out_string + ' %.2f%%' % perc_diff stream.write(out_string + '] ... ok\n') print_attachments(stream, test) elif status == 'skip': @@ -282,6 +283,9 @@ def parse_args(): default=( os.environ.get('TRACE_FAILONLY', False) is not False)) + parser.add_argument('--perc-diff', '-d', action='store_true', + dest='enable_diff', + help="Print percent change in run time on each test ") parser.add_argument('--diff-threshold', '-t', dest='threshold', help="Threshold to use for displaying percent change " "from the avg run time. 
If one is not specified " @@ -299,7 +303,8 @@ def main(): outcomes = testtools.StreamToDict( functools.partial(show_outcome, sys.stdout, print_failures=args.print_failures, - failonly=args.failonly)) + failonly=args.failonly, + enable_diff=args.enable_diff)) summary = testtools.StreamSummary() result = testtools.CopyStreamResult([outcomes, summary]) result = testtools.StreamResultRouter(result) From 0abbeeac4b280a6157e3e89d456aa17c27c61414 Mon Sep 17 00:00:00 2001 From: Matthew Treinish <mtreinish@kortar.org> Date: Tue, 23 Jun 2015 19:31:57 -0400 Subject: [PATCH 23/23] Dogfood things for unit tests So it turns out we weren't actually using ostestr for running the os-testr unit tests. We probably should use the test runner runner wrapper we're developing to run the unit tests for the test runner runner wrapper and the other utilities in the package. Change-Id: I88af9104352163f2412c2a3cbaf6c88d0a937988 --- tox.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tox.ini b/tox.ini index 4657203..537b2f2 100644 --- a/tox.ini +++ b/tox.ini @@ -10,7 +10,7 @@ setenv = VIRTUAL_ENV={envdir} deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt -commands = python setup.py testr --slowest --testr-args='{posargs}' +commands = ostestr {posargs} [testenv:pep8] commands = flake8